author    Jiri Kosina <jkosina@suse.cz>    2022-12-13 14:27:16 +0100
committer Jiri Kosina <jkosina@suse.cz>    2022-12-13 14:27:16 +0100
commit    cfd1f6c16f7deadfe5269a76c1516405c4466481 (patch)
tree      44c0635e57e9627d9838c0d0d5801381b93ccccd /drivers
parent    a9d9e46c755a189ccb44d91b8cf737742a975de8 (diff)
parent    fd7b68b763c4dfa65e3c145c624427d5fd11202f (diff)
Merge branch 'for-6.2/apple' into for-linus
- new quirks for select Apple keyboards (Kerem Karabay, Aditya Garg)
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/accessibility/speakup/speakup_dummy.c4
-rw-r--r--drivers/accessibility/speakup/speakup_soft.c32
-rw-r--r--drivers/accessibility/speakup/spk_types.h2
-rw-r--r--drivers/accessibility/speakup/varhandlers.c12
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/device_pm.c15
-rw-r--r--drivers/acpi/irq.c8
-rw-r--r--drivers/acpi/resource.c16
-rw-r--r--drivers/acpi/sleep.c8
-rw-r--r--drivers/acpi/thermal.c211
-rw-r--r--drivers/acpi/viot.c1
-rw-r--r--drivers/acpi/x86/s2idle.c12
-rw-r--r--drivers/android/binder.c4
-rw-r--r--drivers/android/binder_alloc.c55
-rw-r--r--drivers/android/binder_alloc.h12
-rw-r--r--drivers/android/binderfs.c31
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_brcm.c2
-rw-r--r--drivers/ata/ahci_imx.c4
-rw-r--r--drivers/ata/ahci_qoriq.c2
-rw-r--r--drivers/ata/ahci_st.c2
-rw-r--r--drivers/ata/ahci_xgene.c2
-rw-r--r--drivers/ata/sata_rcar.c2
-rw-r--r--drivers/base/arch_topology.c19
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/base/core.c4
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/devcoredump.c83
-rw-r--r--drivers/base/devres.c4
-rw-r--r--drivers/base/memory.c6
-rw-r--r--drivers/base/node.c143
-rw-r--r--drivers/base/platform-msi.c1
-rw-r--r--drivers/base/power/domain.c4
-rw-r--r--drivers/base/property.c4
-rw-r--r--drivers/bcma/driver_gpio.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c4
-rw-r--r--drivers/block/drbd/drbd_req.c14
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/rnbd/rnbd-srv.c11
-rw-r--r--drivers/block/ublk_drv.c2
-rw-r--r--drivers/block/virtio_blk.c110
-rw-r--r--drivers/block/zram/zram_drv.c64
-rw-r--r--drivers/block/zram/zram_drv.h15
-rw-r--r--drivers/bus/mhi/host/pci_generic.c2
-rw-r--r--drivers/bus/mvebu-mbus.c26
-rw-r--r--drivers/char/hw_random/arm_smccc_trng.c4
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c2
-rw-r--r--drivers/char/hw_random/core.c55
-rw-r--r--drivers/char/hw_random/imx-rngc.c51
-rw-r--r--drivers/char/ipmi/Kconfig6
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c16
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c8
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c4
-rw-r--r--drivers/char/ipmi/kcs_bmc_aspeed.c29
-rw-r--r--drivers/char/ipmi/kcs_bmc_cdev_ipmi.c4
-rw-r--r--drivers/char/ipmi/kcs_bmc_serio.c4
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/pcmcia/synclink_cs.c3
-rw-r--r--drivers/char/random.c134
-rw-r--r--drivers/char/tpm/tpm_ppi.c2
-rw-r--r--drivers/clk/Kconfig11
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/at91/clk-generated.c5
-rw-r--r--drivers/clk/at91/clk-master.c9
-rw-r--r--drivers/clk/at91/clk-peripheral.c4
-rw-r--r--drivers/clk/at91/dt-compat.c108
-rw-r--r--drivers/clk/at91/sama5d2.c10
-rw-r--r--drivers/clk/baikal-t1/Kconfig12
-rw-r--r--drivers/clk/baikal-t1/Makefile1
-rw-r--r--drivers/clk/baikal-t1/ccu-div.c84
-rw-r--r--drivers/clk/baikal-t1/ccu-div.h17
-rw-r--r--drivers/clk/baikal-t1/ccu-pll.h8
-rw-r--r--drivers/clk/baikal-t1/ccu-rst.c217
-rw-r--r--drivers/clk/baikal-t1/ccu-rst.h67
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-div.c260
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-pll.c123
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c43
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c11
-rw-r--r--drivers/clk/berlin/bg2.c5
-rw-r--r--drivers/clk/berlin/bg2q.c6
-rw-r--r--drivers/clk/clk-asm9260.c29
-rw-r--r--drivers/clk/clk-ast2600.c2
-rw-r--r--drivers/clk/clk-composite.c6
-rw-r--r--drivers/clk/clk-divider.c20
-rw-r--r--drivers/clk/clk-fixed-rate.c28
-rw-r--r--drivers/clk/clk-lan966x.c2
-rw-r--r--drivers/clk/clk-lochnagar.c2
-rw-r--r--drivers/clk/clk-nomadik.c5
-rw-r--r--drivers/clk/clk-npcm7xx.c29
-rw-r--r--drivers/clk/clk-oxnas.c6
-rw-r--r--drivers/clk/clk-qoriq.c10
-rw-r--r--drivers/clk/clk-versaclock5.c163
-rw-r--r--drivers/clk/clk-versaclock7.c1309
-rw-r--r--drivers/clk/clk-xgene.c4
-rw-r--r--drivers/clk/clk.c317
-rw-r--r--drivers/clk/clk_test.c1569
-rw-r--r--drivers/clk/clkdev.c60
-rw-r--r--drivers/clk/davinci/Makefile4
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c3
-rw-r--r--drivers/clk/davinci/pll-dm644x.c81
-rw-r--r--drivers/clk/davinci/pll-dm646x.c85
-rw-r--r--drivers/clk/davinci/pll.c10
-rw-r--r--drivers/clk/davinci/pll.h6
-rw-r--r--drivers/clk/davinci/psc-dm644x.c85
-rw-r--r--drivers/clk/davinci/psc-dm646x.c82
-rw-r--r--drivers/clk/davinci/psc.c6
-rw-r--r--drivers/clk/davinci/psc.h6
-rw-r--r--drivers/clk/imx/Makefile1
-rw-r--r--drivers/clk/imx/clk-composite-93.c171
-rw-r--r--drivers/clk/imx/clk-gate-93.c199
-rw-r--r--drivers/clk/imx/clk-imx8mp.c2
-rw-r--r--drivers/clk/imx/clk-imx93.c30
-rw-r--r--drivers/clk/imx/clk-scu.c6
-rw-r--r--drivers/clk/imx/clk.h9
-rw-r--r--drivers/clk/mediatek/Kconfig87
-rw-r--r--drivers/clk/mediatek/Makefile13
-rw-r--r--drivers/clk/mediatek/clk-apmixed.c12
-rw-r--r--drivers/clk/mediatek/clk-cpumux.c2
-rw-r--r--drivers/clk/mediatek/clk-gate.c1
-rw-r--r--drivers/clk/mediatek/clk-mt2701-bdp.c36
-rw-r--r--drivers/clk/mediatek/clk-mt2701-img.c36
-rw-r--r--drivers/clk/mediatek/clk-mt2701-vdec.c36
-rw-r--r--drivers/clk/mediatek/clk-mt2712-bdp.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-img.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-jpgdec.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mfg.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-vdec.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-venc.c34
-rw-r--r--drivers/clk/mediatek/clk-mt6765-audio.c34
-rw-r--r--drivers/clk/mediatek/clk-mt6765-cam.c33
-rw-r--r--drivers/clk/mediatek/clk-mt6765-img.c33
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mipi0a.c34
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mm.c33
-rw-r--r--drivers/clk/mediatek/clk-mt6765-vcodec.c34
-rw-r--r--drivers/clk/mediatek/clk-mt6779-aud.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-cam.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-img.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-ipe.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mfg.c27
-rw-r--r--drivers/clk/mediatek/clk-mt6779-vdec.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-venc.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6795-apmixedsys.c157
-rw-r--r--drivers/clk/mediatek/clk-mt6795-infracfg.c151
-rw-r--r--drivers/clk/mediatek/clk-mt6795-mfg.c50
-rw-r--r--drivers/clk/mediatek/clk-mt6795-mm.c132
-rw-r--r--drivers/clk/mediatek/clk-mt6795-pericfg.c160
-rw-r--r--drivers/clk/mediatek/clk-mt6795-topckgen.c610
-rw-r--r--drivers/clk/mediatek/clk-mt6795-vdecsys.c55
-rw-r--r--drivers/clk/mediatek/clk-mt6795-vencsys.c50
-rw-r--r--drivers/clk/mediatek/clk-mt6797-img.c36
-rw-r--r--drivers/clk/mediatek/clk-mt6797-vdec.c36
-rw-r--r--drivers/clk/mediatek/clk-mt6797-venc.c36
-rw-r--r--drivers/clk/mediatek/clk-mt8183-cam.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-img.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu0.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu1.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_adl.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_conn.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mfgcfg.c35
-rw-r--r--drivers/clk/mediatek/clk-mt8183-vdec.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-venc.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c28
-rw-r--r--drivers/clk/mediatek/clk-mt8192-cam.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-img.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-ipe.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-mdp.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-mfg.c7
-rw-r--r--drivers/clk/mediatek/clk-mt8192-msdc.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-scp_adsp.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-vdec.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-venc.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192.c234
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c16
-rw-r--r--drivers/clk/mediatek/clk-mt8195-mfg.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8195-topckgen.c46
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo0.c7
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo1.c17
-rw-r--r--drivers/clk/mediatek/clk-mt8365-apu.c55
-rw-r--r--drivers/clk/mediatek/clk-mt8365-cam.c57
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mfg.c63
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mm.c112
-rw-r--r--drivers/clk/mediatek/clk-mt8365-vdec.c63
-rw-r--r--drivers/clk/mediatek/clk-mt8365-venc.c52
-rw-r--r--drivers/clk/mediatek/clk-mt8365.c1155
-rw-r--r--drivers/clk/mediatek/clk-mtk.c52
-rw-r--r--drivers/clk/mediatek/clk-mtk.h3
-rw-r--r--drivers/clk/mediatek/clk-mux.c48
-rw-r--r--drivers/clk/mediatek/clk-mux.h15
-rw-r--r--drivers/clk/mediatek/reset.c1
-rw-r--r--drivers/clk/meson/meson-aoclk.c5
-rw-r--r--drivers/clk/meson/meson-eeclk.c5
-rw-r--r--drivers/clk/meson/meson8b.c5
-rw-r--r--drivers/clk/microchip/Kconfig1
-rw-r--r--drivers/clk/microchip/Makefile1
-rw-r--r--drivers/clk/microchip/clk-mpfs-ccc.c290
-rw-r--r--drivers/clk/microchip/clk-mpfs.c375
-rw-r--r--drivers/clk/mmp/clk-of-pxa168.c113
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c6
-rw-r--r--drivers/clk/mvebu/dove-divider.c2
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c2
-rw-r--r--drivers/clk/pistachio/clk.h4
-rw-r--r--drivers/clk/pxa/clk-pxa.c2
-rw-r--r--drivers/clk/qcom/Kconfig47
-rw-r--r--drivers/clk/qcom/Makefile5
-rw-r--r--drivers/clk/qcom/a53-pll.c4
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c33
-rw-r--r--drivers/clk/qcom/apss-ipq6018.c15
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c66
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h8
-rw-r--r--drivers/clk/qcom/clk-cpu-8996.c329
-rw-r--r--drivers/clk/qcom/clk-rcg.h1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c16
-rw-r--r--drivers/clk/qcom/clk-rpmh.c25
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c83
-rw-r--r--drivers/clk/qcom/dispcc-sm6115.c608
-rw-r--r--drivers/clk/qcom/dispcc-sm8450.c1829
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c341
-rw-r--r--drivers/clk/qcom/gcc-msm8909.c2731
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1020
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c552
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c436
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c56
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c2
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c6
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c20
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c2
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c400
-rw-r--r--drivers/clk/qcom/gcc-sm6115.c48
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c2
-rw-r--r--drivers/clk/qcom/gcc-sm6375.c3919
-rw-r--r--drivers/clk/qcom/gdsc.c35
-rw-r--r--drivers/clk/qcom/gdsc.h5
-rw-r--r--drivers/clk/qcom/gpucc-sc8280xp.c461
-rw-r--r--drivers/clk/qcom/kpss-xcc.c26
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c84
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c211
-rw-r--r--drivers/clk/qcom/lpassaudiocc-sc7280.c66
-rw-r--r--drivers/clk/qcom/lpasscc-sc7280.c44
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7280.c33
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c454
-rw-r--r--drivers/clk/qcom/reset.c4
-rw-r--r--drivers/clk/qcom/reset.h1
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c21
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c14
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c2
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c4
-rw-r--r--drivers/clk/rockchip/Kconfig7
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-rv1126.c1138
-rw-r--r--drivers/clk/rockchip/clk.c27
-rw-r--r--drivers/clk/rockchip/clk.h36
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c6
-rw-r--r--drivers/clk/samsung/clk-exynos7885.c207
-rw-r--r--drivers/clk/samsung/clk-exynos850.c682
-rw-r--r--drivers/clk/samsung/clk-exynosautov9.c401
-rw-r--r--drivers/clk/spear/spear3xx_clock.c1
-rw-r--r--drivers/clk/spear/spear6xx_clock.c1
-rw-r--r--drivers/clk/sprd/Kconfig6
-rw-r--r--drivers/clk/sprd/Makefile1
-rw-r--r--drivers/clk/sprd/common.c9
-rw-r--r--drivers/clk/sprd/ums512-clk.c2202
-rw-r--r--drivers/clk/st/clkgen-fsyn.c5
-rw-r--r--drivers/clk/st/clkgen-mux.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c28
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c19
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c9
-rw-r--r--drivers/clk/tegra/clk-bpmp.c2
-rw-r--r--drivers/clk/tegra/clk-tegra114.c2
-rw-r--r--drivers/clk/tegra/clk-tegra124.c2
-rw-r--r--drivers/clk/tegra/clk-tegra20.c2
-rw-r--r--drivers/clk/tegra/clk-tegra210.c2
-rw-r--r--drivers/clk/tegra/clk-tegra30.c2
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c9
-rw-r--r--drivers/clk/ti/clk.c5
-rw-r--r--drivers/clk/xilinx/Kconfig12
-rw-r--r--drivers/clk/xilinx/Makefile1
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c (renamed from drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c)35
-rw-r--r--drivers/clk/zynqmp/clkc.c19
-rw-r--r--drivers/clk/zynqmp/divider.c9
-rw-r--r--drivers/clk/zynqmp/pll.c31
-rw-r--r--drivers/clocksource/Kconfig2
-rw-r--r--drivers/clocksource/arm_arch_timer.c6
-rw-r--r--drivers/clocksource/exynos_mct.c83
-rw-r--r--drivers/clocksource/renesas-ostm.c2
-rw-r--r--drivers/clocksource/timer-gxp.c7
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c6
-rw-r--r--drivers/clocksource/timer-sun4i.c3
-rw-r--r--drivers/clocksource/timer-ti-dm.c681
-rw-r--r--drivers/comedi/comedi_fops.c8
-rw-r--r--drivers/counter/104-quad-8.c41
-rw-r--r--drivers/counter/Kconfig15
-rw-r--r--drivers/counter/Makefile1
-rw-r--r--drivers/counter/counter-chrdev.c137
-rw-r--r--drivers/counter/counter-core.c14
-rw-r--r--drivers/counter/counter-sysfs.c304
-rw-r--r--drivers/counter/ftm-quaddec.c1
-rw-r--r--drivers/counter/intel-qep.c1
-rw-r--r--drivers/counter/interrupt-cnt.c12
-rw-r--r--drivers/counter/microchip-tcb-capture.c1
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c1
-rw-r--r--drivers/counter/stm32-timer-cnt.c1
-rw-r--r--drivers/counter/ti-ecap-capture.c614
-rw-r--r--drivers/counter/ti-eqep.c1
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c7
-rw-r--r--drivers/crypto/Kconfig3
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c16
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c6
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-cipher.c6
-rw-r--r--drivers/crypto/aspeed/Kconfig48
-rw-r--r--drivers/crypto/aspeed/Makefile7
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-crypto.c1133
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-hash.c1391
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.c284
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h298
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c6
-rw-r--r--drivers/crypto/bcm/cipher.c4
-rw-r--r--drivers/crypto/bcm/cipher.h2
-rw-r--r--drivers/crypto/cavium/cpt/cpt_hw_types.h2
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c6
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c30
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c5
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c6
-rw-r--r--drivers/crypto/ccp/sev-dev.c78
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c2
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h8
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c250
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c216
-rw-r--r--drivers/crypto/hisilicon/qm.c906
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h34
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c456
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c160
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h3
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c134
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c266
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c60
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c67
-rw-r--r--drivers/crypto/keembay/Kconfig4
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h2
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c24
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_main.c8
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c20
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c4
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c5
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c5
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c10
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_hw_data.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_debug.c2
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_uclo.h3
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c18
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c24
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c56
-rw-r--r--drivers/crypto/qce/aead.c4
-rw-r--r--drivers/crypto/qce/sha.c8
-rw-r--r--drivers/crypto/qce/skcipher.c8
-rw-r--r--drivers/crypto/qcom-rng.c7
-rw-r--r--drivers/crypto/sahara.c22
-rw-r--r--drivers/dax/hmem/device.c4
-rw-r--r--drivers/dax/kmem.c42
-rw-r--r--drivers/dax/super.c6
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/apple-admac.c45
-rw-r--r--drivers/dma/at_xdmac.c5
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c12
-rw-r--r--drivers/dma/hisi_dma.c650
-rw-r--r--drivers/dma/hsu/hsu.c8
-rw-r--r--drivers/dma/hsu/hsu.h12
-rw-r--r--drivers/dma/hsu/pci.c47
-rw-r--r--drivers/dma/idxd/device.c38
-rw-r--r--drivers/dma/idxd/idxd.h10
-rw-r--r--drivers/dma/idxd/init.c36
-rw-r--r--drivers/dma/idxd/irq.c13
-rw-r--r--drivers/dma/idxd/registers.h35
-rw-r--r--drivers/dma/idxd/sysfs.c187
-rw-r--r--drivers/dma/ioat/dma.c6
-rw-r--r--drivers/dma/ioat/dma.h2
-rw-r--r--drivers/dma/mxs-dma.c11
-rw-r--r--drivers/dma/pl330.c4
-rw-r--r--drivers/dma/qcom/gpi.c7
-rw-r--r--drivers/dma/qcom/qcom_adm.c22
-rw-r--r--drivers/dma/s3c24xx-dma.c2
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c8
-rw-r--r--drivers/dma/sh/rcar-dmac.c4
-rw-r--r--drivers/dma/stm32-dma.c136
-rw-r--r--drivers/dma/stm32-dmamux.c12
-rw-r--r--drivers/dma/stm32-mdma.c70
-rw-r--r--drivers/dma/ti/edma.c40
-rw-r--r--drivers/dma/ti/k3-psil-j7200.c67
-rw-r--r--drivers/dma/ti/k3-psil-j721e.c79
-rw-r--r--drivers/dma/ti/k3-udma.c37
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c12
-rw-r--r--drivers/edac/Kconfig2
-rw-r--r--drivers/edac/sifive_edac.c12
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-usbc-tusb320.c232
-rw-r--r--drivers/firmware/dmi_scan.c10
-rw-r--r--drivers/firmware/efi/Kconfig45
-rw-r--r--drivers/firmware/efi/efi-init.c61
-rw-r--r--drivers/firmware/efi/efi.c17
-rw-r--r--drivers/firmware/efi/libstub/Makefile33
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot70
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c27
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c290
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c118
-rw-r--r--drivers/firmware/efi/libstub/efistub.h69
-rw-r--r--drivers/firmware/efi/libstub/fdt.c175
-rw-r--r--drivers/firmware/efi/libstub/file.c23
-rw-r--r--drivers/firmware/efi/libstub/intrinsics.c30
-rw-r--r--drivers/firmware/efi/libstub/loongarch-stub.c102
-rw-r--r--drivers/firmware/efi/libstub/mem.c93
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c25
-rw-r--r--drivers/firmware/efi/libstub/relocate.c21
-rw-r--r--drivers/firmware/efi/libstub/systable.c8
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c33
-rw-r--r--drivers/firmware/efi/libstub/zboot-header.S143
-rw-r--r--drivers/firmware/efi/libstub/zboot.c302
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds44
-rw-r--r--drivers/firmware/google/gsmi.c9
-rw-r--r--drivers/fpga/dfl-pci.c18
-rw-r--r--drivers/fpga/dfl.c2
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c3
-rw-r--r--drivers/fpga/microchip-spi.c1
-rw-r--r--drivers/fsi/fsi-core.c11
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c2
-rw-r--r--drivers/fsi/fsi-master.h2
-rw-r--r--drivers/fsi/fsi-occ.c66
-rw-r--r--drivers/fsi/fsi-sbefifo.c15
-rw-r--r--drivers/gnss/core.c4
-rw-r--r--drivers/gpio/Kconfig18
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c5
-rw-r--r--drivers/gpio/gpio-104-idi-48.c5
-rw-r--r--drivers/gpio/gpio-104-idio-16.c5
-rw-r--r--drivers/gpio/gpio-adp5588.c446
-rw-r--r--drivers/gpio/gpio-exar.c40
-rw-r--r--drivers/gpio/gpio-imx-scu.c139
-rw-r--r--drivers/gpio/gpio-mlxbf2.c6
-rw-r--r--drivers/gpio/gpio-mt7621.c7
-rw-r--r--drivers/gpio/gpio-pca953x.c177
-rw-r--r--drivers/gpio/gpio-rockchip.c28
-rw-r--r--drivers/gpio/gpio-tc3589x.c8
-rw-r--r--drivers/gpio/gpio-twl4030.c26
-rw-r--r--drivers/gpio/gpio-ucb1400.c1
-rw-r--r--drivers/gpio/gpio-ws16c48.c5
-rw-r--r--drivers/gpio/gpiolib-acpi.c53
-rw-r--r--drivers/gpio/gpiolib-acpi.h2
-rw-r--r--drivers/gpio/gpiolib-cdev.c18
-rw-r--r--drivers/gpio/gpiolib-of.c184
-rw-r--r--drivers/gpio/gpiolib.c132
-rw-r--r--drivers/gpu/drm/Kconfig12
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c165
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_7.c63
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c73
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c147
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c96
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h140
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c1
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h3
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h5
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c4
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c25
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c12
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c13
-rw-r--r--drivers/gpu/drm/drm_connector.c2
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c13
-rw-r--r--drivers/gpu/drm/drm_print.c48
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c1
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c2
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_proto.c2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c78
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c37
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c26
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c20
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h46
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c205
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c212
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/i915/i915_params.c12
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c109
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c13
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c36
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c3
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c2
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c2
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c29
-rw-r--r--drivers/hid/hid-apple.c141
-rw-r--r--drivers/hid/hid-debug.c3
-rw-r--r--drivers/hsi/clients/cmt_speech.c2
-rw-r--r--drivers/hsi/clients/nokia-modem.c4
-rw-r--r--drivers/hsi/clients/ssi_protocol.c2
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c1
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c8
-rw-r--r--drivers/hv/connection.c33
-rw-r--r--drivers/hv/vmbus_drv.c20
-rw-r--r--drivers/hwmon/Kconfig1
-rw-r--r--drivers/hwmon/occ/common.c11
-rw-r--r--drivers/hwmon/occ/p9_sbe.c26
-rw-r--r--drivers/hwtracing/Kconfig2
-rw-r--r--drivers/hwtracing/coresight/Kconfig4
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h8
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c213
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c34
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c29
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h74
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c40
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c48
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h4
-rw-r--r--drivers/hwtracing/ptt/Kconfig12
-rw-r--r--drivers/hwtracing/ptt/Makefile2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c1046
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.h200
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c9
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h7
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c13
-rw-r--r--drivers/i2c/busses/i2c-mchp-pci1xxxx.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c1
-rw-r--r--drivers/i2c/i2c-core-acpi.c40
-rw-r--r--drivers/i2c/i2c-core-base.c6
-rw-r--r--drivers/i2c/i2c-core.h4
-rw-r--r--drivers/i3c/master.c7
-rw-r--r--drivers/iio/accel/Kconfig13
-rw-r--r--drivers/iio/accel/Makefile2
-rw-r--r--drivers/iio/accel/adxl313.h35
-rw-r--r--drivers/iio/accel/adxl313_core.c202
-rw-r--r--drivers/iio/accel/adxl313_i2c.c74
-rw-r--r--drivers/iio/accel/adxl313_spi.c63
-rw-r--r--drivers/iio/accel/adxl345_core.c7
-rw-r--r--drivers/iio/accel/bma400.h14
-rw-r--r--drivers/iio/accel/bma400_core.c346
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c15
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c2
-rw-r--r--drivers/iio/accel/kxcjk-1013.c2
-rw-r--r--drivers/iio/accel/msa311.c1321
-rw-r--r--drivers/iio/adc/Kconfig47
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c27
-rw-r--r--drivers/iio/adc/ad7124.c15
-rw-r--r--drivers/iio/adc/ad7768-1.c17
-rw-r--r--drivers/iio/adc/ad7923.c11
-rw-r--r--drivers/iio/adc/ad9467.c17
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c714
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c8
-rw-r--r--drivers/iio/adc/ingenic-adc.c23
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c18
-rw-r--r--drivers/iio/adc/ltc2496.c9
-rw-r--r--drivers/iio/adc/ltc2497-core.c12
-rw-r--r--drivers/iio/adc/ltc2497.c77
-rw-r--r--drivers/iio/adc/ltc2497.h6
-rw-r--r--drivers/iio/adc/max11205.c183
-rw-r--r--drivers/iio/adc/max1363.c6
-rw-r--r--drivers/iio/adc/mcp3911.c257
-rw-r--r--drivers/iio/adc/mt6360-adc.c2
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c58
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c73
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c44
-rw-r--r--drivers/iio/adc/rtq6056.c661
-rw-r--r--drivers/iio/adc/stm32-adc-core.c59
-rw-r--r--drivers/iio/adc/stm32-adc-core.h31
-rw-r--r--drivers/iio/adc/stm32-adc.c128
-rw-r--r--drivers/iio/adc/ti-ads131e08.c19
-rw-r--r--drivers/iio/adc/ti-tsc2046.c69
-rw-r--r--drivers/iio/adc/xilinx-ams.c15
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c18
-rw-r--r--drivers/iio/addac/Kconfig16
-rw-r--r--drivers/iio/addac/Makefile1
-rw-r--r--drivers/iio/addac/stx104.c (renamed from drivers/iio/adc/stx104.c)0
-rw-r--r--drivers/iio/cdc/Kconfig10
-rw-r--r--drivers/iio/cdc/Makefile1
-rw-r--r--drivers/iio/cdc/ad7746.c (renamed from drivers/staging/iio/cdc/ad7746.c)441
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c2
-rw-r--r--drivers/iio/dac/ad5593r.c50
-rw-r--r--drivers/iio/frequency/adf4371.c17
-rw-r--r--drivers/iio/frequency/admv1014.c3
-rw-r--r--drivers/iio/frequency/adrf6780.c16
-rw-r--r--drivers/iio/imu/Kconfig1
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis16475.c15
-rw-r--r--drivers/iio/imu/bno055/Kconfig25
-rw-r--r--drivers/iio/imu/bno055/Makefile10
-rw-r--r--drivers/iio/imu/bno055/bno055.c1685
-rw-r--r--drivers/iio/imu/bno055/bno055.h13
-rw-r--r--drivers/iio/imu/bno055/bno055_i2c.c57
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c560
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_trace.c14
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_trace.h104
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c7
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c5
-rw-r--r--drivers/iio/industrialio-buffer.c5
-rw-r--r--drivers/iio/industrialio-core.c29
-rw-r--r--drivers/iio/industrialio-event.c14
-rw-r--r--drivers/iio/industrialio-trigger.c1
-rw-r--r--drivers/iio/inkern.c272
-rw-r--r--drivers/iio/light/Kconfig11
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/ltrf216a.c550
-rw-r--r--drivers/iio/light/st_uvis25_core.c9
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c2
-rw-r--r--drivers/iio/light/st_uvis25_spi.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig4
-rw-r--r--drivers/iio/magnetometer/hmc5843.h13
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c8
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c14
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c855
-rw-r--r--drivers/iio/pressure/Kconfig6
-rw-r--r--drivers/iio/pressure/bmp280-core.c974
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c15
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c55
-rw-r--r--drivers/iio/pressure/bmp280-spi.c5
-rw-r--r--drivers/iio/pressure/bmp280.h164
-rw-r--r--drivers/iio/pressure/dlhl60d.c5
-rw-r--r--drivers/iio/pressure/dps310.c262
-rw-r--r--drivers/iio/pressure/icp10100.c10
-rw-r--r--drivers/iio/pressure/st_pressure.h2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c70
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c5
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c5
-rw-r--r--drivers/iio/proximity/srf04.c10
-rw-r--r--drivers/iio/proximity/sx9310.c8
-rw-r--r--drivers/iio/proximity/sx9324.c8
-rw-r--r--drivers/iio/proximity/sx9360.c8
-rw-r--r--drivers/iio/temperature/mlx90614.c41
-rw-r--r--drivers/iio/temperature/mlx90632.c61
-rw-r--r--drivers/iio/test/iio-test-rescale.c4
-rw-r--r--drivers/infiniband/core/cm.c104
-rw-r--r--drivers/infiniband/core/cma.c90
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/device.c4
-rw-r--r--drivers/infiniband/core/lag.c5
-rw-r--r--drivers/infiniband/core/sa_query.c235
-rw-r--r--drivers/infiniband/core/ucma.c10
-rw-r--r--drivers/infiniband/core/umem_odp.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c5
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c8
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/id_table.c4
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h3
-rw-r--r--drivers/infiniband/hw/efa/efa_io_defs.h289
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c11
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h4
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c8
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c8
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c4
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c13
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h14
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c17
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c15
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c35
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h9
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c2
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_rx.c5
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c6
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h3
-rw-r--r--drivers/infiniband/hw/hns/Makefile2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h25
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c164
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c34
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c64
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c19
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c220
-rw-r--r--drivers/infiniband/hw/irdma/defs.h1
-rw-r--r--drivers/infiniband/hw/irdma/hw.c51
-rw-r--r--drivers/infiniband/hw/irdma/type.h1
-rw-r--r--drivers/infiniband/hw/irdma/user.h1
-rw-r--r--drivers/infiniband/hw/irdma/utils.c3
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c69
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c57
-rw-r--r--drivers/infiniband/hw/mlx5/main.c6
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h36
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c3
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c61
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c106
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c53
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h3
-rw-r--r--drivers/infiniband/sw/siw/Kconfig5
-rw-r--r--drivers/infiniband/sw/siw/siw.h1
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c27
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/Makefile10
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c15
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h86
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c29
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h7
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c16
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h88
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c12
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h5
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c66
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c6
-rw-r--r--drivers/input/ff-core.c3
-rw-r--r--drivers/input/ff-memless.c3
-rw-r--r--drivers/input/gameport/emu10k1-gp.c3
-rw-r--r--drivers/input/gameport/lightning.c3
-rw-r--r--drivers/input/gameport/ns558.c3
-rw-r--r--drivers/input/joydev.c2
-rw-r--r--drivers/input/joystick/a3d.c3
-rw-r--r--drivers/input/joystick/adc-joystick.c65
-rw-r--r--drivers/input/joystick/adi.c3
-rw-r--r--drivers/input/joystick/amijoy.c3
-rw-r--r--drivers/input/joystick/analog.c3
-rw-r--r--drivers/input/joystick/cobra.c3
-rw-r--r--drivers/input/joystick/db9.c3
-rw-r--r--drivers/input/joystick/gamecon.c3
-rw-r--r--drivers/input/joystick/gf2k.c3
-rw-r--r--drivers/input/joystick/grip.c3
-rw-r--r--drivers/input/joystick/guillemot.c3
-rw-r--r--drivers/input/joystick/interact.c3
-rw-r--r--drivers/input/joystick/joydump.c3
-rw-r--r--drivers/input/joystick/magellan.c3
-rw-r--r--drivers/input/joystick/sidewinder.c3
-rw-r--r--drivers/input/joystick/spaceball.c3
-rw-r--r--drivers/input/joystick/spaceorb.c3
-rw-r--r--drivers/input/joystick/stinger.c3
-rw-r--r--drivers/input/joystick/tmdc.c3
-rw-r--r--drivers/input/joystick/turbografx.c3
-rw-r--r--drivers/input/joystick/twidjoy.c3
-rw-r--r--drivers/input/joystick/warrior.c3
-rw-r--r--drivers/input/joystick/xpad.c455
-rw-r--r--drivers/input/joystick/zhenhua.c3
-rw-r--r--drivers/input/keyboard/Kconfig18
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5588-keys.c726
-rw-r--r--drivers/input/keyboard/amikbd.c3
-rw-r--r--drivers/input/keyboard/applespi.c4
-rw-r--r--drivers/input/keyboard/atakbd.c3
-rw-r--r--drivers/input/keyboard/atkbd.c16
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c13
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c1
-rw-r--r--drivers/input/keyboard/imx_keypad.c1
-rw-r--r--drivers/input/keyboard/lkkbd.c11
-rw-r--r--drivers/input/keyboard/lm8333.c8
-rw-r--r--drivers/input/keyboard/matrix_keypad.c7
-rw-r--r--drivers/input/keyboard/mt6779-keypad.c46
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c21
-rw-r--r--drivers/input/keyboard/newtonkbd.c3
-rw-r--r--drivers/input/keyboard/pinephone-keyboard.c468
-rw-r--r--drivers/input/keyboard/st-keyscan.c10
-rw-r--r--drivers/input/keyboard/stowaway.c3
-rw-r--r--drivers/input/keyboard/sunkbd.c3
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c2
-rw-r--r--drivers/input/keyboard/xtkbd.c3
-rw-r--r--drivers/input/misc/Kconfig27
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/ibm-panel.c200
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/iqs7222.c16
-rw-r--r--drivers/input/misc/keyspan_remote.c2
-rw-r--r--drivers/input/misc/rt5120-pwrkey.c120
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c1
-rw-r--r--drivers/input/misc/twl4030-vibra.c13
-rw-r--r--drivers/input/mouse/elan_i2c_core.c7
-rw-r--r--drivers/input/mouse/hgpk.c4
-rw-r--r--drivers/input/mouse/inport.c3
-rw-r--r--drivers/input/mouse/logibm.c3
-rw-r--r--drivers/input/mouse/pc110pad.c3
-rw-r--r--drivers/input/mouse/psmouse-base.c22
-rw-r--r--drivers/input/mouse/sermouse.c3
-rw-r--r--drivers/input/mouse/synaptics.c5
-rw-r--r--drivers/input/mouse/synaptics_usb.c2
-rw-r--r--drivers/input/mouse/vsxxxaa.c7
-rw-r--r--drivers/input/rmi4/rmi_f03.c2
-rw-r--r--drivers/input/rmi4/rmi_f34.c32
-rw-r--r--drivers/input/rmi4/rmi_f34.h17
-rw-r--r--drivers/input/rmi4/rmi_f34v7.c349
-rw-r--r--drivers/input/rmi4/rmi_f54.c8
-rw-r--r--drivers/input/serio/altera_ps2.c4
-rw-r--r--drivers/input/serio/ambakmi.c4
-rw-r--r--drivers/input/serio/ams_delta_serio.c4
-rw-r--r--drivers/input/serio/apbps2.c2
-rw-r--r--drivers/input/serio/ct82c710.c5
-rw-r--r--drivers/input/serio/gscps2.c2
-rw-r--r--drivers/input/serio/hyperv-keyboard.c4
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h (renamed from drivers/input/serio/i8042-x86ia64io.h)18
-rw-r--r--drivers/input/serio/i8042-sparcio.h27
-rw-r--r--drivers/input/serio/i8042.c14
-rw-r--r--drivers/input/serio/i8042.h4
-rw-r--r--drivers/input/serio/libps2.c5
-rw-r--r--drivers/input/serio/olpc_apsp.c8
-rw-r--r--drivers/input/serio/parkbd.c2
-rw-r--r--drivers/input/serio/pcips2.c4
-rw-r--r--drivers/input/serio/ps2-gpio.c4
-rw-r--r--drivers/input/serio/ps2mult.c2
-rw-r--r--drivers/input/serio/q40kbd.c7
-rw-r--r--drivers/input/serio/rpckbd.c7
-rw-r--r--drivers/input/serio/sa1111ps2.c4
-rw-r--r--drivers/input/serio/serio.c3
-rw-r--r--drivers/input/serio/serport.c2
-rw-r--r--drivers/input/serio/sun4i-ps2.c4
-rw-r--r--drivers/input/tablet/acecad.c5
-rw-r--r--drivers/input/tablet/aiptek.c20
-rw-r--r--drivers/input/tablet/hanwang.c5
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c2
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c8
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c155
-rw-r--r--drivers/input/touchscreen/chipone_icn8505.c30
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c12
-rw-r--r--drivers/input/touchscreen/gunze.c3
-rw-r--r--drivers/input/touchscreen/sur40.c6
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c2
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c6
-rw-r--r--drivers/interconnect/core.c10
-rw-r--r--drivers/interconnect/imx/imx.c4
-rw-r--r--drivers/interconnect/imx/imx.h2
-rw-r--r--drivers/interconnect/imx/imx8mm.c4
-rw-r--r--drivers/interconnect/imx/imx8mn.c4
-rw-r--r--drivers/interconnect/imx/imx8mp.c4
-rw-r--r--drivers/interconnect/imx/imx8mq.c4
-rw-r--r--drivers/interconnect/qcom/Kconfig2
-rw-r--r--drivers/interconnect/qcom/icc-common.c3
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c4
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c4
-rw-r--r--drivers/interconnect/qcom/msm8974.c4
-rw-r--r--drivers/interconnect/qcom/osm-l3.c4
-rw-r--r--drivers/interconnect/qcom/sm8450.c4
-rw-r--r--drivers/iommu/Kconfig16
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd/Kconfig1
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h1
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h10
-rw-r--r--drivers/iommu/amd/init.c47
-rw-r--r--drivers/iommu/amd/io_pgtable.c76
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c415
-rw-r--r--drivers/iommu/amd/iommu.c164
-rw-r--r--drivers/iommu/apple-dart.c57
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c83
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c97
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c6
-rw-r--r--drivers/iommu/dma-iommu.c18
-rw-r--r--drivers/iommu/dma-iommu.h42
-rw-r--r--drivers/iommu/exynos-iommu.c9
-rw-r--r--drivers/iommu/fsl_pamu_domain.c6
-rw-r--r--drivers/iommu/intel/Kconfig6
-rw-r--r--drivers/iommu/intel/cap_audit.c4
-rw-r--r--drivers/iommu/intel/iommu.c95
-rw-r--r--drivers/iommu/intel/iommu.h7
-rw-r--r--drivers/iommu/intel/irq_remapping.c6
-rw-r--r--drivers/iommu/intel/pasid.c12
-rw-r--r--drivers/iommu/intel/svm.c62
-rw-r--r--drivers/iommu/io-pgtable-arm.c71
-rw-r--r--drivers/iommu/io-pgtable-dart.c469
-rw-r--r--drivers/iommu/io-pgtable.c4
-rw-r--r--drivers/iommu/iommu.c154
-rw-r--r--drivers/iommu/iova.c13
-rw-r--r--drivers/iommu/ipmmu-vmsa.c35
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu.c45
-rw-r--r--drivers/iommu/mtk_iommu_v1.c13
-rw-r--r--drivers/iommu/omap-iommu-debug.c6
-rw-r--r--drivers/iommu/omap-iommu.c6
-rw-r--r--drivers/iommu/rockchip-iommu.c2
-rw-r--r--drivers/iommu/s390-iommu.c15
-rw-r--r--drivers/iommu/sprd-iommu.c5
-rw-r--r--drivers/iommu/sun50i-iommu.c2
-rw-r--r--drivers/iommu/tegra-smmu.c29
-rw-r--r--drivers/iommu/virtio-iommu.c32
-rw-r--r--drivers/ipack/devices/ipoctal.c2
-rw-r--r--drivers/ipack/ipack.c5
-rw-r--r--drivers/irqchip/Kconfig17
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-gic-v2m.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c453
-rw-r--r--drivers/irqchip/irq-ls-extirq.c87
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c2
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c134
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c3
-rw-r--r--drivers/leds/leds-pca963x.c22
-rw-r--r--drivers/macintosh/therm_windtunnel.c6
-rw-r--r--drivers/mailbox/apple-mailbox.c63
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c8
-rw-r--r--drivers/mailbox/imx-mailbox.c10
-rw-r--r--drivers/mailbox/mailbox-mpfs.c25
-rw-r--r--drivers/mailbox/pcc.c2
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c2
-rw-r--r--drivers/mailbox/qcom-ipcc.c3
-rw-r--r--drivers/md/bcache/request.c2
-rw-r--r--drivers/md/dm-bufio.c13
-rw-r--r--drivers/md/dm-cache-policy.h2
-rw-r--r--drivers/md/dm-clone-target.c2
-rw-r--r--drivers/md/dm-ioctl.c78
-rw-r--r--drivers/md/dm-raid.c4
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-stats.c2
-rw-r--r--drivers/md/dm-table.c78
-rw-r--r--drivers/md/dm-verity-target.c18
-rw-r--r--drivers/md/dm.c9
-rw-r--r--drivers/md/raid5-cache.c2
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.c6
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c22
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c1
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c290
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c14
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h147
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c86
-rw-r--r--drivers/message/fusion/mptctl.c6
-rw-r--r--drivers/mfd/Kconfig44
-rw-r--r--drivers/mfd/Makefile12
-rw-r--r--drivers/mfd/da9062-core.c1
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c34
-rw-r--r--drivers/mfd/htc-i2cpld.c60
-rw-r--r--drivers/mfd/intel-lpss-pci.c141
-rw-r--r--drivers/mfd/intel-m10-bmc.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c8
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c158
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h25
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c139
-rw-r--r--drivers/mfd/lp8788-irq.c3
-rw-r--r--drivers/mfd/lp8788.c12
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/mfd-core.c9
-rw-r--r--drivers/mfd/mt6370.c312
-rw-r--r--drivers/mfd/mt6370.h99
-rw-r--r--drivers/mfd/ocelot-spi.c1
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c1
-rw-r--r--drivers/mfd/rk808.c16
-rw-r--r--drivers/mfd/rt5120.c124
-rw-r--r--drivers/mfd/sm501.c7
-rw-r--r--drivers/mfd/stmpe.c49
-rw-r--r--drivers/mfd/syscon.c8
-rw-r--r--drivers/mfd/twl-core.c2
-rw-r--r--drivers/mfd/twl4030-irq.c1
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/altera-stapl/altera.c8
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_dev.c8
-rw-r--r--drivers/misc/cxl/fault.c45
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c2
-rw-r--r--drivers/misc/fastrpc.c2
-rw-r--r--drivers/misc/habanalabs/Kconfig1
-rw-r--r--drivers/misc/habanalabs/Makefile8
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c127
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c75
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c35
-rw-r--r--drivers/misc/habanalabs/common/device.c147
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c184
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h171
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c44
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c123
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c4
-rw-r--r--drivers/misc/habanalabs/common/hwmon.c24
-rw-r--r--drivers/misc/habanalabs/common/memory.c57
-rw-r--r--drivers/misc/habanalabs/common/memory_mgr.c10
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c31
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c10
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c185
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2.c673
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2P.h10
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2_masks.h21
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2_security.c26
-rw-r--r--drivers/misc/habanalabs/goya/goya.c62
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h103
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h37
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h2
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h185
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h57
-rw-r--r--drivers/misc/ics932s401.c2
-rw-r--r--drivers/misc/mchp_pci1xxxx/Kconfig13
-rw-r--r--drivers/misc/mchp_pci1xxxx/Makefile1
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c165
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h28
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c427
-rw-r--r--drivers/misc/mei/bus-fixup.c2
-rw-r--r--drivers/misc/mei/gsc-me.c1
-rw-r--r--drivers/misc/mei/hw-txe.c2
-rw-r--r--drivers/misc/ocxl/file.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c34
-rw-r--r--drivers/misc/sgi-xp/xp.h4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c16
-rw-r--r--drivers/misc/xilinx_sdfec.c3
-rw-r--r--drivers/mmc/core/block.c6
-rw-r--r--drivers/mmc/core/card.h6
-rw-r--r--drivers/mmc/core/core.c4
-rw-r--r--drivers/mmc/core/quirks.h6
-rw-r--r--drivers/mmc/core/sdio_uart.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c21
-rw-r--r--drivers/mmc/host/sdhci-sprd.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c2
-rw-r--r--drivers/mtd/devices/docg3.c21
-rw-r--r--drivers/mtd/ftl.c4
-rw-r--r--drivers/mtd/inftlcore.c6
-rw-r--r--drivers/mtd/maps/physmap-core.c3
-rw-r--r--drivers/mtd/mtdchar.c139
-rw-r--r--drivers/mtd/mtdconcat.c2
-rw-r--r--drivers/mtd/mtdcore.c33
-rw-r--r--drivers/mtd/mtdpstore.c2
-rw-r--r--drivers/mtd/mtdswap.c6
-rw-r--r--drivers/mtd/nand/bbt.c7
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c16
-rw-r--r--drivers/mtd/nand/onenand/onenand_bbt.c2
-rw-r--r--drivers/mtd/nand/raw/Kconfig24
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c1
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/Kconfig49
-rw-r--r--drivers/mtd/nand/raw/brcmnand/Makefile8
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c5
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c28
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c28
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c8
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c90
-rw-r--r--drivers/mtd/nand/raw/nand_base.c15
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c8
-rw-r--r--drivers/mtd/nand/raw/nandsim.c8
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c17
-rw-r--r--drivers/mtd/nand/raw/sm_common.c2
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c13
-rw-r--r--drivers/mtd/nand/spi/core.c10
-rw-r--r--drivers/mtd/nftlcore.c6
-rw-r--r--drivers/mtd/parsers/Kconfig10
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/brcm_u-boot.c84
-rw-r--r--drivers/mtd/parsers/cmdlinepart.c4
-rw-r--r--drivers/mtd/sm_ftl.c4
-rw-r--r--drivers/mtd/ssfdc.c2
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c12
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c8
-rw-r--r--drivers/mtd/tests/readtest.c2
-rw-r--r--drivers/mtd/tests/speedtest.c2
-rw-r--r--drivers/mtd/tests/stresstest.c19
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/mtd/ubi/build.c14
-rw-r--r--drivers/mtd/ubi/cdev.c4
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/mtd/ubi/debug.h6
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/fastmap.c10
-rw-r--r--drivers/mtd/ubi/io.c2
-rw-r--r--drivers/mtd/ubi/ubi-media.h2
-rw-r--r--drivers/mtd/ubi/ubi.h9
-rw-r--r--drivers/mtd/ubi/vmt.c4
-rw-r--r--drivers/mtd/ubi/wl.c8
-rw-r--r--drivers/net/Kconfig3
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c3
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c79
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c83
-rw-r--r--drivers/net/ethernet/adi/adin1110.c13
-rw-r--r--drivers/net/ethernet/broadcom/Makefile5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c11
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c4
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.c5
-rw-r--r--drivers/net/ethernet/mediatek/Makefile5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c17
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c12
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c8
-rw-r--r--drivers/net/ethernet/sfc/ef10.c58
-rw-r--r--drivers/net/ethernet/sfc/filter.h4
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c6
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c5
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c19
-rw-r--r--drivers/net/hyperv/rndis_filter.c6
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/phy/at803x.c2
-rw-r--r--drivers/net/phy/dp83822.c3
-rw-r--r--drivers/net/phy/dp83867.c8
-rw-r--r--drivers/net/phy/micrel.c9
-rw-r--r--drivers/net/phy/phylink.c3
-rw-r--r--drivers/net/phy/sfp-bus.c3
-rw-r--r--drivers/net/pse-pd/Kconfig1
-rw-r--r--drivers/net/usb/hso.c3
-rw-r--r--drivers/net/virtio_net.c48
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c2
-rw-r--r--drivers/net/wwan/wwan_core.c1
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c24
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/region_devs.c10
-rw-r--r--drivers/nvdimm/security.c2
-rw-r--r--drivers/nvme/common/auth.c2
-rw-r--r--drivers/nvme/host/apple.c2
-rw-r--r--drivers/nvme/host/core.c8
-rw-r--r--drivers/nvme/host/hwmon.c32
-rw-r--r--drivers/nvme/host/multipath.c1
-rw-r--r--drivers/nvme/host/pci.c14
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/host/tcp.c2
-rw-r--r--drivers/nvme/target/configfs.c4
-rw-r--r--drivers/nvme/target/core.c2
-rw-r--r--drivers/nvmem/Kconfig313
-rw-r--r--drivers/nvmem/Makefile120
-rw-r--r--drivers/nvmem/brcm_nvram.c2
-rw-r--r--drivers/nvmem/core.c27
-rw-r--r--drivers/nvmem/lan9662-otpc.c222
-rw-r--r--drivers/nvmem/u-boot-env.c219
-rw-r--r--drivers/of/address.c4
-rw-r--r--drivers/of/base.c7
-rw-r--r--drivers/of/device.c9
-rw-r--r--drivers/of/fdt.c17
-rw-r--r--drivers/of/irq.c3
-rw-r--r--drivers/of/of_private.h5
-rw-r--r--drivers/of/unittest.c5
-rw-r--r--drivers/parisc/eisa_enumerator.c8
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c33
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c28
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h1
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c164
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c128
-rw-r--r--drivers/pci/controller/pci-aardvark.c4
-rw-r--r--drivers/pci/controller/pci-ftpci100.c21
-rw-r--r--drivers/pci/controller/pci-mvebu.c13
-rw-r--r--drivers/pci/controller/pci-tegra.c11
-rw-r--r--drivers/pci/controller/pcie-apple.c4
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c2
-rw-r--r--drivers/pci/controller/pcie-mt7621.c17
-rw-r--r--drivers/pci/msi/msi.c2
-rw-r--r--drivers/pci/p2pdma.c2
-rw-r--r--drivers/pci/pci-bridge-emul.c48
-rw-r--r--drivers/pci/pci-bridge-emul.h2
-rw-r--r--drivers/pci/pci-driver.c30
-rw-r--r--drivers/pci/pci-sysfs.c108
-rw-r--r--drivers/pci/pci.c51
-rw-r--r--drivers/pci/pci.h63
-rw-r--r--drivers/pci/pcie/aspm.c258
-rw-r--r--drivers/pci/pcie/dpc.c15
-rw-r--r--drivers/pci/pcie/ptm.c300
-rw-r--r--drivers/pci/probe.c13
-rw-r--r--drivers/pci/quirks.c36
-rw-r--r--drivers/pci/setup-bus.c228
-rw-r--r--drivers/pci/setup-res.c11
-rw-r--r--drivers/pci/xen-pcifront.c161
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c4
-rw-r--r--drivers/perf/arm_spe_pmu.c4
-rw-r--r--drivers/perf/riscv_pmu_legacy.c4
-rw-r--r--drivers/perf/riscv_pmu_sbi.c38
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c6
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c32
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c8
-rw-r--r--drivers/phy/intel/phy-intel-lgm-combo.c45
-rw-r--r--drivers/phy/mediatek/phy-mtk-dp.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c238
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c246
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c33
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h7
-rw-r--r--drivers/phy/mediatek/phy-mtk-io.h8
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c164
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c74
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c24
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.h5
-rw-r--r--drivers/phy/mediatek/phy-mtk-pcie.c17
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c223
-rw-r--r--drivers/phy/mediatek/phy-mtk-ufs.c78
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c46
-rw-r--r--drivers/phy/microchip/lan966x_serdes.c99
-rw-r--r--drivers/phy/microchip/lan966x_serdes_regs.h42
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c221
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c819
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c251
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c360
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h333
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c270
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c468
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c27
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c273
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hsic.c6
-rw-r--r--drivers/phy/rockchip/Kconfig9
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c24
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c204
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c10
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c322
-rw-r--r--drivers/phy/sunplus/Kconfig12
-rw-r--r--drivers/phy/sunplus/Makefile2
-rw-r--r--drivers/phy/sunplus/phy-sunplus-usb2.c296
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c28
-rw-r--r--drivers/phy/tegra/xusb.c25
-rw-r--r--drivers/phy/tegra/xusb.h4
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c47
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c248
-rw-r--r--drivers/pinctrl/Kconfig37
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c11
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6318.c121
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63268.c139
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6328.c91
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6358.c20
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6362.c121
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6368.c91
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63xx.h2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c4
-rw-r--r--drivers/pinctrl/berlin/berlin.c2
-rw-r--r--drivers/pinctrl/freescale/Kconfig12
-rw-r--r--drivers/pinctrl/mediatek/Kconfig12
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8188.c1673
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8188.h2259
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c7
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c26
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c295
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c29
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c117
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.h30
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c8
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c5
-rw-r--r--drivers/pinctrl/pinctrl-amd.c34
-rw-r--r--drivers/pinctrl/pinctrl-at91.c105
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c1419
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c31
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c3
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c5
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c27
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c346
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h1
-rw-r--r--drivers/pinctrl/pinctrl-st.c34
-rw-r--r--drivers/pinctrl/qcom/Kconfig57
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c207
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c240
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c79
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c8
-rw-r--r--drivers/pinctrl/starfive/Kconfig18
-rw-r--r--drivers/pinctrl/starfive/Makefile3
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c (renamed from drivers/pinctrl/pinctrl-starfive.c)10
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c5
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c9
-rw-r--r--drivers/platform/Kconfig2
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/loongarch/Kconfig31
-rw-r--r--drivers/platform/loongarch/Makefile1
-rw-r--r--drivers/platform/loongarch/loongson-laptop.c624
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c3
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c6
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c7
-rw-r--r--drivers/power/reset/qcom-pon.c1
-rw-r--r--drivers/power/supply/Kconfig21
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_chargalg.c8
-rw-r--r--drivers/power/supply/adp5061.c9
-rw-r--r--drivers/power/supply/bq25890_charger.c30
-rw-r--r--drivers/power/supply/bq27xxx_battery.c60
-rw-r--r--drivers/power/supply/cpcap-charger.c2
-rw-r--r--drivers/power/supply/cw2015_battery.c13
-rw-r--r--drivers/power/supply/max1721x_battery.c2
-rw-r--r--drivers/power/supply/mt6370-charger.c961
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/rk817_charger.c1211
-rw-r--r--drivers/power/supply/tps65217_charger.c2
-rw-r--r--drivers/powercap/idle_inject.c2
-rw-r--r--drivers/ptp/ptp_ocp.c1
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-lpss-pci.c48
-rw-r--r--drivers/pwm/pwm-lpss-platform.c40
-rw-r--r--drivers/pwm/pwm-lpss.c46
-rw-r--r--drivers/pwm/pwm-lpss.h12
-rw-r--r--drivers/pwm/pwm-rockchip.c18
-rw-r--r--drivers/pwm/sysfs.c20
-rw-r--r--drivers/regulator/Kconfig1
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c4
-rw-r--r--drivers/remoteproc/imx_rproc.c14
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c16
-rw-r--r--drivers/remoteproc/remoteproc_core.c223
-rw-r--r--drivers/remoteproc/remoteproc_internal.h38
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c183
-rw-r--r--drivers/reset/Kconfig7
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-mpfs.c157
-rw-r--r--drivers/rpmsg/rpmsg_char.c9
-rw-r--r--drivers/rtc/Kconfig1
-rw-r--r--drivers/rtc/rtc-cmos.c29
-rw-r--r--drivers/rtc/rtc-ds1685.c2
-rw-r--r--drivers/rtc/rtc-gamecube.c11
-rw-r--r--drivers/rtc/rtc-isl12022.c161
-rw-r--r--drivers/rtc/rtc-jz4740.c25
-rw-r--r--drivers/rtc/rtc-mpfs.c26
-rw-r--r--drivers/rtc/rtc-mxc.c27
-rw-r--r--drivers/rtc/rtc-rv3028.c5
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-ti-k3.c56
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/char/hmcdrv_cache.c2
-rw-r--r--drivers/s390/char/tape_class.c4
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/char/vmur.c37
-rw-r--r--drivers/s390/char/vmur.h2
-rw-r--r--drivers/s390/char/zcore.c1
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c12
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c92
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h11
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c109
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h6
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/fsm.c2
-rw-r--r--drivers/s390/net/qeth_ethtool.c4
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c2
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/3w-xxxx.c14
-rw-r--r--drivers/scsi/3w-xxxx.h2
-rw-r--r--drivers/scsi/Kconfig7
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c10
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c4
-rw-r--r--drivers/scsi/cxlflash/main.c2
-rw-r--r--drivers/scsi/esas2r/atioctl.h1
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c3
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c37
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c7
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c14
-rw-r--r--drivers/scsi/hpsa.c12
-rw-r--r--drivers/scsi/hptiop.c9
-rw-r--r--drivers/scsi/hptiop.h4
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c73
-rw-r--r--drivers/scsi/iscsi_tcp.h3
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h37
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c344
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c1092
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c61
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c234
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c47
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h59
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h34
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c443
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c86
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c273
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c71
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c24
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h12
-rw-r--r--drivers/scsi/mpi3mr/Makefile1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h171
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h6
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h5
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h22
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h3
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h8
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h252
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_debug.h27
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c1032
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c545
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c3291
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c217
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h39
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c124
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c21
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/qedf/qedf_main.c21
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h43
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c93
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c49
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qlogicpti.c3
-rw-r--r--drivers/scsi/scsi_error.c18
-rw-r--r--drivers/scsi/scsi_lib.c46
-rw-r--r--drivers/scsi/scsi_priv.h11
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_fc.c10
-rw-r--r--drivers/scsi/scsi_transport_spi.c7
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/scsi/stex.c17
-rw-r--r--drivers/scsi/storvsc_drv.c13
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/scsi/wd33c93.c60
-rw-r--r--drivers/scsi/wd33c93.h5
-rw-r--r--drivers/scsi/xen-scsifront.c8
-rw-r--r--drivers/slimbus/Kconfig3
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c31
-rw-r--r--drivers/soc/mediatek/Kconfig2
-rw-r--r--drivers/soc/sifive/Kconfig6
-rw-r--r--drivers/soc/sifive/Makefile2
-rw-r--r--drivers/soc/sifive/sifive_ccache.c255
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c237
-rw-r--r--drivers/soundwire/bus.c94
-rw-r--r--drivers/soundwire/cadence_master.c104
-rw-r--r--drivers/soundwire/dmi-quirks.c27
-rw-r--r--drivers/soundwire/intel.c734
-rw-r--r--drivers/soundwire/intel_init.c2
-rw-r--r--drivers/soundwire/qcom.c9
-rw-r--r--drivers/spmi/spmi-pmic-arb.c91
-rw-r--r--drivers/spmi/spmi.c4
-rw-r--r--drivers/ssb/driver_gpio.c6
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/clocking-wizard/Kconfig10
-rw-r--r--drivers/staging/clocking-wizard/Makefile2
-rw-r--r--drivers/staging/clocking-wizard/TODO13
-rw-r--r--drivers/staging/clocking-wizard/dt-binding.txt30
-rw-r--r--drivers/staging/fwserial/Kconfig32
-rw-r--r--drivers/staging/fwserial/Makefile3
-rw-r--r--drivers/staging/fwserial/TODO14
-rw-r--r--drivers/staging/fwserial/dma_fifo.c294
-rw-r--r--drivers/staging/fwserial/dma_fifo.h117
-rw-r--r--drivers/staging/fwserial/fwserial.c2890
-rw-r--r--drivers/staging/fwserial/fwserial.h359
-rw-r--r--drivers/staging/greybus/audio_helper.c11
-rw-r--r--drivers/staging/greybus/uart.c2
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/cdc/Kconfig17
-rw-r--r--drivers/staging/iio/cdc/Makefile6
-rw-r--r--drivers/staging/iio/frequency/ad9832.c4
-rw-r--r--drivers/staging/iio/frequency/ad9834.c2
-rw-r--r--drivers/staging/iio/meter/ade7854.h2
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c4
-rw-r--r--drivers/staging/octeon/ethernet-tx.c4
-rw-r--r--drivers/staging/octeon/ethernet-tx.h4
-rw-r--r--drivers/staging/pi433/pi433_if.c16
-rw-r--r--drivers/staging/pi433/rf69.c2
-rw-r--r--drivers/staging/r8188eu/Makefile6
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c42
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c1
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c91
-rw-r--r--drivers/staging/r8188eu/core/rtw_fw.c2
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c13
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c285
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c153
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c84
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c19
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c204
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c28
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c45
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c269
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c68
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c14
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c39
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c173
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c65
-rw-r--r--drivers/staging/r8188eu/hal/odm_RegConfig8188E.c89
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c2
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c40
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c45
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c15
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_xmit.c22
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_recv.c91
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c4
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c205
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c15
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyCfg.h1
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_BB.h4
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h3
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_RF.h2
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h7
-rw-r--r--drivers/staging/r8188eu/include/hal_com.h4
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h5
-rw-r--r--drivers/staging/r8188eu/include/ioctl_cfg80211.h89
-rw-r--r--drivers/staging/r8188eu/include/mlme_osdep.h19
-rw-r--r--drivers/staging/r8188eu/include/odm_HWConfig.h1
-rw-r--r--drivers/staging/r8188eu/include/odm_RegConfig8188E.h21
-rw-r--r--drivers/staging/r8188eu/include/odm_types.h5
-rw-r--r--drivers/staging/r8188eu/include/osdep_intf.h3
-rw-r--r--drivers/staging/r8188eu/include/recv_osdep.h30
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h11
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_rf.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h12
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_xmit.h8
-rw-r--r--drivers/staging/r8188eu/include/rtw_ap.h2
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h4
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h8
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h20
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h4
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h16
-rw-r--r--drivers/staging/r8188eu/include/wlan_bssdef.h4
-rw-r--r--drivers/staging/r8188eu/include/xmit_osdep.h49
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c16
-rw-r--r--drivers/staging/r8188eu/os_dep/mlme_linux.c205
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c18
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c11
-rw-r--r--drivers/staging/r8188eu/os_dep/recv_linux.c165
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c59
-rw-r--r--drivers/staging/r8188eu/os_dep/xmit_linux.c237
-rw-r--r--drivers/staging/rtl8192e/Kconfig12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c185
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c7
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c164
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c26
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c296
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h14
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c210
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c26
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c68
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c68
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c9
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h53
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c128
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib.h16
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c10
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c42
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c26
-rw-r--r--drivers/staging/rtl8192u/Makefile1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c6
-rw-r--r--drivers/staging/rtl8192u/r8192U.h9
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c226
-rw-r--r--drivers/staging/rtl8192u/r8192U_debugfs.c188
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c18
-rw-r--r--drivers/staging/rtl8712/xmit_osdep.h2
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c100
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c17
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c21
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_odm.c195
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c44
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c119
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c57
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c130
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h39
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_precomp.h1
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h3
-rw-r--r--drivers/staging/rtl8723bs/include/hal_btcoex.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h9
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_odm.h28
-rw-r--r--drivers/staging/rtl8723bs/include/xmit_osdep.h4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c109
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c60
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c50
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c17
-rw-r--r--drivers/staging/sm750fb/sm750.c12
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c70
-rw-r--r--drivers/staging/vt6655/baseband.c2
-rw-r--r--drivers/staging/vt6655/card.c30
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/device_main.c104
-rw-r--r--drivers/staging/vt6655/mac.c141
-rw-r--r--drivers/staging/vt6655/mac.h124
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c2
-rw-r--r--drivers/target/target_core_alua.c3
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/target/target_core_spc.c6
-rw-r--r--drivers/tee/optee/call.c18
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/imx_sc_thermal.c68
-rw-r--r--drivers/thermal/qcom/Kconfig2
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c3
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c2
-rw-r--r--drivers/thermal/rcar_thermal.c2
-rw-r--r--drivers/thermal/thermal_core.c2
-rw-r--r--drivers/thermal/thermal_of.c44
-rw-r--r--drivers/thermal/thermal_sysfs.c8
-rw-r--r--drivers/thunderbolt/Kconfig10
-rw-r--r--drivers/thunderbolt/debugfs.c836
-rw-r--r--drivers/thunderbolt/domain.c10
-rw-r--r--drivers/thunderbolt/icm.c3
-rw-r--r--drivers/thunderbolt/nhi.c55
-rw-r--r--drivers/thunderbolt/nhi.h3
-rw-r--r--drivers/thunderbolt/nvm.c385
-rw-r--r--drivers/thunderbolt/retimer.c113
-rw-r--r--drivers/thunderbolt/sb_regs.h58
-rw-r--r--drivers/thunderbolt/switch.c449
-rw-r--r--drivers/thunderbolt/tb.c33
-rw-r--r--drivers/thunderbolt/tb.h56
-rw-r--r--drivers/thunderbolt/tb_regs.h3
-rw-r--r--drivers/thunderbolt/usb4.c140
-rw-r--r--drivers/thunderbolt/xdomain.c37
-rw-r--r--drivers/tty/amiserial.c6
-rw-r--r--drivers/tty/hvc/hvc_iucv.c11
-rw-r--r--drivers/tty/hvc/hvcs.c3
-rw-r--r--drivers/tty/moxa.c9
-rw-r--r--drivers/tty/mxser.c8
-rw-r--r--drivers/tty/n_gsm.c202
-rw-r--r--drivers/tty/n_hdlc.c28
-rw-r--r--drivers/tty/n_tty.c2
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/21285.c2
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c2
-rw-r--r--drivers/tty/serial/8250/8250_core.c16
-rw-r--r--drivers/tty/serial/8250/8250_dma.c7
-rw-r--r--drivers/tty/serial/8250/8250_dw.c2
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c3
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h2
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c1
-rw-r--r--drivers/tty/serial/8250/8250_mid.c5
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c9
-rw-r--r--drivers/tty/serial/8250/8250_pci.c14
-rw-r--r--drivers/tty/serial/8250/8250_port.c62
-rw-r--r--drivers/tty/serial/Kconfig20
-rw-r--r--drivers/tty/serial/altera_jtaguart.c36
-rw-r--r--drivers/tty/serial/altera_uart.c18
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c20
-rw-r--r--drivers/tty/serial/apbuart.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c9
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c87
-rw-r--r--drivers/tty/serial/atmel_serial.h75
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c5
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h1
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c55
-rw-r--r--drivers/tty/serial/digicolor-usart.c2
-rw-r--r--drivers/tty/serial/dz.c11
-rw-r--r--drivers/tty/serial/earlycon.c6
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c27
-rw-r--r--drivers/tty/serial/icom.c5
-rw-r--r--drivers/tty/serial/imx.c10
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c3
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c4
-rw-r--r--drivers/tty/serial/lantiq.c25
-rw-r--r--drivers/tty/serial/liteuart.c2
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c12
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/men_z135_uart.c4
-rw-r--r--drivers/tty/serial/meson_uart.c31
-rw-r--r--drivers/tty/serial/milbeaut_usio.c3
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c30
-rw-r--r--drivers/tty/serial/mps2-uart.c2
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mux.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/omap-serial.c49
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pch_uart.c7
-rw-r--r--drivers/tty/serial/pic32_uart.c52
-rw-r--r--drivers/tty/serial/pmac_zilog.c4
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c8
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/rp2.c5
-rw-r--r--drivers/tty/serial/sa1100.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c2
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/serial/sccnxp.c3
-rw-r--r--drivers/tty/serial/serial-tegra.c15
-rw-r--r--drivers/tty/serial/serial_core.c46
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c8
-rw-r--r--drivers/tty/serial/sifive.c2
-rw-r--r--drivers/tty/serial/sprd_serial.c5
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c108
-rw-r--r--drivers/tty/serial/sunhv.c2
-rw-r--r--drivers/tty/serial/sunplus-uart.c2
-rw-r--r--drivers/tty/serial/sunsab.c22
-rw-r--r--drivers/tty/serial/sunsu.c8
-rw-r--r--drivers/tty/serial/sunzilog.c8
-rw-r--r--drivers/tty/serial/tegra-tcu.c2
-rw-r--r--drivers/tty/serial/timbuart.c4
-rw-r--r--drivers/tty/serial/uartlite.c5
-rw-r--r--drivers/tty/serial/ucc_uart.c18
-rw-r--r--drivers/tty/serial/vt8500_serial.c17
-rw-r--r--drivers/tty/serial/xilinx_uartps.c62
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/synclink_gt.c11
-rw-r--r--drivers/tty/tty.h2
-rw-r--r--drivers/tty/tty_baudrate.c26
-rw-r--r--drivers/tty/tty_io.c11
-rw-r--r--drivers/tty/tty_ioctl.c79
-rw-r--r--drivers/tty/tty_mutex.c6
-rw-r--r--drivers/tty/vcc.c1
-rw-r--r--drivers/tty/vt/vt.c11
-rw-r--r--drivers/ufs/core/ufs-sysfs.c85
-rw-r--r--drivers/ufs/core/ufshcd-priv.h11
-rw-r--r--drivers/ufs/core/ufshcd.c95
-rw-r--r--drivers/ufs/host/ufs-mediatek-trace.h27
-rw-r--r--drivers/ufs/host/ufs-mediatek.c205
-rw-r--r--drivers/ufs/host/ufs-mediatek.h7
-rw-r--r--drivers/ufs/host/ufs-qcom.c2
-rw-r--r--drivers/uio/uio_dfl.c2
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c2
-rw-r--r--drivers/usb/chipidea/Kconfig10
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c1
-rw-r--r--drivers/usb/chipidea/host.c7
-rw-r--r--drivers/usb/chipidea/otg_fsm.c7
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/common/debug.c96
-rw-r--r--drivers/usb/common/ulpi.c20
-rw-r--r--drivers/usb/common/usb-conn-gpio.c6
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/core/hcd-pci.c7
-rw-r--r--drivers/usb/core/hcd.c11
-rw-r--r--drivers/usb/core/quirks.c13
-rw-r--r--drivers/usb/core/urb.c2
-rw-r--r--drivers/usb/dwc2/core.c30
-rw-r--r--drivers/usb/dwc2/core.h30
-rw-r--r--drivers/usb/dwc2/core_intr.c30
-rw-r--r--drivers/usb/dwc2/hcd.c30
-rw-r--r--drivers/usb/dwc2/hcd.h31
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c30
-rw-r--r--drivers/usb/dwc2/hcd_intr.c30
-rw-r--r--drivers/usb/dwc2/hcd_queue.c30
-rw-r--r--drivers/usb/dwc2/hw.h30
-rw-r--r--drivers/usb/dwc2/params.c30
-rw-r--r--drivers/usb/dwc2/pci.c30
-rw-r--r--drivers/usb/dwc2/platform.c30
-rw-r--r--drivers/usb/dwc3/core.c137
-rw-r--r--drivers/usb/dwc3/core.h7
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/drd.c50
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c14
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c22
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c63
-rw-r--r--drivers/usb/dwc3/ep0.c11
-rw-r--r--drivers/usb/dwc3/gadget.c73
-rw-r--r--drivers/usb/dwc3/trace.h3
-rw-r--r--drivers/usb/gadget/function/f_fs.c6
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c11
-rw-r--r--drivers/usb/gadget/function/f_ncm.c60
-rw-r--r--drivers/usb/gadget/function/f_printer.c12
-rw-r--r--drivers/usb/gadget/function/f_tcm.c4
-rw-r--r--drivers/usb/gadget/function/f_uvc.c37
-rw-r--r--drivers/usb/gadget/function/rndis.c4
-rw-r--r--drivers/usb/gadget/function/u_ether.c8
-rw-r--r--drivers/usb/gadget/function/u_serial.c4
-rw-r--r--drivers/usb/gadget/function/uvc.h3
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c294
-rw-r--r--drivers/usb/gadget/function/uvc_video.c9
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c10
-rw-r--r--drivers/usb/gadget/udc/net2272.c4
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c4
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c131
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c78
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.h3
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c6
-rw-r--r--drivers/usb/host/Kconfig8
-rw-r--r--drivers/usb/host/ehci-atmel.c3
-rw-r--r--drivers/usb/host/ehci-exynos.c19
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c1
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c53
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/ehci-orion.c4
-rw-r--r--drivers/usb/host/ehci-pci.c4
-rw-r--r--drivers/usb/host/ehci-platform.c4
-rw-r--r--drivers/usb/host/ehci-q.c4
-rw-r--r--drivers/usb/host/ehci-spear.c4
-rw-r--r--drivers/usb/host/ehci-st.c4
-rw-r--r--drivers/usb/host/fhci-hcd.c63
-rw-r--r--drivers/usb/host/fhci-hub.c15
-rw-r--r--drivers/usb/host/fhci.h4
-rw-r--r--drivers/usb/host/fotg210-hcd.c1
-rw-r--r--drivers/usb/host/ohci-at91.c3
-rw-r--r--drivers/usb/host/ohci-da8xx.c1
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c1
-rw-r--r--drivers/usb/host/ohci-nxp.c2
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/host/ohci-pci.c4
-rw-r--r--drivers/usb/host/ohci-platform.c32
-rw-r--r--drivers/usb/host/ohci-pxa27x.c4
-rw-r--r--drivers/usb/host/ohci-s3c2410.c3
-rw-r--r--drivers/usb/host/ohci-spear.c3
-rw-r--r--drivers/usb/host/ohci-st.c4
-rw-r--r--drivers/usb/host/u132-hcd.c1
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c2
-rw-r--r--drivers/usb/host/xhci-mem.c7
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-plat.c18
-rw-r--r--drivers/usb/host/xhci.c8
-rw-r--r--drivers/usb/host/xhci.h5
-rw-r--r--drivers/usb/misc/idmouse.c8
-rw-r--r--drivers/usb/misc/usb251xb.c18
-rw-r--r--drivers/usb/misc/usb3503.c25
-rw-r--r--drivers/usb/misc/uss720.c8
-rw-r--r--drivers/usb/mon/mon_bin.c5
-rw-r--r--drivers/usb/mtu3/mtu3_core.c2
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c2
-rw-r--r--drivers/usb/musb/da8xx.c8
-rw-r--r--drivers/usb/musb/jz4740.c10
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/musb_cppi41.c6
-rw-r--r--drivers/usb/musb/musb_gadget.c3
-rw-r--r--drivers/usb/musb/sunxi.c29
-rw-r--r--drivers/usb/phy/phy-generic.c9
-rw-r--r--drivers/usb/phy/phy-jz4770.c25
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c4
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c14
-rw-r--r--drivers/usb/serial/ark3116.c2
-rw-r--r--drivers/usb/serial/belkin_sa.c6
-rw-r--r--drivers/usb/serial/ch341.c5
-rw-r--r--drivers/usb/serial/console.c2
-rw-r--r--drivers/usb/serial/cp210x.c13
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c6
-rw-r--r--drivers/usb/serial/f81232.c3
-rw-r--r--drivers/usb/serial/f81534.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c491
-rw-r--r--drivers/usb/serial/ftdi_sio.h22
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/io_edgeport.c7
-rw-r--r--drivers/usb/serial/io_ti.c8
-rw-r--r--drivers/usb/serial/ir-usb.c6
-rw-r--r--drivers/usb/serial/iuu_phoenix.c3
-rw-r--r--drivers/usb/serial/keyspan.c3
-rw-r--r--drivers/usb/serial/keyspan_pda.c3
-rw-r--r--drivers/usb/serial/kl5kusb105.c5
-rw-r--r--drivers/usb/serial/kobil_sct.c6
-rw-r--r--drivers/usb/serial/mct_u232.c5
-rw-r--r--drivers/usb/serial/mos7720.c5
-rw-r--r--drivers/usb/serial/mos7840.c5
-rw-r--r--drivers/usb/serial/mxuport.c4
-rw-r--r--drivers/usb/serial/oti6858.c6
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/quatech2.c4
-rw-r--r--drivers/usb/serial/spcp8x5.c3
-rw-r--r--drivers/usb/serial/ssu100.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c6
-rw-r--r--drivers/usb/serial/upd78f0730.c4
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/serial/xr_serial.c20
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h6
-rw-r--r--drivers/usb/typec/anx7411.c4
-rw-r--r--drivers/usb/typec/mux.c4
-rw-r--r--drivers/usb/typec/qcom-pmic-typec.c5
-rw-r--r--drivers/usb/typec/retimer.c2
-rw-r--r--drivers/usb/typec/stusb160x.c7
-rw-r--r--drivers/usb/typec/tcpm/Kconfig11
-rw-r--r--drivers/usb/typec/tcpm/Makefile1
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c7
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c22
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6370.c207
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c150
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c8
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c12
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c2
-rw-r--r--drivers/usb/usbip/stub_main.c2
-rw-r--r--drivers/usb/usbip/stub_rx.c4
-rw-r--r--drivers/usb/usbip/usbip_common.c91
-rw-r--r--drivers/vdpa/vdpa.c73
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c12
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c2
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c5
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c22
-rw-r--r--drivers/vfio/Kconfig1
-rw-r--r--drivers/vfio/Makefile7
-rw-r--r--drivers/vfio/container.c680
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c89
-rw-r--r--drivers/vfio/iova_bitmap.c422
-rw-r--r--drivers/vfio/mdev/mdev_core.c195
-rw-r--r--drivers/vfio/mdev/mdev_driver.c7
-rw-r--r--drivers/vfio/mdev/mdev_private.h32
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c190
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c136
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h7
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c995
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h63
-rw-r--r--drivers/vfio/pci/mlx5/main.c55
-rw-r--r--drivers/vfio/pci/vfio_pci.c22
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c1105
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c8
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c34
-rw-r--r--drivers/vfio/pci/vfio_pci_priv.h104
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c6
-rw-r--r--drivers/vfio/pci/vfio_pci_zdev.c2
-rw-r--r--drivers/vfio/platform/vfio_amba.c72
-rw-r--r--drivers/vfio/platform/vfio_platform.c66
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c71
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h18
-rw-r--r--drivers/vfio/vfio.h62
-rw-r--r--drivers/vfio/vfio_iommu_type1.c1
-rw-r--r--drivers/vfio/vfio_main.c1392
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/video/fbdev/arkfb.c8
-rw-r--r--drivers/video/fbdev/controlfb.c7
-rw-r--r--drivers/video/fbdev/gbefb.c20
-rw-r--r--drivers/video/fbdev/imxfb.c1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c6
-rw-r--r--drivers/video/fbdev/smscufx.c14
-rw-r--r--drivers/video/fbdev/stifb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c11
-rw-r--r--drivers/video/fbdev/udlfb.c2
-rw-r--r--drivers/video/fbdev/uvesafb.c12
-rw-r--r--drivers/video/fbdev/vga16fb.c1
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c16
-rw-r--r--drivers/virtio/virtio_pci_common.c3
-rw-r--r--drivers/virtio/virtio_ring.c18
-rw-r--r--drivers/w1/w1_netlink.c3
-rw-r--r--drivers/watchdog/Kconfig23
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c2
-rw-r--r--drivers/watchdog/aspeed_wdt.c12
-rw-r--r--drivers/watchdog/bd9576_wdt.c51
-rw-r--r--drivers/watchdog/booke_wdt.c8
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/exar_wdt.c427
-rw-r--r--drivers/watchdog/ftwdt010_wdt.c25
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c212
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c24
-rw-r--r--drivers/watchdog/npcm_wdt.c16
-rw-r--r--drivers/watchdog/rti_wdt.c3
-rw-r--r--drivers/watchdog/rzg2l_wdt.c39
-rw-r--r--drivers/watchdog/s3c2410_wdt.c41
-rw-r--r--drivers/watchdog/sa1100_wdt.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c13
-rw-r--r--drivers/watchdog/twl4030_wdt.c1
-rw-r--r--drivers/watchdog/w83627hf_wdt.c12
-rw-r--r--drivers/watchdog/w83977f_wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c4
-rw-r--r--drivers/watchdog/watchdog_dev.c18
-rw-r--r--drivers/watchdog/wdat_wdt.c5
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/gntdev-common.h3
-rw-r--r--drivers/xen/gntdev.c80
-rw-r--r--drivers/xen/grant-dma-ops.c133
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c12
2247 files changed, 98031 insertions, 36733 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 057857258bfd..bdf1c66141c9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -175,6 +175,7 @@ obj-$(CONFIG_USB4) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-y += hwtracing/intel_th/
obj-$(CONFIG_STM) += hwtracing/stm/
+obj-$(CONFIG_HISI_PTT) += hwtracing/ptt/
obj-y += android/
obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/
diff --git a/drivers/accessibility/speakup/speakup_dummy.c b/drivers/accessibility/speakup/speakup_dummy.c
index 34f11cd47073..56419dbb28d3 100644
--- a/drivers/accessibility/speakup/speakup_dummy.c
+++ b/drivers/accessibility/speakup/speakup_dummy.c
@@ -27,6 +27,7 @@ static struct var_t vars[] = {
{ INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } },
{ VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } },
{ TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } },
+ { PUNCT, .u.n = {"PUNCT %d\n", 0, 0, 3, 0, 0, NULL } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
@@ -42,6 +43,8 @@ static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute =
__ATTR(inflection, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute punct_attribute =
+ __ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
@@ -69,6 +72,7 @@ static struct attribute *synth_attrs[] = {
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&inflection_attribute.attr,
+ &punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c
index 99f1d4ac426a..28c8f60370cf 100644
--- a/drivers/accessibility/speakup/speakup_soft.c
+++ b/drivers/accessibility/speakup/speakup_soft.c
@@ -26,6 +26,7 @@
static int softsynth_probe(struct spk_synth *synth);
static void softsynth_release(struct spk_synth *synth);
static int softsynth_is_alive(struct spk_synth *synth);
+static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var);
static unsigned char get_index(struct spk_synth *synth);
static struct miscdevice synth_device, synthu_device;
@@ -33,6 +34,9 @@ static int init_pos;
static int misc_registered;
static struct var_t vars[] = {
+ /* DIRECT is put first so that module_param_named can access it easily */
+ { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
+
{ CAPS_START, .u.s = {"\x01+3p" } },
{ CAPS_STOP, .u.s = {"\x01-3p" } },
{ PAUSE, .u.n = {"\x01P" } },
@@ -41,10 +45,9 @@ static struct var_t vars[] = {
{ INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } },
{ VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
{ TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
- { PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } },
+ { PUNCT, .u.n = {"\x01%db", 0, 0, 3, 0, 0, NULL } },
{ VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } },
{ FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } },
- { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
@@ -133,7 +136,7 @@ static struct spk_synth synth_soft = {
.catch_up = NULL,
.flush = NULL,
.is_alive = softsynth_is_alive,
- .synth_adjust = NULL,
+ .synth_adjust = softsynth_adjust,
.read_buff_add = NULL,
.get_index = get_index,
.indexing = {
@@ -426,9 +429,32 @@ static int softsynth_is_alive(struct spk_synth *synth)
return 0;
}
+static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var)
+{
+ struct st_var_header *punc_level_var;
+ struct var_t *var_data;
+
+ if (var->var_id != PUNC_LEVEL)
+ return 0;
+
+	/* We want to set the speech synthesis punctuation level
+ * accordingly, so it properly tunes speaking A_PUNC characters */
+ var_data = var->data;
+ if (!var_data)
+ return 0;
+ punc_level_var = spk_get_var_header(PUNCT);
+ if (!punc_level_var)
+ return 0;
+ spk_set_num_var(var_data->u.n.value, punc_level_var, E_SET);
+
+ return 1;
+}
+
module_param_named(start, synth_soft.startup, short, 0444);
+module_param_named(direct, vars[0].u.n.default_val, int, 0444);
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
+MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_soft);
diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h
index 6a96ad94bc3f..3a14d39bf896 100644
--- a/drivers/accessibility/speakup/spk_types.h
+++ b/drivers/accessibility/speakup/spk_types.h
@@ -195,7 +195,7 @@ struct spk_synth {
void (*catch_up)(struct spk_synth *synth);
void (*flush)(struct spk_synth *synth);
int (*is_alive)(struct spk_synth *synth);
- int (*synth_adjust)(struct st_var_header *var);
+ int (*synth_adjust)(struct spk_synth *synth, struct st_var_header *var);
void (*read_buff_add)(u_char c);
unsigned char (*get_index)(struct spk_synth *synth);
struct synth_indexing indexing;
diff --git a/drivers/accessibility/speakup/varhandlers.c b/drivers/accessibility/speakup/varhandlers.c
index 067c0da97dcb..e1c9f42e39f0 100644
--- a/drivers/accessibility/speakup/varhandlers.c
+++ b/drivers/accessibility/speakup/varhandlers.c
@@ -138,6 +138,7 @@ struct st_var_header *spk_get_var_header(enum var_id_t var_id)
return NULL;
return p_header;
}
+EXPORT_SYMBOL_GPL(spk_get_var_header);
struct st_var_header *spk_var_header_by_name(const char *name)
{
@@ -221,15 +222,17 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
*p_val = val;
if (var->var_id == PUNC_LEVEL) {
spk_punc_mask = spk_punc_masks[val];
- return 0;
}
if (var_data->u.n.multiplier != 0)
val *= var_data->u.n.multiplier;
val += var_data->u.n.offset;
- if (var->var_id < FIRST_SYNTH_VAR || !synth)
+
+ if (!synth)
+ return 0;
+ if (synth->synth_adjust && synth->synth_adjust(synth, var))
+ return 0;
+ if (var->var_id < FIRST_SYNTH_VAR)
return 0;
- if (synth->synth_adjust)
- return synth->synth_adjust(var);
if (!var_data->u.n.synth_fmt)
return 0;
@@ -245,6 +248,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
synth_printf("%s", cp);
return 0;
}
+EXPORT_SYMBOL_GPL(spk_set_num_var);
int spk_set_string_var(const char *page, struct st_var_header *var, int len)
{
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d91ad378c00d..80ad530583c9 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
ghes_estatus_cache_add(generic, estatus);
}
- if (task_work_pending && current->mm != &init_mm) {
+ if (task_work_pending && current->mm) {
estatus_node->task_work.func = ghes_kick_task_work;
estatus_node->task_work_cpu = smp_processor_id();
ret = task_work_add(current, &estatus_node->task_work,
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index d594effe905f..97450f4003cc 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -687,7 +687,22 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
d_min = ret;
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
&& adev->wakeup.sleep_state >= target_state;
+ } else if (device_may_wakeup(dev) && dev->power.wakeirq) {
+ /*
+ * The ACPI subsystem doesn't manage the wake bit for IRQs
+ * defined with ExclusiveAndWake and SharedAndWake. Instead we
+ * expect them to be managed via the PM subsystem. Drivers
+ * should call dev_pm_set_wake_irq to register an IRQ as a wake
+ * source.
+ *
+ * If a device has a wake IRQ attached we need to check the
+ * _S0W method to get the correct wake D-state. Otherwise we
+ * end up putting the device into D3Cold which will more than
+ * likely disable wake functionality.
+ */
+ wakeup = true;
} else {
+ /* ACPI GPE is specified in _PRW. */
wakeup = adev->wakeup.flags.valid;
}
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 4db5bb587599..1cc4647f78b8 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -147,6 +147,7 @@ struct acpi_irq_parse_one_ctx {
* @polarity: polarity attributes of hwirq
* @polarity: polarity attributes of hwirq
* @shareable: shareable attributes of hwirq
+ * @wake_capable: wake capable attribute of hwirq
* @ctx: acpi_irq_parse_one_ctx updated by this function
*
* Description:
@@ -156,12 +157,13 @@ struct acpi_irq_parse_one_ctx {
static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode,
u32 hwirq, u8 triggering,
u8 polarity, u8 shareable,
+ u8 wake_capable,
struct acpi_irq_parse_one_ctx *ctx)
{
if (!fwnode)
return;
ctx->rc = 0;
- *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
ctx->fwspec->fwnode = fwnode;
ctx->fwspec->param[0] = hwirq;
ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity);
@@ -204,7 +206,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
irq->triggering, irq->polarity,
- irq->shareable, ctx);
+ irq->shareable, irq->wake_capable, ctx);
return AE_CTRL_TERMINATE;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
eirq = &ares->data.extended_irq;
@@ -218,7 +220,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
eirq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
eirq->triggering, eirq->polarity,
- eirq->shareable, ctx);
+ eirq->shareable, eirq->wake_capable, ctx);
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 514d89656dde..6f9489edfb4e 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -336,8 +336,9 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
* @triggering: Triggering type as provided by ACPI.
* @polarity: Interrupt polarity as provided by ACPI.
* @shareable: Whether or not the interrupt is shareable.
+ * @wake_capable: Wake capability as provided by ACPI.
*/
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable)
{
unsigned long flags;
@@ -351,6 +352,9 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
if (shareable == ACPI_SHARED)
flags |= IORESOURCE_IRQ_SHAREABLE;
+ if (wake_capable == ACPI_WAKE_CAPABLE)
+ flags |= IORESOURCE_IRQ_WAKECAPABLE;
+
return flags | IORESOURCE_IRQ;
}
EXPORT_SYMBOL_GPL(acpi_dev_irq_flags);
@@ -468,7 +472,7 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable,
- bool check_override)
+ u8 wake_capable, bool check_override)
{
int irq, p, t;
@@ -501,7 +505,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
- res->flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ res->flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
irq = acpi_register_gsi(NULL, gsi, triggering, polarity);
if (irq >= 0) {
res->start = irq;
@@ -549,7 +553,8 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
- irq->shareable, true);
+ irq->shareable, irq->wake_capable,
+ true);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
@@ -560,7 +565,8 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
if (is_gsi(ext_irq))
acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
ext_irq->triggering, ext_irq->polarity,
- ext_irq->shareable, false);
+ ext_irq->shareable, ext_irq->wake_capable,
+ false);
else
irqresource_disabled(res, 0);
break;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ad4b2987b3d6..0b557c0d405e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1088,6 +1088,14 @@ int __init acpi_sleep_init(void)
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_FIRMWARE,
acpi_power_off, NULL);
+
+ /*
+ * Windows uses S5 for reboot, so some BIOSes depend on it to
+ * perform proper reboot.
+ */
+ register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
+ SYS_OFF_PRIO_FIRMWARE,
+ acpi_power_off_prepare, NULL);
} else {
acpi_no_s5 = true;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 539660ef93c7..40b07057983e 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -158,7 +158,7 @@ struct acpi_thermal_flags {
};
struct acpi_thermal {
- struct acpi_device * device;
+ struct acpi_device *device;
acpi_bus_id name;
unsigned long temperature;
unsigned long last_temperature;
@@ -262,7 +262,7 @@ do { \
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
{
- acpi_status status = AE_OK;
+ acpi_status status;
unsigned long long tmp;
struct acpi_handle_list devices;
int valid = 0;
@@ -270,8 +270,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Critical Shutdown */
if (flag & ACPI_TRIPS_CRITICAL) {
- status = acpi_evaluate_integer(tz->device->handle,
- "_CRT", NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL, &tmp);
tz->trips.critical.temperature = tmp;
/*
* Treat freezing temperatures as invalid as well; some
@@ -284,8 +283,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
acpi_handle_debug(tz->device->handle,
"No critical threshold\n");
} else if (tmp <= 2732) {
- pr_info(FW_BUG "Invalid critical threshold (%llu)\n",
- tmp);
+ pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp);
tz->trips.critical.flags.valid = 0;
} else {
tz->trips.critical.flags.valid = 1;
@@ -312,8 +310,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Critical Sleep (optional) */
if (flag & ACPI_TRIPS_HOT) {
- status = acpi_evaluate_integer(tz->device->handle,
- "_HOT", NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.hot.flags.valid = 0;
acpi_handle_debug(tz->device->handle,
@@ -329,7 +326,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Passive (optional) */
if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) ||
- (flag == ACPI_TRIPS_INIT)) {
+ flag == ACPI_TRIPS_INIT) {
valid = tz->trips.passive.flags.valid;
if (psv == -1) {
status = AE_SUPPORT;
@@ -338,32 +335,31 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = AE_OK;
} else {
status = acpi_evaluate_integer(tz->device->handle,
- "_PSV", NULL, &tmp);
+ "_PSV", NULL, &tmp);
}
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
tz->trips.passive.flags.valid = 0;
- else {
+ } else {
tz->trips.passive.temperature = tmp;
tz->trips.passive.flags.valid = 1;
if (flag == ACPI_TRIPS_INIT) {
- status = acpi_evaluate_integer(
- tz->device->handle, "_TC1",
- NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TC1", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
tz->trips.passive.tc1 = tmp;
- status = acpi_evaluate_integer(
- tz->device->handle, "_TC2",
- NULL, &tmp);
+
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TC2", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
tz->trips.passive.tc2 = tmp;
- status = acpi_evaluate_integer(
- tz->device->handle, "_TSP",
- NULL, &tmp);
+
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TSP", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
@@ -374,25 +370,25 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
- NULL, &devices);
+ NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid passive threshold\n");
tz->trips.passive.flags.valid = 0;
- }
- else
+ } else {
tz->trips.passive.flags.valid = 1;
+ }
if (memcmp(&tz->trips.passive.devices, &devices,
- sizeof(struct acpi_handle_list))) {
+ sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.passive.devices, &devices,
- sizeof(struct acpi_handle_list));
+ sizeof(struct acpi_handle_list));
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
if (valid != tz->trips.passive.flags.valid)
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
}
/* Active (optional) */
@@ -403,29 +399,31 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (act == -1)
break; /* disable all active trip points */
- if ((flag == ACPI_TRIPS_INIT) || ((flag & ACPI_TRIPS_ACTIVE) &&
- tz->trips.active[i].flags.valid)) {
+ if (flag == ACPI_TRIPS_INIT || ((flag & ACPI_TRIPS_ACTIVE) &&
+ tz->trips.active[i].flags.valid)) {
status = acpi_evaluate_integer(tz->device->handle,
- name, NULL, &tmp);
+ name, NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.active[i].flags.valid = 0;
if (i == 0)
break;
+
if (act <= 0)
break;
+
if (i == 1)
- tz->trips.active[0].temperature =
- celsius_to_deci_kelvin(act);
+ tz->trips.active[0].temperature = celsius_to_deci_kelvin(act);
else
/*
* Don't allow override higher than
* the next higher trip point
*/
- tz->trips.active[i - 1].temperature =
- (tz->trips.active[i - 2].temperature <
+ tz->trips.active[i-1].temperature =
+ (tz->trips.active[i-2].temperature <
celsius_to_deci_kelvin(act) ?
- tz->trips.active[i - 2].temperature :
+ tz->trips.active[i-2].temperature :
celsius_to_deci_kelvin(act));
+
break;
} else {
tz->trips.active[i].temperature = tmp;
@@ -434,22 +432,22 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
name[2] = 'L';
- if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid ) {
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle,
- name, NULL, &devices);
+ name, NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid active%d threshold\n", i);
tz->trips.active[i].flags.valid = 0;
- }
- else
+ } else {
tz->trips.active[i].flags.valid = 1;
+ }
if (memcmp(&tz->trips.active[i].devices, &devices,
- sizeof(struct acpi_handle_list))) {
+ sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.active[i].devices, &devices,
- sizeof(struct acpi_handle_list));
+ sizeof(struct acpi_handle_list));
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
@@ -464,9 +462,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (flag & ACPI_TRIPS_DEVICES) {
memset(&devices, 0, sizeof(devices));
status = acpi_evaluate_reference(tz->device->handle, "_TZD",
- NULL, &devices);
- if (ACPI_SUCCESS(status)
- && memcmp(&tz->devices, &devices, sizeof(devices))) {
+ NULL, &devices);
+ if (ACPI_SUCCESS(status) &&
+ memcmp(&tz->devices, &devices, sizeof(devices))) {
tz->devices = devices;
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
@@ -548,8 +546,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
trip--;
}
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
- tz->trips.active[i].flags.valid; i++) {
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++) {
if (!trip) {
*type = THERMAL_TRIP_ACTIVE;
return 0;
@@ -572,8 +569,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.critical.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.critical.temperature,
- tz->kelvin_offset);
+ tz->trips.critical.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -582,8 +579,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.hot.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.hot.temperature,
- tz->kelvin_offset);
+ tz->trips.hot.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -592,8 +589,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.passive.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.passive.temperature,
- tz->kelvin_offset);
+ tz->trips.passive.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -603,8 +600,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
tz->trips.active[i].flags.valid; i++) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.active[i].temperature,
- tz->kelvin_offset);
+ tz->trips.active[i].temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -620,15 +617,16 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
if (tz->trips.critical.flags.valid) {
*temperature = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.critical.temperature,
- tz->kelvin_offset);
+ tz->trips.critical.temperature,
+ tz->kelvin_offset);
return 0;
- } else
- return -EINVAL;
+ }
+
+ return -EINVAL;
}
static int thermal_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
+ int trip, enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal->devdata;
enum thermal_trip_type type;
@@ -657,9 +655,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
* tz->temperature has already been updated by generic thermal layer,
* before this callback being invoked
*/
- i = (tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature))
- + (tz->trips.passive.tc2
- * (tz->temperature - tz->trips.passive.temperature));
+ i = tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature) +
+ tz->trips.passive.tc2 * (tz->temperature - tz->trips.passive.temperature);
if (i > 0)
*trend = THERMAL_TREND_RAISING;
@@ -667,6 +664,7 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
*trend = THERMAL_TREND_DROPPING;
else
*trend = THERMAL_TREND_STABLE;
+
return 0;
}
@@ -691,8 +689,8 @@ static void acpi_thermal_zone_device_critical(struct thermal_zone_device *therma
}
static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev,
- bool bind)
+ struct thermal_cooling_device *cdev,
+ bool bind)
{
struct acpi_device *device = cdev->devdata;
struct acpi_thermal *tz = thermal->devdata;
@@ -711,22 +709,23 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
if (tz->trips.passive.flags.valid) {
trip++;
- for (i = 0; i < tz->trips.passive.devices.count;
- i++) {
+ for (i = 0; i < tz->trips.passive.devices.count; i++) {
handle = tz->trips.passive.devices.handles[i];
dev = acpi_fetch_acpi_dev(handle);
if (dev != device)
continue;
+
if (bind)
- result =
- thermal_zone_bind_cooling_device
- (thermal, trip, cdev,
- THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
+ result = thermal_zone_bind_cooling_device(
+ thermal, trip, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
else
result =
- thermal_zone_unbind_cooling_device
- (thermal, trip, cdev);
+ thermal_zone_unbind_cooling_device(
+ thermal, trip, cdev);
+
if (result)
goto failed;
}
@@ -735,22 +734,24 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!tz->trips.active[i].flags.valid)
break;
+
trip++;
- for (j = 0;
- j < tz->trips.active[i].devices.count;
- j++) {
+ for (j = 0; j < tz->trips.active[i].devices.count; j++) {
handle = tz->trips.active[i].devices.handles[j];
dev = acpi_fetch_acpi_dev(handle);
if (dev != device)
continue;
+
if (bind)
- result = thermal_zone_bind_cooling_device
- (thermal, trip, cdev,
- THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
+ result = thermal_zone_bind_cooling_device(
+ thermal, trip, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
else
- result = thermal_zone_unbind_cooling_device
- (thermal, trip, cdev);
+ result = thermal_zone_unbind_cooling_device(
+ thermal, trip, cdev);
+
if (result)
goto failed;
}
@@ -762,14 +763,14 @@ failed:
static int
acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
+ struct thermal_cooling_device *cdev)
{
return acpi_thermal_cooling_device_cb(thermal, cdev, true);
}
static int
acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
+ struct thermal_cooling_device *cdev)
{
return acpi_thermal_cooling_device_cb(thermal, cdev, false);
}
@@ -802,20 +803,20 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
if (tz->trips.passive.flags.valid)
trips++;
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
- tz->trips.active[i].flags.valid; i++, trips++);
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid;
+ i++, trips++);
if (tz->trips.passive.flags.valid)
- tz->thermal_zone =
- thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, NULL,
- tz->trips.passive.tsp*100,
- tz->polling_frequency*100);
+ tz->thermal_zone = thermal_zone_device_register("acpitz", trips, 0, tz,
+ &acpi_thermal_zone_ops, NULL,
+ tz->trips.passive.tsp * 100,
+ tz->polling_frequency * 100);
else
tz->thermal_zone =
thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, NULL,
- 0, tz->polling_frequency*100);
+ &acpi_thermal_zone_ops, NULL,
+ 0, tz->polling_frequency * 100);
+
if (IS_ERR(tz->thermal_zone))
return -ENODEV;
@@ -881,7 +882,6 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
-
if (!tz)
return;
@@ -893,13 +893,13 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event, 0);
+ dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event, 0);
+ dev_name(&device->dev), event, 0);
break;
default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
@@ -942,8 +942,7 @@ static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz)
static int acpi_thermal_get_info(struct acpi_thermal *tz)
{
- int result = 0;
-
+ int result;
if (!tz)
return -EINVAL;
@@ -1020,9 +1019,8 @@ static void acpi_thermal_check_fn(struct work_struct *work)
static int acpi_thermal_add(struct acpi_device *device)
{
- int result = 0;
- struct acpi_thermal *tz = NULL;
-
+ struct acpi_thermal *tz;
+ int result;
if (!device)
return -EINVAL;
@@ -1063,7 +1061,7 @@ end:
static int acpi_thermal_remove(struct acpi_device *device)
{
- struct acpi_thermal *tz = NULL;
+ struct acpi_thermal *tz;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1099,6 +1097,7 @@ static int acpi_thermal_resume(struct device *dev)
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!tz->trips.active[i].flags.valid)
break;
+
tz->trips.active[i].flags.enabled = 1;
for (j = 0; j < tz->trips.active[i].devices.count; j++) {
result = acpi_bus_update_power(
@@ -1119,7 +1118,6 @@ static int acpi_thermal_resume(struct device *dev)
#endif
static int thermal_act(const struct dmi_system_id *d) {
-
if (act == 0) {
pr_notice("%s detected: disabling all active thermal trip points\n",
d->ident);
@@ -1128,14 +1126,12 @@ static int thermal_act(const struct dmi_system_id *d) {
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d) {
-
pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
d->ident);
nocrt = 1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
-
if (tzp == 0) {
pr_notice("%s detected: enabling thermal zone polling\n",
d->ident);
@@ -1144,7 +1140,6 @@ static int thermal_tzp(const struct dmi_system_id *d) {
return 0;
}
static int thermal_psv(const struct dmi_system_id *d) {
-
if (psv == 0) {
pr_notice("%s detected: disabling all passive thermal trip points\n",
d->ident);
@@ -1195,7 +1190,7 @@ static const struct dmi_system_id thermal_dmi_table[] __initconst = {
static int __init acpi_thermal_init(void)
{
- int result = 0;
+ int result;
dmi_check_system(thermal_dmi_table);
@@ -1222,8 +1217,6 @@ static void __exit acpi_thermal_exit(void)
{
acpi_bus_unregister_driver(&acpi_thermal_driver);
destroy_workqueue(acpi_thermal_pm_queue);
-
- return;
}
module_init(acpi_thermal_init);
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 6132092dab2a..ed752cbbe636 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -19,7 +19,6 @@
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
-#include <linux/dma-iommu.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 42f249070c09..5350c73564b6 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -654,12 +654,14 @@ void __init acpi_s2idle_setup(void)
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
+ unsigned int sleep_flags;
+
if (!lps0_device_handle || sleep_no_lps0)
return -ENODEV;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
list_add(&arg->list_node, &lps0_s2idle_devops_head);
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return 0;
}
@@ -667,12 +669,14 @@ EXPORT_SYMBOL_GPL(acpi_register_lps0_dev);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
+ unsigned int sleep_flags;
+
if (!lps0_device_handle || sleep_no_lps0)
return;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
list_del(&arg->list_node);
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6428f6be69e3..880224ec6abb 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4259,10 +4259,9 @@ static int binder_wait_for_work(struct binder_thread *thread,
struct binder_proc *proc = thread->proc;
int ret = 0;
- freezer_do_not_count();
binder_inner_proc_lock(proc);
for (;;) {
- prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
if (binder_has_work_ilocked(thread, do_proc_work))
break;
if (do_proc_work)
@@ -4279,7 +4278,6 @@ static int binder_wait_for_work(struct binder_thread *thread,
}
finish_wait(&thread->wait, &wait);
binder_inner_proc_unlock(proc);
- freezer_count();
return ret;
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 9b1778c00610..1c39cfce32fa 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -208,8 +208,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
- mm = alloc->vma_vm_mm;
+ if (need_mm && mmget_not_zero(alloc->mm))
+ mm = alloc->mm;
if (mm) {
mmap_read_lock(mm);
@@ -309,34 +309,13 @@ err_no_vma:
return vma ? -ENOMEM : -ESRCH;
}
-
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- unsigned long vm_start = 0;
-
- /*
- * Allow clearing the vma with holding just the read lock to allow
- * munmapping downgrade of the write lock before freeing and closing the
- * file using binder_alloc_vma_close().
- */
- if (vma) {
- vm_start = vma->vm_start;
- mmap_assert_write_locked(alloc->vma_vm_mm);
- } else {
- mmap_assert_locked(alloc->vma_vm_mm);
- }
-
- alloc->vma_addr = vm_start;
-}
-
static inline struct vm_area_struct *binder_alloc_get_vma(
struct binder_alloc *alloc)
{
struct vm_area_struct *vma = NULL;
if (alloc->vma_addr)
- vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
+ vma = vma_lookup(alloc->mm, alloc->vma_addr);
return vma;
}
@@ -401,15 +380,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
- mmap_read_lock(alloc->vma_vm_mm);
+ mmap_read_lock(alloc->mm);
if (!binder_alloc_get_vma(alloc)) {
- mmap_read_unlock(alloc->vma_vm_mm);
+ mmap_read_unlock(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
- mmap_read_unlock(alloc->vma_vm_mm);
+ mmap_read_unlock(alloc->mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -793,7 +772,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
- binder_alloc_set_vma(alloc, vma);
+ alloc->vma_addr = vma->vm_start;
return 0;
@@ -823,7 +802,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
BUG_ON(alloc->vma_addr &&
- vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
+ vma_lookup(alloc->mm, alloc->vma_addr));
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -873,8 +852,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
kfree(alloc->pages);
}
mutex_unlock(&alloc->mutex);
- if (alloc->vma_vm_mm)
- mmdrop(alloc->vma_vm_mm);
+ if (alloc->mm)
+ mmdrop(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -931,13 +910,13 @@ void binder_alloc_print_pages(struct seq_file *m,
* read inconsistent state.
*/
- mmap_read_lock(alloc->vma_vm_mm);
+ mmap_read_lock(alloc->mm);
if (binder_alloc_get_vma(alloc) == NULL) {
- mmap_read_unlock(alloc->vma_vm_mm);
+ mmap_read_unlock(alloc->mm);
goto uninitialized;
}
- mmap_read_unlock(alloc->vma_vm_mm);
+ mmap_read_unlock(alloc->mm);
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
@@ -983,7 +962,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ alloc->vma_addr = 0;
}
/**
@@ -1020,7 +999,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- mm = alloc->vma_vm_mm;
+ mm = alloc->mm;
if (!mmget_not_zero(mm))
goto err_mmget;
if (!mmap_read_trylock(mm))
@@ -1089,8 +1068,8 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->pid = current->group_leader->pid;
- alloc->vma_vm_mm = current->mm;
- mmgrab(alloc->vma_vm_mm);
+ alloc->mm = current->mm;
+ mmgrab(alloc->mm);
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 1e4fd37af5e0..0f811ac4bcff 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -74,11 +74,10 @@ struct binder_lru_page {
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @vma: vm_area_struct passed to mmap_handler
- * (invarient after mmap)
- * @tsk: tid for task that called init for this proc
- * (invariant after init)
- * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap)
+ * @mutex: protects binder_alloc fields
+ * @vma_addr: vm_area_struct->vm_start passed to mmap_handler
+ * (invariant after mmap)
+ * @mm: copy of task->mm (invariant after open)
* @buffer: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
@@ -101,7 +100,7 @@ struct binder_lru_page {
struct binder_alloc {
struct mutex mutex;
unsigned long vma_addr;
- struct mm_struct *vma_vm_mm;
+ struct mm_struct *mm;
void __user *buffer;
struct list_head buffers;
struct rb_root free_buffers;
@@ -109,7 +108,6 @@ struct binder_alloc {
size_t free_async_space;
struct binder_lru_page *pages;
size_t buffer_size;
- uint32_t buffer_free;
int pid;
size_t pages_high;
bool oneway_spam_detected;
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 588d753a7a19..09b2ce7e4c34 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -39,7 +39,6 @@
#define FIRST_INODE 1
#define SECOND_INODE 2
#define INODE_OFFSET 3
-#define INTSTRLEN 21
#define BINDERFS_MAX_MINOR (1U << MINORBITS)
/* Ensure that the initial ipc namespace always has devices available. */
#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
@@ -340,22 +339,10 @@ static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
return 0;
}
-static void binderfs_put_super(struct super_block *sb)
-{
- struct binderfs_info *info = sb->s_fs_info;
-
- if (info && info->ipc_ns)
- put_ipc_ns(info->ipc_ns);
-
- kfree(info);
- sb->s_fs_info = NULL;
-}
-
static const struct super_operations binderfs_super_ops = {
.evict_inode = binderfs_evict_inode,
.show_options = binderfs_show_options,
.statfs = simple_statfs,
- .put_super = binderfs_put_super,
};
static inline bool is_binderfs_control_device(const struct dentry *dentry)
@@ -785,11 +772,27 @@ static int binderfs_init_fs_context(struct fs_context *fc)
return 0;
}
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ /*
+ * struct binderfs_info is still needed during inode eviction, so wipe
+ * the super_block first and only then free struct binderfs_info.
+ */
+ kill_litter_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
static struct file_system_type binder_fs_type = {
.name = "binder",
.init_fs_context = binderfs_init_fs_context,
.parameters = binderfs_fs_parameters,
- .kill_sb = kill_litter_super,
+ .kill_sb = binderfs_kill_super,
.fs_flags = FS_USERNS_MOUNT,
};
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index da7ee8bec165..7add8e79912b 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -257,7 +257,7 @@ enum {
PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
/* em constants */
- EM_MAX_SLOTS = 8,
+ EM_MAX_SLOTS = SATA_PMP_MAX_PORTS,
EM_MAX_RETRY = 5,
/* em_ctl bits */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index f61795c546cf..6f216eb25610 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -448,7 +448,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (!of_id)
return -ENODEV;
- priv->version = (enum brcm_ahci_version)of_id->data;
+ priv->version = (unsigned long)of_id->data;
priv->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index b734e069034d..a950767f7948 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1067,7 +1067,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
imxpriv->ahci_pdev = pdev;
imxpriv->no_device = false;
imxpriv->first_time = true;
- imxpriv->type = (enum ahci_imx_type)of_id->data;
+ imxpriv->type = (unsigned long)of_id->data;
imxpriv->sata_clk = devm_clk_get(dev, "sata");
if (IS_ERR(imxpriv->sata_clk)) {
@@ -1235,4 +1235,4 @@ module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("ahci:imx");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 6cd61842ad48..9cf9bf36a874 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -280,7 +280,7 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
return -ENOMEM;
if (of_id)
- qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+ qoriq_priv->type = (unsigned long)of_id->data;
else
qoriq_priv->type = (enum ahci_qoriq_type)acpi_id->driver_data;
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 5a2cac60a29a..8607b68eee53 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -236,7 +236,7 @@ static struct platform_driver st_ahci_driver = {
.driver = {
.name = DRV_NAME,
.pm = &st_ahci_pm_ops,
- .of_match_table = of_match_ptr(st_ahci_match),
+ .of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
.remove = ata_platform_remove_one,
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 7bb5db17f864..1e08704d5117 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -785,7 +785,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
of_devid = of_match_device(xgene_ahci_of_match, dev);
if (of_devid) {
if (of_devid->data)
- version = (enum xgene_ahci_version) of_devid->data;
+ version = (unsigned long) of_devid->data;
}
#ifdef CONFIG_ACPI
else {
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 590ebea99601..0195eb29f6c2 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -875,7 +875,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->type = (enum sata_rcar_type)of_device_get_match_data(dev);
+ priv->type = (unsigned long)of_device_get_match_data(dev);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index dd90591e51ba..e7d6e6657ffa 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -841,4 +841,23 @@ void __init init_cpu_topology(void)
return;
}
}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+
+ if (cpuid_topo->package_id != -1)
+ goto topology_populated;
+
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
+
+ pr_debug("CPU%u: package %d core %d thread %d\n",
+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+ cpuid_topo->thread_id);
+
+topology_populated:
+ update_siblings_masks(cpuid);
+}
#endif
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b3a43a164dcd..b902d1ecc247 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -154,8 +154,6 @@ extern void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
-extern char *make_class_name(const char *name, struct kobject *kobj);
-
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8feb85e186e3..64f7b9a0970f 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -260,7 +260,7 @@ EXPORT_SYMBOL_GPL(__class_create);
*/
void class_destroy(struct class *cls)
{
- if ((cls == NULL) || (IS_ERR(cls)))
+ if (IS_ERR_OR_NULL(cls))
return;
class_unregister(cls);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5fb4bc51dd8b..d02501933467 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2509,7 +2509,7 @@ static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
rc = kobject_synth_uevent(&dev->kobj, buf, count);
if (rc) {
- dev_err(dev, "uevent: failed to send synthetic uevent\n");
+ dev_err(dev, "uevent: failed to send synthetic uevent: %d\n", rc);
return rc;
}
@@ -4170,7 +4170,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,
struct device *dev = NULL;
int retval = -ENODEV;
- if (class == NULL || IS_ERR(class))
+ if (IS_ERR_OR_NULL(class))
goto error;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index ec69b43f926a..3dda62503102 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -836,7 +836,7 @@ static int __init save_async_options(char *buf)
if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
- strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+ strscpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
async_probe_default = parse_option_str(async_probe_drv_names, "*");
return 1;
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index f4d794d6bb85..1c06781f7114 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -25,6 +25,47 @@ struct devcd_entry {
struct device devcd_dev;
void *data;
size_t datalen;
+ /*
+ * Here, a mutex is required to serialize the calls to the del_wk work between
+ * user and kernel space, which happens when devcd is added with device_add()
+ * and a uevent is sent to user space. User space reads the uevent and calls
+ * devcd_data_write(), which tries to modify a work item that has not even
+ * been initialized/queued by devcoredump yet.
+ *
+ *
+ *
+ * cpu0(X) cpu1(Y)
+ *
+ * dev_coredump() uevent sent to user space
+ * device_add() ======================> user space process Y reads the
+ * uevents writes to devcd fd
+ * which results into writes to
+ *
+ * devcd_data_write()
+ * mod_delayed_work()
+ * try_to_grab_pending()
+ * del_timer()
+ * debug_assert_init()
+ * INIT_DELAYED_WORK()
+ * schedule_delayed_work()
+ *
+ *
+ * Also, the mutex alone would not be enough to avoid scheduling the
+ * del_wk work after it has been flushed by a call to devcd_free(),
+ * as shown below.
+ *
+ * disabled_store()
+ * devcd_free()
+ * mutex_lock() devcd_data_write()
+ * flush_delayed_work()
+ * mutex_unlock()
+ * mutex_lock()
+ * mod_delayed_work()
+ * mutex_unlock()
+ * So, the delete_work flag is required.
+ */
+ struct mutex mutex;
+ bool delete_work;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
@@ -84,7 +125,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work) {
+ devcd->delete_work = true;
+ mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ }
+ mutex_unlock(&devcd->mutex);
return count;
}
@@ -112,7 +158,12 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work)
+ devcd->delete_work = true;
+
flush_delayed_work(&devcd->del_wk);
+ mutex_unlock(&devcd->mutex);
return 0;
}
@@ -122,6 +173,30 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
return sysfs_emit(buf, "%d\n", devcd_disabled);
}
+/*
+ *
+ * disabled_store() worker()
+ * class_for_each_device(&devcd_class,
+ * NULL, NULL, devcd_free)
+ * ...
+ * ...
+ * while ((dev = class_dev_iter_next(&iter))
+ * devcd_del()
+ * device_del()
+ * put_device() <- last reference
+ * error = fn(dev, data) devcd_dev_release()
+ * devcd_free(dev, data) kfree(devcd)
+ * mutex_lock(&devcd->mutex);
+ *
+ *
+ * In the above diagram, it looks like disabled_store() races with a concurrently
+ * running devcd_del() and could trigger a memory abort while acquiring devcd->mutex,
+ * because the mutex is taken after the devcd memory has been freed by kfree() once
+ * its last reference was dropped with put_device(). However, this does not happen,
+ * since fn(dev, data) runs with its own reference to the device via klist_node, so
+ * that reference is not the last one and the situation above cannot occur.
+ */
+
static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
@@ -278,13 +353,16 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
+ devcd->delete_work = false;
+ mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
dev_set_name(&devcd->devcd_dev, "devcd%d",
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
+ mutex_lock(&devcd->mutex);
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -301,10 +379,11 @@ void dev_coredumpm(struct device *dev, struct module *owner,
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
-
+ mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
+ mutex_unlock(&devcd->mutex);
put_module:
module_put(owner);
free:
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 864d0b3f566e..4ab2b50ee38f 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -117,7 +117,9 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
if (unlikely(!dr))
return NULL;
- memset(dr, 0, offsetof(struct devres, data));
+ /* No need to clear memory twice */
+ if (!(gfp & __GFP_ZERO))
+ memset(dr, 0, offsetof(struct devres, data));
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index bc60c9cd3230..9aa0da991cfb 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -869,12 +869,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
}
}
-/* return true if the memory block is offlined, otherwise, return false */
-bool is_memblock_offlined(struct memory_block *mem)
-{
- return mem->state == MEM_OFFLINE;
-}
-
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
&dev_attr_probe.attr,
diff --git a/drivers/base/node.c b/drivers/base/node.c
index eb0f43784c2b..faf3597a96da 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/hugetlb.h>
static struct bus_type node_subsys = {
.name = "node",
@@ -433,6 +434,7 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d ShadowCallStack:%8lu kB\n"
#endif
"Node %d PageTables: %8lu kB\n"
+ "Node %d SecPageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
"Node %d WritebackTmp: %8lu kB\n"
@@ -459,6 +461,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
+ nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
@@ -587,64 +590,9 @@ static const struct attribute_group *node_dev_groups[] = {
NULL
};
-#ifdef CONFIG_HUGETLBFS
-/*
- * hugetlbfs per node attributes registration interface:
- * When/if hugetlb[fs] subsystem initializes [sometime after this module],
- * it will register its per node attributes for all online nodes with
- * memory. It will also call register_hugetlbfs_with_node(), below, to
- * register its attribute registration functions with this node driver.
- * Once these hooks have been initialized, the node driver will call into
- * the hugetlb module to [un]register attributes for hot-plugged nodes.
- */
-static node_registration_func_t __hugetlb_register_node;
-static node_registration_func_t __hugetlb_unregister_node;
-
-static inline bool hugetlb_register_node(struct node *node)
-{
- if (__hugetlb_register_node &&
- node_state(node->dev.id, N_MEMORY)) {
- __hugetlb_register_node(node);
- return true;
- }
- return false;
-}
-
-static inline void hugetlb_unregister_node(struct node *node)
-{
- if (__hugetlb_unregister_node)
- __hugetlb_unregister_node(node);
-}
-
-void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister)
-{
- __hugetlb_register_node = doregister;
- __hugetlb_unregister_node = unregister;
-}
-#else
-static inline void hugetlb_register_node(struct node *node) {}
-
-static inline void hugetlb_unregister_node(struct node *node) {}
-#endif
-
static void node_device_release(struct device *dev)
{
- struct node *node = to_node(dev);
-
-#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
- /*
- * We schedule the work only when a memory section is
- * onlined/offlined on this node. When we come here,
- * all the memory on this node has been offlined,
- * so we won't enqueue new work to this work.
- *
- * The work is using node->node_work, so we should
- * flush work before freeing the memory.
- */
- flush_work(&node->node_work);
-#endif
- kfree(node);
+ kfree(to_node(dev));
}
/*
@@ -663,13 +611,13 @@ static int register_node(struct node *node, int num)
node->dev.groups = node_dev_groups;
error = device_register(&node->dev);
- if (error)
+ if (error) {
put_device(&node->dev);
- else {
+ } else {
hugetlb_register_node(node);
-
compaction_register_node(node);
}
+
return error;
}
@@ -682,8 +630,8 @@ static int register_node(struct node *node, int num)
*/
void unregister_node(struct node *node)
{
+ hugetlb_unregister_node(node);
compaction_unregister_node(node);
- hugetlb_unregister_node(node); /* no-op, if memoryless node */
node_remove_accesses(node);
node_remove_caches(node);
device_unregister(&node->dev);
@@ -905,74 +853,8 @@ void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
(void *)&nid, func);
return;
}
-
-#ifdef CONFIG_HUGETLBFS
-/*
- * Handle per node hstate attribute [un]registration on transistions
- * to/from memoryless state.
- */
-static void node_hugetlb_work(struct work_struct *work)
-{
- struct node *node = container_of(work, struct node, node_work);
-
- /*
- * We only get here when a node transitions to/from memoryless state.
- * We can detect which transition occurred by examining whether the
- * node has memory now. hugetlb_register_node() already check this
- * so we try to register the attributes. If that fails, then the
- * node has transitioned to memoryless, try to unregister the
- * attributes.
- */
- if (!hugetlb_register_node(node))
- hugetlb_unregister_node(node);
-}
-
-static void init_node_hugetlb_work(int nid)
-{
- INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
-}
-
-static int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- struct memory_notify *mnb = arg;
- int nid = mnb->status_change_nid;
-
- switch (action) {
- case MEM_ONLINE:
- case MEM_OFFLINE:
- /*
- * offload per node hstate [un]registration to a work thread
- * when transitioning to/from memoryless state.
- */
- if (nid != NUMA_NO_NODE)
- schedule_work(&node_devices[nid]->node_work);
- break;
-
- case MEM_GOING_ONLINE:
- case MEM_GOING_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_CANCEL_OFFLINE:
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG */
-#if !defined(CONFIG_MEMORY_HOTPLUG) || !defined(CONFIG_HUGETLBFS)
-static inline int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- return NOTIFY_OK;
-}
-
-static void init_node_hugetlb_work(int nid) { }
-
-#endif
-
int __register_one_node(int nid)
{
int error;
@@ -991,8 +873,6 @@ int __register_one_node(int nid)
}
INIT_LIST_HEAD(&node_devices[nid]->access_list);
- /* initialize work queue for memory hot plug */
- init_node_hugetlb_work(nid);
node_init_caches(nid);
return error;
@@ -1063,13 +943,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
NULL,
};
-#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
void __init node_dev_init(void)
{
- static struct notifier_block node_memory_callback_nb = {
- .notifier_call = node_memory_callback,
- .priority = NODE_CALLBACK_PRI,
- };
int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
@@ -1079,8 +954,6 @@ void __init node_dev_init(void)
if (ret)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
- register_hotmemory_notifier(&node_memory_callback_nb);
-
/*
* Create all node devices, which will properly link the node
* to applicable memory block devices and already created cpu devices.
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 296ea673d661..12b044151298 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -138,6 +138,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
+EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 55a10e6d4e2a..ead135c7044c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2085,8 +2085,10 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
/* Always-on domains must be powered on at initialization. */
if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
- !genpd_status_on(genpd))
+ !genpd_status_on(genpd)) {
+ pr_err("always-on PM domain %s is not on\n", genpd->name);
return -EINVAL;
+ }
/* Multiple states but no governor doesn't make sense. */
if (!gov && genpd->state_count > 1)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index ed6f449f8e5c..4d6278a84868 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -17,7 +17,7 @@
#include <linux/property.h>
#include <linux/phy.h>
-struct fwnode_handle *dev_fwnode(struct device *dev)
+struct fwnode_handle *dev_fwnode(const struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
of_fwnode_handle(dev->of_node) : dev->fwnode;
@@ -1200,7 +1200,7 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
-const void *device_get_match_data(struct device *dev)
+const void *device_get_match_data(const struct device *dev)
{
return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index fac8ff983aec..65fb9bad1577 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -115,7 +115,7 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, gc->ngpio)
- generic_handle_irq(irq_find_mapping(gc->irq.domain, gpio));
+ generic_handle_domain_irq_safe(gc->irq.domain, gpio);
bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
return IRQ_HANDLED;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c897c4572036..ee69d50ba4fd 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -781,7 +781,7 @@ static struct socket *drbd_wait_for_connect(struct drbd_connection *connection,
timeo = connect_int * HZ;
/* 28.5% random jitter */
- timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
+ timeo += prandom_u32_max(2) ? timeo / 7 : -timeo / 7;
err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
if (err <= 0)
@@ -1004,7 +1004,7 @@ retry:
drbd_warn(connection, "Error receiving initial packet\n");
sock_release(s);
randomize:
- if (prandom_u32() & 1)
+ if (prandom_u32_max(2))
goto retry;
}
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 8f7f144e54f3..7f9bcc82fc9c 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -30,11 +30,6 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
return NULL;
memset(req, 0, sizeof(*req));
- req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src,
- GFP_NOIO, &drbd_io_bio_set);
- req->private_bio->bi_private = req;
- req->private_bio->bi_end_io = drbd_request_endio;
-
req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
| (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
@@ -1219,9 +1214,12 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio)
/* Update disk stats */
req->start_jif = bio_start_io_acct(req->master_bio);
- if (!get_ldev(device)) {
- bio_put(req->private_bio);
- req->private_bio = NULL;
+ if (get_ldev(device)) {
+ req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
+ bio, GFP_NOIO,
+ &drbd_io_bio_set);
+ req->private_bio->bi_private = req;
+ req->private_bio->bi_end_io = drbd_request_endio;
}
/* process discards always from our submitter thread */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4762a03e1ffe..5cffd96ef2d7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -157,8 +157,6 @@ static struct dentry *nbd_dbg_dir;
#define nbd_name(nbd) ((nbd)->disk->disk_name)
-#define NBD_MAGIC 0x68797548
-
#define NBD_DEF_BLKSIZE_BITS 10
static unsigned int nbds_max = 16;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 08b041159cd3..2cfed2e58d64 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -360,10 +360,9 @@ static int process_msg_sess_info(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen);
-static int rnbd_srv_rdma_ev(void *priv,
- struct rtrs_srv_op *id, int dir,
- void *data, size_t datalen, const void *usr,
- size_t usrlen)
+static int rnbd_srv_rdma_ev(void *priv, struct rtrs_srv_op *id,
+ void *data, size_t datalen,
+ const void *usr, size_t usrlen)
{
struct rnbd_srv_session *srv_sess = priv;
const struct rnbd_msg_hdr *hdr = usr;
@@ -389,8 +388,8 @@ static int rnbd_srv_rdma_ev(void *priv,
datalen);
break;
default:
- pr_warn("Received unexpected message type %d with dir %d from session %s\n",
- type, dir, srv_sess->sessname);
+ pr_warn("Received unexpected message type %d from session %s\n",
+ type, srv_sess->sessname);
return -EINVAL;
}
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 2651bf41dde3..5afce6ffaadf 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -124,7 +124,7 @@ struct ublk_queue {
bool force_abort;
unsigned short nr_io_ready; /* how many ios setup */
struct ublk_device *dev;
- struct ublk_io ios[0];
+ struct ublk_io ios[];
};
#define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3f4739d52268..19da5defd734 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -130,7 +130,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
+static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
{
unsigned short segments = blk_rq_nr_discard_segments(req);
unsigned short n = 0;
@@ -240,6 +240,9 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
type = VIRTIO_BLK_T_WRITE_ZEROES;
unmap = !(req->cmd_flags & REQ_NOUNMAP);
break;
+ case REQ_OP_SECURE_ERASE:
+ type = VIRTIO_BLK_T_SECURE_ERASE;
+ break;
case REQ_OP_DRV_IN:
type = VIRTIO_BLK_T_GET_ID;
break;
@@ -251,8 +254,9 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
- if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
- if (virtblk_setup_discard_write_zeroes(req, unmap))
+ if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
+ type == VIRTIO_BLK_T_SECURE_ERASE) {
+ if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
return BLK_STS_RESOURCE;
}
@@ -886,6 +890,8 @@ static int virtblk_probe(struct virtio_device *vdev)
int err, index;
u32 v, blk_size, max_size, sg_elems, opt_io_size;
+ u32 max_discard_segs = 0;
+ u32 discard_granularity = 0;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
@@ -1043,27 +1049,14 @@ static int virtblk_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
- discard_sector_alignment, &v);
- if (v)
- q->limits.discard_granularity = v << SECTOR_SHIFT;
- else
- q->limits.discard_granularity = blk_size;
+ discard_sector_alignment, &discard_granularity);
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
- &v);
-
- /*
- * max_discard_seg == 0 is out of spec but we always
- * handled it.
- */
- if (!v)
- v = sg_elems;
- blk_queue_max_discard_segments(q,
- min(v, MAX_DISCARD_SEGMENTS));
+ &max_discard_segs);
}
if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
@@ -1072,6 +1065,85 @@ static int virtblk_probe(struct virtio_device *vdev)
blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
}
+ /* The discard and secure erase limits are combined since the Linux
+ * block layer uses the same limit for both commands.
+ *
+ * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
+ * are negotiated, we will use the minimum between the limits.
+ *
+ * discard sector alignment is set to the minimum between discard_sector_alignment
+ * and secure_erase_sector_alignment.
+ *
+ * max discard segments is set to the minimum between max_discard_seg and
+ * max_secure_erase_seg.
+ */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ secure_erase_sector_alignment, &v);
+
+ /* secure_erase_sector_alignment should not be zero, the device should set a
+ * valid number of sectors.
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: secure_erase_sector_alignment can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ discard_granularity = min_not_zero(discard_granularity, v);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ max_secure_erase_sectors, &v);
+
+ /* max_secure_erase_sectors should not be zero, the device should set a
+ * valid number of sectors.
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: max_secure_erase_sectors can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ blk_queue_max_secure_erase_sectors(q, v);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ max_secure_erase_seg, &v);
+
+ /* max_secure_erase_seg should not be zero, the device should set a
+ * valid number of segments
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: max_secure_erase_seg can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ max_discard_segs = min_not_zero(max_discard_segs, v);
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
+ virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
+ /* max_discard_seg and discard_granularity will be 0 only
+ * if max_discard_seg and discard_sector_alignment fields in the virtio
+ * config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated.
+ * In this case, we use default values.
+ */
+ if (!max_discard_segs)
+ max_discard_segs = sg_elems;
+
+ blk_queue_max_discard_segments(q,
+ min(max_discard_segs, MAX_DISCARD_SEGMENTS));
+
+ if (discard_granularity)
+ q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
+ else
+ q->limits.discard_granularity = blk_size;
+ }
+
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
@@ -1167,6 +1239,7 @@ static unsigned int features_legacy[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
+ VIRTIO_BLK_F_SECURE_ERASE,
}
;
static unsigned int features[] = {
@@ -1174,6 +1247,7 @@ static unsigned int features[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
+ VIRTIO_BLK_F_SECURE_ERASE,
};
static struct virtio_driver virtio_blk = {
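For illustration, the limit merging in the hunks above boils down to the following userspace-style sketch; the MIN_NOT_ZERO macro mimics the kernel's min_not_zero(), and the sample values are invented, not part of the driver (the driver additionally caps the segment count at MAX_DISCARD_SEGMENTS):

#include <stdio.h>
#include <stdint.h>

/* ignore a limit that a negotiated feature left at 0 */
#define MIN_NOT_ZERO(a, b) ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

int main(void)
{
	uint32_t discard_sector_alignment = 0;      /* DISCARD negotiated, field left 0 */
	uint32_t secure_erase_sector_alignment = 8; /* in sectors */
	uint32_t max_discard_seg = 64;
	uint32_t max_secure_erase_seg = 32;
	uint32_t sg_elems = 128;                    /* fallback when both segment limits are 0 */
	uint32_t blk_size = 512;                    /* fallback granularity in bytes */

	uint32_t granularity = MIN_NOT_ZERO(discard_sector_alignment,
					    secure_erase_sector_alignment);
	uint32_t max_segs = MIN_NOT_ZERO(max_discard_seg, max_secure_erase_seg);

	if (!max_segs)
		max_segs = sg_elems;

	printf("discard_granularity = %u bytes, max segments = %u\n",
	       granularity ? granularity << 9 : blk_size, max_segs);
	return 0;
}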
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e551433cd107..966aab902d19 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,9 +52,6 @@ static unsigned int num_devices = 1;
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
-#ifdef CONFIG_ZRAM_WRITEBACK
-static const struct block_device_operations zram_wb_devops;
-#endif
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
@@ -329,8 +326,8 @@ static ssize_t idle_store(struct device *dev,
if (!sysfs_streq(buf, "all")) {
/*
- * If it did not parse as 'all' try to treat it as an integer when
- * we have memory tracking enabled.
+ * If it did not parse as 'all' try to treat it as an integer
+ * when we have memory tracking enabled.
*/
u64 age_sec;
@@ -345,7 +342,10 @@ static ssize_t idle_store(struct device *dev,
if (!init_done(zram))
goto out_unlock;
- /* A cutoff_time of 0 marks everything as idle, this is the "all" behavior */
+ /*
+ * A cutoff_time of 0 marks everything as idle, this is the
+ * "all" behavior.
+ */
mark_idle(zram, cutoff_time);
rv = len;
@@ -543,17 +543,6 @@ static ssize_t backing_dev_store(struct device *dev,
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
- /*
- * With writeback feature, zram does asynchronous IO so it's no longer
- * synchronous device so let's remove synchronous io flag. Othewise,
- * upper layer(e.g., swap) could wait IO completion rather than
- * (submit and return), which will cause system sluggish.
- * Furthermore, when the IO function returns(e.g., swap_readpage),
- * upper layer expects IO was done so it could deallocate the page
- * freely but in fact, IO is going on so finally could cause
- * use-after-free when the IO is really done.
- */
- zram->disk->fops = &zram_wb_devops;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
@@ -1267,6 +1256,9 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
struct bio_vec bvec;
zram_slot_unlock(zram, index);
+ /* A null bio means rw_page was used; we must fall back to the bio path */
+ if (!bio)
+ return -EOPNOTSUPP;
bvec.bv_page = page;
bvec.bv_len = PAGE_SIZE;
@@ -1410,9 +1402,19 @@ compress_again:
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
- if (!IS_ERR((void *)handle))
+ if (IS_ERR((void *)handle))
+ return PTR_ERR((void *)handle);
+
+ if (comp_len != PAGE_SIZE)
goto compress_again;
- return PTR_ERR((void *)handle);
+ /*
+ * If the page is not compressible, we still need to take the
+ * lock and run the code below. The zcomp_stream_get() call
+ * disables cpu hotplug and grabs the zstrm buffer back, so that
+ * the zstrm dereference below remains valid.
+ */
+ zstrm = zcomp_stream_get(zram->comp);
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
@@ -1710,9 +1712,6 @@ out:
static void zram_reset_device(struct zram *zram)
{
- struct zcomp *comp;
- u64 disksize;
-
down_write(&zram->init_lock);
zram->limit_pages = 0;
@@ -1722,17 +1721,15 @@ static void zram_reset_device(struct zram *zram)
return;
}
- comp = zram->comp;
- disksize = zram->disksize;
- zram->disksize = 0;
-
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
/* I/O operation under all of CPU are done so let's free */
- zram_meta_free(zram, disksize);
+ zram_meta_free(zram, zram->disksize);
+ zram->disksize = 0;
memset(&zram->stats, 0, sizeof(zram->stats));
- zcomp_destroy(comp);
+ zcomp_destroy(zram->comp);
+ zram->comp = NULL;
reset_bdev(zram);
up_write(&zram->init_lock);
@@ -1848,15 +1845,6 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
-#ifdef CONFIG_ZRAM_WRITEBACK
-static const struct block_device_operations zram_wb_devops = {
- .open = zram_open,
- .submit_bio = zram_submit_bio,
- .swap_slot_free_notify = zram_slot_free_notify,
- .owner = THIS_MODULE
-};
-#endif
-
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
@@ -2126,6 +2114,8 @@ static int __init zram_init(void)
{
int ret;
+ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
+
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);
if (ret < 0)
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 80c3b43b4828..a2bda53020fd 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -30,16 +30,15 @@
/*
- * The lower ZRAM_FLAG_SHIFT bits of table.flags is for
- * object size (excluding header), the higher bits is for
- * zram_pageflags.
+ * ZRAM is mainly used for memory efficiency so we want to keep memory
+ * footprint small and thus squeeze size and zram pageflags into a flags
+ * member. The lower ZRAM_FLAG_SHIFT bits is for object size (excluding
+ * header), which cannot be larger than PAGE_SIZE (requiring PAGE_SHIFT
+ * bits), the higher bits are for zram_pageflags.
*
- * zram is mainly used for memory efficiency so we want to keep memory
- * footprint small so we can squeeze size and flags into a field.
- * The lower ZRAM_FLAG_SHIFT bits is for object size (excluding header),
- * the higher bits is for zram_pageflags.
+ * We use BUILD_BUG_ON() to make sure that zram pageflags don't overflow.
*/
-#define ZRAM_FLAG_SHIFT 24
+#define ZRAM_FLAG_SHIFT (PAGE_SHIFT + 1)
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
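As a rough sketch of the packing scheme this header comment describes (the DEMO_* names are invented, not the zram symbols), the new BUILD_BUG_ON() in zram_init() amounts to a compile-time check like this:

#include <limits.h>

#define DEMO_PAGE_SHIFT  12
#define DEMO_FLAG_SHIFT  (DEMO_PAGE_SHIFT + 1)  /* object size lives in bits 0..PAGE_SHIFT */

enum demo_pageflags {
	DEMO_LOCK = DEMO_FLAG_SHIFT,            /* flag bits start right above the size field */
	DEMO_SAME,
	DEMO_WB,
	__NR_DEMO_PAGEFLAGS,
};

/* equivalent of BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG) */
_Static_assert(__NR_DEMO_PAGEFLAGS <= sizeof(unsigned long) * CHAR_BIT,
	       "size + pageflags must fit in the unsigned long flags member");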
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index fa2246da63c1..caa4ce28cf9e 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -843,7 +843,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct mhi_controller *mhi_cntrl;
int err;
- dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);
+ dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
/* mhi_pdev.mhi_cntrl must be zero-initialized */
mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 5dc2669432ba..d51573ac525e 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -466,18 +466,7 @@ static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
struct mvebu_mbus_state *mbus = &mbus_state;
return mbus->soc->show_cpu_target(mbus, seq, v);
}
-
-static int mvebu_sdram_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvebu_sdram_debug_show, inode->i_private);
-}
-
-static const struct file_operations mvebu_sdram_debug_fops = {
- .open = mvebu_sdram_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvebu_sdram_debug);
static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
{
@@ -516,18 +505,7 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int mvebu_devs_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvebu_devs_debug_show, inode->i_private);
-}
-
-static const struct file_operations mvebu_devs_debug_fops = {
- .open = mvebu_devs_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvebu_devs_debug);
/*
* SoC-specific functions and definitions
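The conversion above relies on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates the _open() helper and the file_operations from a single _show() callback. A hedged sketch of the pattern with hypothetical names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "state dump goes here\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);		/* generates foo_open() and foo_fops */

static void foo_debugfs_init(struct dentry *parent)
{
	debugfs_create_file("foo", 0444, parent, NULL, &foo_fops);
}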
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
index b24ac39a903b..e34c3ea692b6 100644
--- a/drivers/char/hw_random/arm_smccc_trng.c
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
MAX_BITS_PER_CALL);
arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
- if ((int)res.a0 < 0)
- return (int)res.a0;
switch ((int)res.a0) {
case SMCCC_RET_SUCCESS:
@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return copied;
cond_resched();
break;
+ default:
+ return -EIO;
}
}
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index e7dd457e9b22..e98fcac578d6 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
if (!wait)
return 0;
- cpu_relax();
+ hwrng_msleep(rng, 1000);
}
num_words = rng_readl(priv, RNG_STATUS) >> 24;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 16f227b995e8..cc002b0c2f0c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(default_quality,
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
-static void hwrng_manage_rngd(struct hwrng *rng);
+static int hwrng_fillfn(void *unused);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
@@ -96,6 +96,15 @@ static int set_current_rng(struct hwrng *rng)
drop_current_rng();
current_rng = rng;
+ /* if necessary, start hwrng thread */
+ if (!hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
+ }
+
return 0;
}
@@ -167,8 +176,6 @@ skip_init:
rng->quality = 1024;
current_quality = rng->quality; /* obsolete */
- hwrng_manage_rngd(rng);
-
return 0;
}
@@ -454,10 +461,6 @@ static ssize_t rng_quality_store(struct device *dev,
/* the best available RNG may have changed */
ret = enable_best_rng();
- /* start/stop rngd if necessary */
- if (current_rng)
- hwrng_manage_rngd(current_rng);
-
out:
mutex_unlock(&rng_mutex);
return ret ? ret : len;
@@ -507,16 +510,14 @@ static int hwrng_fillfn(void *unused)
rng->quality = current_quality; /* obsolete */
quality = rng->quality;
mutex_unlock(&reading_mutex);
- put_rng(rng);
- if (!quality)
- break;
+ if (rc <= 0)
+ hwrng_msleep(rng, 10000);
- if (rc <= 0) {
- pr_warn("hwrng: no data available\n");
- msleep_interruptible(10000);
+ put_rng(rng);
+
+ if (rc <= 0)
continue;
- }
/* If we cannot credit at least one bit of entropy,
* keep track of the remainder for the next iteration
@@ -533,22 +534,6 @@ static int hwrng_fillfn(void *unused)
return 0;
}
-static void hwrng_manage_rngd(struct hwrng *rng)
-{
- if (WARN_ON(!mutex_is_locked(&rng_mutex)))
- return;
-
- if (rng->quality == 0 && hwrng_fill)
- kthread_stop(hwrng_fill);
- if (rng->quality > 0 && !hwrng_fill) {
- hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed\n");
- hwrng_fill = NULL;
- }
- }
-}
-
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
@@ -570,6 +555,7 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
+ init_completion(&rng->dying);
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
@@ -617,6 +603,7 @@ void hwrng_unregister(struct hwrng *rng)
old_rng = current_rng;
list_del(&rng->list);
+ complete_all(&rng->dying);
if (current_rng == rng) {
err = enable_best_rng();
if (err) {
@@ -685,6 +672,14 @@ void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
+long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+{
+ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+ return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
+}
+EXPORT_SYMBOL_GPL(hwrng_msleep);
+
static int __init hwrng_modinit(void)
{
int ret;
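The new hwrng_msleep() sleeps on the rng's dying completion, so a driver's polling loop sleeps interruptibly instead of spinning and still wakes up early when the device is being unregistered. A hedged sketch of that usage (demo_* names are invented; the bcm2835-rng hunk above is the real in-tree user):

static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct demo_rng *priv = container_of(rng, struct demo_rng, rng);

	while (!demo_rng_data_ready(priv)) {
		if (!wait)
			return 0;
		/* non-zero: interrupted by a signal, or the rng is going away */
		if (hwrng_msleep(rng, 1000))
			return 0;
	}

	return demo_rng_copy(priv, buf, max);
}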
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index b05d676ca814..a1c24148ed31 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -245,7 +245,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
- rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(rngc->clk)) {
dev_err(&pdev->dev, "Can not get rng_clk\n");
return PTR_ERR(rngc->clk);
@@ -255,27 +255,14 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = clk_prepare_enable(rngc->clk);
- if (ret)
- return ret;
-
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = ver_id >> RNGC_TYPE_SHIFT;
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
- if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
- ret = -ENODEV;
- goto err;
- }
-
- ret = devm_request_irq(&pdev->dev,
- irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret) {
- dev_err(rngc->dev, "Can't get interrupt working.\n");
- goto err;
- }
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ return -ENODEV;
init_completion(&rngc->rng_op_done);
@@ -290,18 +277,25 @@ static int imx_rngc_probe(struct platform_device *pdev)
imx_rngc_irq_mask_clear(rngc);
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
dev_err(rngc->dev, "self test failed\n");
- goto err;
+ return ret;
}
}
- ret = hwrng_register(&rngc->rng);
+ ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret) {
dev_err(&pdev->dev, "hwrng registration failed\n");
- goto err;
+ return ret;
}
dev_info(&pdev->dev,
@@ -309,22 +303,6 @@ static int imx_rngc_probe(struct platform_device *pdev)
rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
(ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;
-
-err:
- clk_disable_unprepare(rngc->clk);
-
- return ret;
-}
-
-static int __exit imx_rngc_remove(struct platform_device *pdev)
-{
- struct imx_rngc *rngc = platform_get_drvdata(pdev);
-
- hwrng_unregister(&rngc->rng);
-
- clk_disable_unprepare(rngc->clk);
-
- return 0;
}
static int __maybe_unused imx_rngc_suspend(struct device *dev)
@@ -355,11 +333,10 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
- .name = "imx_rngc",
+ .name = KBUILD_MODNAME,
.pm = &imx_rngc_pm_ops,
.of_match_table = imx_rngc_dt_ids,
},
- .remove = __exit_p(imx_rngc_remove),
};
module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
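This is the standard devm conversion: once the clock, IRQ and hwrng registration are all device-managed, the error-unwind labels and the remove() callback disappear. A hedged sketch of the resulting probe() shape, with invented demo_* names:

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;
	int irq, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* acquired, prepared and enabled; undone automatically on unbind */
	priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, demo_irq, 0, pdev->name, priv);
	if (ret)
		return ret;

	/* unregistered automatically, so no .remove() callback is needed */
	return devm_hwrng_register(&pdev->dev, &priv->rng);
}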
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index b061e6b513ed..39565cf74b2c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -119,13 +119,13 @@ config ASPEED_KCS_IPMI_BMC
provides the access of KCS IO space for BMC side.
config NPCM7XX_KCS_IPMI_BMC
- depends on ARCH_NPCM7XX || COMPILE_TEST
+ depends on ARCH_NPCM || COMPILE_TEST
select IPMI_KCS_BMC
select REGMAP_MMIO
- tristate "NPCM7xx KCS IPMI BMC driver"
+ tristate "NPCM KCS IPMI BMC driver"
help
Provides a driver for the KCS (Keyboard Controller Style) IPMI
- interface found on Nuvoton NPCM7xx SOCs.
+ interface found on Nuvoton NPCM SOCs.
The driver implements the BMC side of the KCS controller; it
provides the access of KCS IO space for BMC side.
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 25c010c9ec25..7c1aee5e11b7 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -218,8 +218,8 @@ static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev,
{
if ((msg->data[0] >> 2) & 1) {
/*
- * It's a response being sent, we needto return a
- * response response. Fake a send msg command
+ * It's a response being sent, we need to return a
+ * response to the response. Fake a send msg command
* response with channel 0. This will always be ipmb
* direct.
*/
@@ -424,10 +424,8 @@ static void ipmi_ipmb_request_events(void *send_info)
/* We don't fetch events here. */
}
-static void ipmi_ipmb_remove(struct i2c_client *client)
+static void ipmi_ipmb_cleanup(struct ipmi_ipmb_dev *iidev)
{
- struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
-
if (iidev->slave) {
i2c_slave_unregister(iidev->slave);
if (iidev->slave != iidev->client)
@@ -436,7 +434,13 @@ static void ipmi_ipmb_remove(struct i2c_client *client)
iidev->slave = NULL;
iidev->client = NULL;
ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_remove(struct i2c_client *client)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+ ipmi_ipmb_cleanup(iidev);
ipmi_unregister_smi(iidev->intf);
}
@@ -542,7 +546,7 @@ static int ipmi_ipmb_probe(struct i2c_client *client)
out_err:
if (slave && slave != client)
i2c_unregister_device(slave);
- ipmi_ipmb_remove(client);
+ ipmi_ipmb_cleanup(iidev);
return rv;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 703433493c85..49a1707693c9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -736,12 +736,6 @@ static void intf_free(struct kref *ref)
kfree(intf);
}
-struct watcher_entry {
- int intf_num;
- struct ipmi_smi *intf;
- struct list_head link;
-};
-
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
@@ -4357,7 +4351,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
/*
* The message starts at byte 4 which follows the
- * the Channel Byte in the "GET MESSAGE" command
+ * Channel Byte in the "GET MESSAGE" command
*/
recv_msg->msg.data_len = msg->rsp_size - 4;
memcpy(recv_msg->msg_data, &msg->rsp[4],
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 13da021e7c6b..e1072809fe31 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -2098,7 +2098,7 @@ static struct platform_driver ipmi_driver = {
.id_table = ssif_plat_ids
};
-static int init_ipmi_ssif(void)
+static int __init init_ipmi_ssif(void)
{
int i;
int rv;
@@ -2140,7 +2140,7 @@ static int init_ipmi_ssif(void)
}
module_init(init_ipmi_ssif);
-static void cleanup_ipmi_ssif(void)
+static void __exit cleanup_ipmi_ssif(void)
{
if (!initialized)
return;
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index cdc88cde1e9a..19c32bf50e0e 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -207,17 +207,24 @@ static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask,
}
/*
- * AST_usrGuide_KCS.pdf
- * 2. Background:
- * we note D for Data, and C for Cmd/Status, default rules are
- * A. KCS1 / KCS2 ( D / C:X / X+4 )
- * D / C : CA0h / CA4h
- * D / C : CA8h / CACh
- * B. KCS3 ( D / C:XX2h / XX3h )
- * D / C : CA2h / CA3h
- * D / C : CB2h / CB3h
- * C. KCS4
- * D / C : CA4h / CA5h
+ * We note D for Data, and C for Cmd/Status, default rules are
+ *
+ * 1. Only the D address is given:
+ * A. KCS1/KCS2 (D/C: X/X+4)
+ * D/C: CA0h/CA4h
+ * D/C: CA8h/CACh
+ * B. KCS3 (D/C: XX2/XX3h)
+ * D/C: CA2h/CA3h
+ * C. KCS4 (D/C: X/X+1)
+ * D/C: CA4h/CA5h
+ *
+ * 2. Both the D/C addresses are given:
+ * A. KCS1/KCS2/KCS4 (D/C: X/Y)
+ * D/C: CA0h/CA1h
+ * D/C: CA8h/CA9h
+ * D/C: CA4h/CA5h
+ * B. KCS3 (D/C: XX2/XX3h)
+ * D/C: CA2h/CA3h
*/
static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
{
diff --git a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
index 486834a962c3..cf670e891966 100644
--- a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
+++ b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
@@ -548,7 +548,7 @@ static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
.ops = &kcs_bmc_ipmi_driver_ops,
};
-static int kcs_bmc_ipmi_init(void)
+static int __init kcs_bmc_ipmi_init(void)
{
kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);
@@ -556,7 +556,7 @@ static int kcs_bmc_ipmi_init(void)
}
module_init(kcs_bmc_ipmi_init);
-static void kcs_bmc_ipmi_exit(void)
+static void __exit kcs_bmc_ipmi_exit(void)
{
kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
}
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
index 7e2067628a6c..1793358be782 100644
--- a/drivers/char/ipmi/kcs_bmc_serio.c
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -140,7 +140,7 @@ static struct kcs_bmc_driver kcs_bmc_serio_driver = {
.ops = &kcs_bmc_serio_driver_ops,
};
-static int kcs_bmc_serio_init(void)
+static int __init kcs_bmc_serio_init(void)
{
kcs_bmc_register_driver(&kcs_bmc_serio_driver);
@@ -148,7 +148,7 @@ static int kcs_bmc_serio_init(void)
}
module_init(kcs_bmc_serio_init);
-static void kcs_bmc_serio_exit(void)
+static void __exit kcs_bmc_serio_exit(void)
{
kcs_bmc_unregister_driver(&kcs_bmc_serio_driver);
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 32a932a065a6..5611d127363e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -712,8 +712,8 @@ static const struct memdev {
#endif
[5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
[7] = { "full", 0666, &full_fops, 0 },
- [8] = { "random", 0666, &random_fops, 0 },
- [9] = { "urandom", 0666, &urandom_fops, 0 },
+ [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
+ [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 8fc49b038372..b2735be81ab2 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2274,7 +2274,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
* tty pointer to tty structure
* termios pointer to buffer to hold returned old termios
*/
-static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void mgslpc_set_termios(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
unsigned long flags;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 79d7d4e4e582..2fe28eeb2f38 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -96,8 +96,8 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
/*
* Returns whether or not the input pool has been seeded and thus guaranteed
* to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * ,u64,int,long} family of functions.
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
+ * u16,u32,u64,long} family of functions.
*
* Returns: true if the input pool has been seeded.
* false if the input pool has not been seeded.
@@ -119,9 +119,9 @@ static void try_to_generate_entropy(void);
/*
* Wait for the input pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
+ * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
+ * int,long} family of functions. Using any of these functions without first
+ * calling this function forfeits the guarantee of security.
*
* Returns: 0 if the input pool has been seeded.
* -ERESTARTSYS if the function was interrupted by a signal.
@@ -157,14 +157,15 @@ EXPORT_SYMBOL(wait_for_random_bytes);
* There are a few exported interfaces for use by other drivers:
*
* void get_random_bytes(void *buf, size_t len)
+ * u8 get_random_u8()
+ * u16 get_random_u16()
* u32 get_random_u32()
* u64 get_random_u64()
- * unsigned int get_random_int()
* unsigned long get_random_long()
*
* These interfaces will return the requested number of random bytes
* into the given buffer or as a return value. This is equivalent to
- * a read from /dev/urandom. The u32, u64, int, and long family of
+ * a read from /dev/urandom. The u8, u16, u32, u64, long family of
* functions may be higher performance for one-off random integers,
* because they do a bit of buffering and do not invoke reseeding
* until the buffer is emptied.
@@ -260,25 +261,23 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
}
/*
- * Return whether the crng seed is considered to be sufficiently old
- * that a reseeding is needed. This happens if the last reseeding
- * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
+ * Return the interval until the next reseeding, which is normally
+ * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
* proportional to the uptime.
*/
-static bool crng_has_old_seed(void)
+static unsigned int crng_reseed_interval(void)
{
static bool early_boot = true;
- unsigned long interval = CRNG_RESEED_INTERVAL;
if (unlikely(READ_ONCE(early_boot))) {
time64_t uptime = ktime_get_seconds();
if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
WRITE_ONCE(early_boot, false);
else
- interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
- (unsigned int)uptime / 2 * HZ);
+ return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
}
- return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
+ return CRNG_RESEED_INTERVAL;
}
/*
@@ -320,7 +319,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
* If the base_crng is old enough, we reseed, which in turn bumps the
* generation counter that we check below.
*/
- if (unlikely(crng_has_old_seed()))
+ if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
crng_reseed();
local_lock_irqsave(&crngs.lock, flags);
@@ -384,11 +383,11 @@ static void _get_random_bytes(void *buf, size_t len)
}
/*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. In order to ensure that the randomness
- * by this function is okay, the function wait_for_random_bytes()
- * should be called and return 0 at least once at any point prior.
+ * This function is the exported kernel interface. It returns some number of
+ * good random numbers, suitable for key generation, seeding TCP sequence
+ * numbers, etc. In order to ensure that the randomness returned by this
+ * function is okay, the function wait_for_random_bytes() should be called and
+ * return 0 at least once at any point prior.
*/
void get_random_bytes(void *buf, size_t len)
{
@@ -506,8 +505,10 @@ type get_random_ ##type(void) \
} \
EXPORT_SYMBOL(get_random_ ##type);
-DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u8)
+DEFINE_BATCHED_ENTROPY(u16)
DEFINE_BATCHED_ENTROPY(u32)
+DEFINE_BATCHED_ENTROPY(u64)
#ifdef CONFIG_SMP
/*
@@ -522,6 +523,8 @@ int __cold random_prepare_cpu(unsigned int cpu)
* randomness.
*/
per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
return 0;
@@ -774,18 +777,13 @@ static int random_pm_notification(struct notifier_block *nb, unsigned long actio
static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
/*
- * The first collection of entropy occurs at system boot while interrupts
- * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
- * utsname(), and the command line. Depending on the above configuration knob,
- * RDSEED may be considered sufficient for initialization. Note that much
- * earlier setup may already have pushed entropy into the input pool by the
- * time we get here.
+ * This is called extremely early, before timekeeping functionality is
+ * available, but arch randomness is. Interrupts are not yet enabled.
*/
-int __init random_init(const char *command_line)
+void __init random_init_early(const char *command_line)
{
- ktime_t now = ktime_get_real();
- size_t i, longs, arch_bits;
unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
+ size_t i, longs, arch_bits;
#if defined(LATENT_ENTROPY_PLUGIN)
static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
@@ -805,34 +803,49 @@ int __init random_init(const char *command_line)
i += longs;
continue;
}
- entropy[0] = random_get_entropy();
- _mix_pool_bytes(entropy, sizeof(*entropy));
arch_bits -= sizeof(*entropy) * 8;
++i;
}
- _mix_pool_bytes(&now, sizeof(now));
- _mix_pool_bytes(utsname(), sizeof(*(utsname())));
+
+ _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
_mix_pool_bytes(command_line, strlen(command_line));
+
+ /* Reseed if already seeded by earlier phases. */
+ if (crng_ready())
+ crng_reseed();
+ else if (trust_cpu)
+ _credit_init_bits(arch_bits);
+}
+
+/*
+ * This is called a little bit after the prior function, and now there is
+ * access to timestamp counters. Interrupts are not yet enabled.
+ */
+void __init random_init(void)
+{
+ unsigned long entropy = random_get_entropy();
+ ktime_t now = ktime_get_real();
+
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
add_latent_entropy();
/*
- * If we were initialized by the bootloader before jump labels are
- * initialized, then we should enable the static branch here, where
+ * If we were initialized by the cpu or bootloader before jump labels
+ * are initialized, then we should enable the static branch here, where
* it's guaranteed that jump labels have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
+ /* Reseed if already seeded by earlier phases. */
if (crng_ready())
crng_reseed();
- else if (trust_cpu)
- _credit_init_bits(arch_bits);
WARN_ON(register_pm_notifier(&pm_notifier));
- WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG "
- "entropy collection will consequently suffer.");
- return 0;
+ WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
+ "entropy collection will consequently suffer.");
}
/*
@@ -866,11 +879,11 @@ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
credit_init_bits(entropy);
/*
- * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
- * we're not yet initialized.
+ * Throttle writing to once every reseed interval, unless we're not yet
+ * initialized or no entropy is credited.
*/
- if (!kthread_should_stop() && crng_ready())
- schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
+ if (!kthread_should_stop() && (crng_ready() || !entropy))
+ schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
@@ -920,20 +933,23 @@ EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif
struct fast_pool {
- struct work_struct mix;
unsigned long pool[4];
unsigned long last;
unsigned int count;
+ struct timer_list mix;
};
+static void mix_interrupt_randomness(struct timer_list *work);
+
static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
- .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
- .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
+ .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};
/*
@@ -975,7 +991,7 @@ int __cold random_online_cpu(unsigned int cpu)
}
#endif
-static void mix_interrupt_randomness(struct work_struct *work)
+static void mix_interrupt_randomness(struct timer_list *work)
{
struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
/*
@@ -1006,7 +1022,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
local_irq_enable();
mix_pool_bytes(pool, sizeof(pool));
- credit_init_bits(max(1u, (count & U16_MAX) / 64));
+ credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
memzero_explicit(pool, sizeof(pool));
}
@@ -1029,10 +1045,11 @@ void add_interrupt_randomness(int irq)
if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
return;
- if (unlikely(!fast_pool->mix.func))
- INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
fast_pool->count |= MIX_INFLIGHT;
- queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
+ if (!timer_pending(&fast_pool->mix)) {
+ fast_pool->mix.expires = jiffies;
+ add_timer_on(&fast_pool->mix, raw_smp_processor_id());
+ }
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1191,7 +1208,7 @@ static void __cold entropy_timer(struct timer_list *timer)
*/
static void __cold try_to_generate_entropy(void)
{
- enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 };
+ enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
struct entropy_timer_state stack;
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
@@ -1210,7 +1227,7 @@ static void __cold try_to_generate_entropy(void)
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
while (!crng_ready() && !signal_pending(current)) {
if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies + 1);
+ mod_timer(&stack.timer, jiffies);
mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
schedule();
stack.entropy = random_get_entropy();
@@ -1347,6 +1364,11 @@ static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;
+ if (!crng_ready() &&
+ ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
+ (kiocb->ki_filp->f_flags & O_NONBLOCK)))
+ return -EAGAIN;
+
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
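With FMODE_NOWAIT set on /dev/random and /dev/urandom and the -EAGAIN check added to random_read_iter(), a non-blocking read fails fast while the pool is still unseeded instead of blocking. A small userspace sketch of the resulting behaviour (not part of the patch):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf)) < 0 && errno == EAGAIN)
		fprintf(stderr, "entropy pool not ready yet, try again later\n");
	close(fd);
	return 0;
}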
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 40018a73b3cb..bc7b1b4501b3 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -380,7 +380,7 @@ void tpm_add_ppi(struct tpm_chip *chip)
TPM_PPI_FN_VERSION,
NULL, ACPI_TYPE_STRING);
if (obj) {
- strlcpy(chip->ppi_version, obj->string.pointer,
+ strscpy(chip->ppi_version, obj->string.pointer,
sizeof(chip->ppi_version));
ACPI_FREE(obj);
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 48f8f4221e21..d79905f3e174 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -249,7 +249,7 @@ config COMMON_CLK_GEMINI
platform, also known as SL3516 or CS3516.
config COMMON_CLK_LAN966X
- bool "Generic Clock Controller driver for LAN966X SoC"
+ tristate "Generic Clock Controller driver for LAN966X SoC"
depends on HAS_IOMEM
depends on OF
depends on SOC_LAN966 || COMPILE_TEST
@@ -377,6 +377,15 @@ config COMMON_CLK_VC5
This driver supports the IDT VersaClock 5 and VersaClock 6
programmable clock generators.
+config COMMON_CLK_VC7
+ tristate "Clock driver for Renesas Versaclock 7 devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ Renesas Versaclock7 is a family of configurable clock generator
+ and jitter attenuator ICs with fractional and integer dividers.
+
config COMMON_CLK_STM32MP135
def_bool COMMON_CLK && MACH_STM32MP13
help
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d5db170d38d2..e3ca0d058a25 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_RS9_PCIE) += clk-renesas-pcie.o
obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o
+obj-$(CONFIG_COMMON_CLK_VC7) += clk-versaclock7.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index d429ba52a719..943ea67bf135 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -136,7 +136,6 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
{
struct clk_generated *gck = to_clk_generated(hw);
struct clk_hw *parent = NULL;
- struct clk_rate_request req_parent = *req;
long best_rate = -EINVAL;
unsigned long min_rate, parent_rate;
int best_diff = -1;
@@ -192,7 +191,9 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
goto end;
for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
- req_parent.rate = req->rate * div;
+ struct clk_rate_request req_parent;
+
+ clk_hw_forward_rate_request(hw, req, parent, &req_parent, req->rate * div);
if (__clk_determine_rate(parent, &req_parent))
continue;
clk_generated_best_diff(req, parent, req_parent.rate, div,
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 164e2959c7cf..b7cd1924de52 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -581,7 +581,6 @@ static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_master *master = to_clk_master(hw);
- struct clk_rate_request req_parent = *req;
struct clk_hw *parent;
long best_rate = LONG_MIN, best_diff = LONG_MIN;
unsigned long parent_rate;
@@ -618,11 +617,15 @@ static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
goto end;
for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
+ struct clk_rate_request req_parent;
+ unsigned long req_rate;
+
if (div == MASTER_PRES_MAX)
- req_parent.rate = req->rate * 3;
+ req_rate = req->rate * 3;
else
- req_parent.rate = req->rate << div;
+ req_rate = req->rate << div;
+ clk_hw_forward_rate_request(hw, req, parent, &req_parent, req_rate);
if (__clk_determine_rate(parent, &req_parent))
continue;
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index e14fa5ac734c..5104d4025484 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -269,7 +269,6 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
- struct clk_rate_request req_parent = *req;
unsigned long parent_rate = clk_hw_get_rate(parent);
unsigned long tmp_rate;
long best_rate = LONG_MIN;
@@ -302,8 +301,9 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
goto end;
for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- req_parent.rate = req->rate << shift;
+ struct clk_rate_request req_parent;
+ clk_hw_forward_rate_request(hw, req, parent, &req_parent, req->rate << shift);
if (__clk_determine_rate(parent, &req_parent))
continue;
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index 8ca8bcacf66d..85a964cb2d89 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -33,8 +33,11 @@ static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
+ struct device_node *parent_np;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -56,8 +59,11 @@ static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
+ struct device_node *parent_np;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -79,8 +85,11 @@ static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
+ struct device_node *parent_np;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -120,7 +129,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
struct clk_hw *hw;
unsigned int num_parents;
const char *parent_names[GENERATED_SOURCE_MAX];
- struct device_node *gcknp;
+ struct device_node *gcknp, *parent_np;
struct clk_range range = CLK_RANGE(0, 0);
struct regmap *regmap;
@@ -134,7 +143,9 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
if (!num || num > PERIPHERAL_MAX)
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -180,8 +191,11 @@ static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
+ struct device_node *parent_np;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -243,12 +257,15 @@ static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
const char *parent_name;
struct regmap *regmap;
bool bypass;
+ struct device_node *parent_np;
of_property_read_string(np, "clock-output-names", &name);
bypass = of_property_read_bool(np, "atmel,osc-bypass");
parent_name = of_clk_get_parent_name(np, 0);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -268,12 +285,15 @@ static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
u32 accuracy = 0;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
of_property_read_string(np, "clock-output-names", &name);
of_property_read_u32(np, "clock-frequency", &frequency);
of_property_read_u32(np, "clock-accuracy", &accuracy);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -292,11 +312,14 @@ static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -316,13 +339,16 @@ static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
unsigned int num_parents;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -373,6 +399,7 @@ of_at91_clk_master_setup(struct device_node *np,
const char *name = np->name;
struct clk_master_characteristics *characteristics;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > MASTER_SOURCE_MAX)
@@ -386,7 +413,9 @@ of_at91_clk_master_setup(struct device_node *np,
if (!characteristics)
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -433,6 +462,7 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
const char *name;
struct device_node *periphclknp;
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -442,7 +472,9 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
if (!num || num > PERIPHERAL_MAX)
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -601,6 +633,7 @@ of_at91_clk_pll_setup(struct device_node *np,
struct regmap *regmap;
const char *parent_name;
const char *name = np->name;
+ struct device_node *parent_np;
struct clk_pll_characteristics *characteristics;
if (of_property_read_u32(np, "reg", &id))
@@ -610,7 +643,9 @@ of_at91_clk_pll_setup(struct device_node *np,
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -665,12 +700,15 @@ of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -694,7 +732,7 @@ of_at91_clk_prog_setup(struct device_node *np,
unsigned int num_parents;
const char *parent_names[PROG_SOURCE_MAX];
const char *name;
- struct device_node *progclknp;
+ struct device_node *progclknp, *parent_np;
struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
@@ -707,7 +745,9 @@ of_at91_clk_prog_setup(struct device_node *np,
if (!num || num > (PROG_ID_MAX + 1))
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -756,13 +796,16 @@ static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
unsigned int num_parents;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents != 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -788,6 +831,7 @@ static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
const char *parent_names[SMD_SOURCE_MAX];
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > SMD_SOURCE_MAX)
@@ -797,7 +841,9 @@ static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -818,7 +864,7 @@ static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
u32 id;
struct clk_hw *hw;
const char *name;
- struct device_node *sysclknp;
+ struct device_node *sysclknp, *parent_np;
const char *parent_name;
struct regmap *regmap;
@@ -826,7 +872,9 @@ static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
if (num > (SYSTEM_MAX_ID + 1))
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -859,6 +907,7 @@ static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
const char *parent_names[USB_SOURCE_MAX];
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > USB_SOURCE_MAX)
@@ -868,7 +917,9 @@ static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -888,6 +939,7 @@ static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -895,7 +947,9 @@ static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -915,6 +969,7 @@ static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
const char *name = np->name;
u32 divisors[4] = {0, 0, 0, 0};
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -926,7 +981,9 @@ static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
hw = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
@@ -946,12 +1003,15 @@ static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
const char *parent_name;
const char *name = np->name;
struct regmap *regmap_pmc, *regmap_sfr;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- regmap_pmc = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap_pmc = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap_pmc))
return;
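Every hunk in this file applies the same fix: of_get_parent() returns the parent node with its refcount raised, so the reference must be dropped with of_node_put() once the regmap lookup is done, whether or not it succeeded. In isolation the pattern is simply:

	struct device_node *parent_np = of_get_parent(np);	/* takes a reference */
	struct regmap *regmap = syscon_node_to_regmap(parent_np);

	of_node_put(parent_np);					/* drop it either way */
	if (IS_ERR(regmap))
		return;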
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index cfd0f5e23b99..84156dc52bff 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -120,6 +120,16 @@ static const struct {
struct clk_range r;
int chg_pid;
} sama5d2_gck[] = {
+ { .n = "flx0_gclk", .id = 19, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "flx1_gclk", .id = 20, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "flx2_gclk", .id = 21, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "flx3_gclk", .id = 22, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "flx4_gclk", .id = 23, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart0_gclk", .id = 24, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart1_gclk", .id = 25, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart2_gclk", .id = 26, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart3_gclk", .id = 27, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart4_gclk", .id = 28, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
{ .n = "sdmmc0_gclk", .id = 31, .chg_pid = INT_MIN, },
{ .n = "sdmmc1_gclk", .id = 32, .chg_pid = INT_MIN, },
{ .n = "tcb0_gclk", .id = 35, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
diff --git a/drivers/clk/baikal-t1/Kconfig b/drivers/clk/baikal-t1/Kconfig
index 03102f1094bc..f0b186830324 100644
--- a/drivers/clk/baikal-t1/Kconfig
+++ b/drivers/clk/baikal-t1/Kconfig
@@ -29,7 +29,6 @@ config CLK_BT1_CCU_PLL
config CLK_BT1_CCU_DIV
bool "Baikal-T1 CCU Dividers support"
- select RESET_CONTROLLER
select MFD_SYSCON
default MIPS_BAIKAL_T1
help
@@ -39,4 +38,15 @@ config CLK_BT1_CCU_DIV
either gateable or ungateable. Some of the CCU dividers can also be
used to reset the domains they supply clocks to.
+config CLK_BT1_CCU_RST
+ bool "Baikal-T1 CCU Resets support"
+ select RESET_CONTROLLER
+ select MFD_SYSCON
+ default MIPS_BAIKAL_T1
+ help
+ Enable this to support the CCU reset blocks responsible for the
+ AXI-bus and some subsystem resets. These are mainly
+ self-deasserting reset controls, but there are several lines which
+ can be directly asserted/de-asserted (PCIe and DDR sub-domains).
+
endif
diff --git a/drivers/clk/baikal-t1/Makefile b/drivers/clk/baikal-t1/Makefile
index b3b9590b95ed..9c3637de9407 100644
--- a/drivers/clk/baikal-t1/Makefile
+++ b/drivers/clk/baikal-t1/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CLK_BT1_CCU_PLL) += ccu-pll.o clk-ccu-pll.o
obj-$(CONFIG_CLK_BT1_CCU_DIV) += ccu-div.o clk-ccu-div.o
+obj-$(CONFIG_CLK_BT1_CCU_RST) += ccu-rst.o
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
index 4062092d67f9..8d5fc7158f33 100644
--- a/drivers/clk/baikal-t1/ccu-div.c
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -34,9 +34,9 @@
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27)
+#define CCU_DIV_CTL_GATE_REF_BUF BIT(28)
#define CCU_DIV_CTL_LOCK_NORMAL BIT(31)
-#define CCU_DIV_RST_DELAY_US 1
#define CCU_DIV_LOCK_CHECK_RETRIES 50
#define CCU_DIV_CLKDIV_MIN 0
@@ -170,6 +170,40 @@ static int ccu_div_gate_is_enabled(struct clk_hw *hw)
return !!(val & CCU_DIV_CTL_EN);
}
+static int ccu_div_buf_enable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_GATE_REF_BUF, 0);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static void ccu_div_buf_disable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
+ spin_unlock_irqrestore(&div->lock, flags);
+}
+
+static int ccu_div_buf_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ u32 val = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+
+ return !(val & CCU_DIV_CTL_GATE_REF_BUF);
+}
+
static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -288,24 +322,6 @@ static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-int ccu_div_reset_domain(struct ccu_div *div)
-{
- unsigned long flags;
-
- if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
- return -EINVAL;
-
- spin_lock_irqsave(&div->lock, flags);
- regmap_update_bits(div->sys_regs, div->reg_ctl,
- CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
- spin_unlock_irqrestore(&div->lock, flags);
-
- /* The next delay must be enough to cover all the resets. */
- udelay(CCU_DIV_RST_DELAY_US);
-
- return 0;
-}
-
#ifdef CONFIG_DEBUG_FS
struct ccu_div_dbgfs_bit {
@@ -323,6 +339,7 @@ static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
+ CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};
@@ -441,6 +458,9 @@ static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
continue;
}
+ if (!strcmp("div_buf", name))
+ continue;
+
bits[didx] = ccu_div_bits[bidx];
bits[didx].div = div;
@@ -477,6 +497,21 @@ static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
&ccu_div_dbgfs_fixed_clkdiv_fops);
}
+static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ struct ccu_div_dbgfs_bit *bit;
+
+ bit = kmalloc(sizeof(*bit), GFP_KERNEL);
+ if (!bit)
+ return;
+
+ *bit = ccu_div_bits[3];
+ bit->div = div;
+ debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
+ &ccu_div_dbgfs_bit_fops);
+}
+
static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
struct ccu_div *div = to_ccu_div(hw);
@@ -489,6 +524,7 @@ static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
+#define ccu_div_buf_debug_init NULL
#define ccu_div_fixed_debug_init NULL
#endif /* !CONFIG_DEBUG_FS */
@@ -520,6 +556,13 @@ static const struct clk_ops ccu_div_gate_ops = {
.debug_init = ccu_div_gate_debug_init
};
+static const struct clk_ops ccu_div_buf_ops = {
+ .enable = ccu_div_buf_enable,
+ .disable = ccu_div_buf_disable,
+ .is_enabled = ccu_div_buf_is_enabled,
+ .debug_init = ccu_div_buf_debug_init
+};
+
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
.round_rate = ccu_div_fixed_round_rate,
@@ -566,6 +609,8 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
} else if (div_init->type == CCU_DIV_GATE) {
hw_init.ops = &ccu_div_gate_ops;
div->divider = div_init->divider;
+ } else if (div_init->type == CCU_DIV_BUF) {
+ hw_init.ops = &ccu_div_buf_ops;
} else if (div_init->type == CCU_DIV_FIXED) {
hw_init.ops = &ccu_div_fixed_ops;
div->divider = div_init->divider;
@@ -579,6 +624,7 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
goto err_free_div;
}
parent_data.fw_name = div_init->parent_name;
+ parent_data.name = div_init->parent_name;
hw_init.parent_data = &parent_data;
hw_init.num_parents = 1;
diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h
index 795665caefbd..76d8ee44d415 100644
--- a/drivers/clk/baikal-t1/ccu-div.h
+++ b/drivers/clk/baikal-t1/ccu-div.h
@@ -14,14 +14,25 @@
#include <linux/of.h>
/*
+ * CCU Divider private clock IDs
+ * @CCU_SYS_SATA_CLK: CCU SATA internal clock
+ * @CCU_SYS_XGMAC_CLK: CCU XGMAC internal clock
+ */
+#define CCU_SYS_SATA_CLK -1
+#define CCU_SYS_XGMAC_CLK -2
+
+/*
* CCU Divider private flags
+ * @CCU_DIV_BASIC: Basic divider clock required by the kernel as early as
+ * possible.
* @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1.
* It can be 0 though, which is functionally the same.
* @CCU_DIV_SKIP_ONE_TO_THREE: For some reason divider can't be within [1,3].
* It can be either 0 or greater than 3.
* @CCU_DIV_LOCK_SHIFTED: Find lock-bit at non-standard position.
- * @CCU_DIV_RESET_DOMAIN: Provide reset clock domain method.
+ * @CCU_DIV_RESET_DOMAIN: There is a clock domain reset handle.
*/
+#define CCU_DIV_BASIC BIT(0)
#define CCU_DIV_SKIP_ONE BIT(1)
#define CCU_DIV_SKIP_ONE_TO_THREE BIT(2)
#define CCU_DIV_LOCK_SHIFTED BIT(3)
@@ -31,11 +42,13 @@
* enum ccu_div_type - CCU Divider types
* @CCU_DIV_VAR: Clocks gate with variable divider.
* @CCU_DIV_GATE: Clocks gate with fixed divider.
+ * @CCU_DIV_BUF: Clock gate with no divider.
* @CCU_DIV_FIXED: Ungateable clock with fixed divider.
*/
enum ccu_div_type {
CCU_DIV_VAR,
CCU_DIV_GATE,
+ CCU_DIV_BUF,
CCU_DIV_FIXED
};
@@ -105,6 +118,4 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *init);
void ccu_div_hw_unregister(struct ccu_div *div);
-int ccu_div_reset_domain(struct ccu_div *div);
-
#endif /* __CLK_BT1_CCU_DIV_H__ */
diff --git a/drivers/clk/baikal-t1/ccu-pll.h b/drivers/clk/baikal-t1/ccu-pll.h
index 76cd9132a219..a71bfd7b90ec 100644
--- a/drivers/clk/baikal-t1/ccu-pll.h
+++ b/drivers/clk/baikal-t1/ccu-pll.h
@@ -14,6 +14,12 @@
#include <linux/of.h>
/*
+ * CCU PLL private flags
+ * @CCU_PLL_BASIC: Basic PLL required by the kernel as early as possible.
+ */
+#define CCU_PLL_BASIC BIT(0)
+
+/*
* struct ccu_pll_init_data - CCU PLL initialization data
* @id: Clock private identifier.
* @name: Clocks name.
@@ -22,6 +28,7 @@
* @sys_regs: Baikal-T1 System Controller registers map.
* @np: Pointer to the node describing the CCU PLLs.
* @flags: PLL clock flags.
+ * @features: PLL private features.
*/
struct ccu_pll_init_data {
unsigned int id;
@@ -31,6 +38,7 @@ struct ccu_pll_init_data {
struct regmap *sys_regs;
struct device_node *np;
unsigned long flags;
+ unsigned long features;
};
/*
diff --git a/drivers/clk/baikal-t1/ccu-rst.c b/drivers/clk/baikal-t1/ccu-rst.c
new file mode 100644
index 000000000000..40023ea67463
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-rst.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU Resets interface driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-rst: " fmt
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/reset/bt1-ccu.h>
+
+#include "ccu-rst.h"
+
+#define CCU_AXI_MAIN_BASE 0x030
+#define CCU_AXI_DDR_BASE 0x034
+#define CCU_AXI_SATA_BASE 0x038
+#define CCU_AXI_GMAC0_BASE 0x03C
+#define CCU_AXI_GMAC1_BASE 0x040
+#define CCU_AXI_XGMAC_BASE 0x044
+#define CCU_AXI_PCIE_M_BASE 0x048
+#define CCU_AXI_PCIE_S_BASE 0x04C
+#define CCU_AXI_USB_BASE 0x050
+#define CCU_AXI_HWA_BASE 0x054
+#define CCU_AXI_SRAM_BASE 0x058
+
+#define CCU_SYS_DDR_BASE 0x02c
+#define CCU_SYS_SATA_REF_BASE 0x060
+#define CCU_SYS_APB_BASE 0x064
+#define CCU_SYS_PCIE_BASE 0x144
+
+#define CCU_RST_DELAY_US 1
+
+#define CCU_RST_TRIG(_base, _ofs) \
+ { \
+ .type = CCU_RST_TRIG, \
+ .base = _base, \
+ .mask = BIT(_ofs), \
+ }
+
+#define CCU_RST_DIR(_base, _ofs) \
+ { \
+ .type = CCU_RST_DIR, \
+ .base = _base, \
+ .mask = BIT(_ofs), \
+ }
+
+struct ccu_rst_info {
+ enum ccu_rst_type type;
+ unsigned int base;
+ unsigned int mask;
+};
+
+/*
+ * Each AXI-bus clock divider is equipped with the corresponding clock-consumer
+ * domain reset (it's self-deasserted reset control).
+ */
+static const struct ccu_rst_info axi_rst_info[] = {
+ [CCU_AXI_MAIN_RST] = CCU_RST_TRIG(CCU_AXI_MAIN_BASE, 1),
+ [CCU_AXI_DDR_RST] = CCU_RST_TRIG(CCU_AXI_DDR_BASE, 1),
+ [CCU_AXI_SATA_RST] = CCU_RST_TRIG(CCU_AXI_SATA_BASE, 1),
+ [CCU_AXI_GMAC0_RST] = CCU_RST_TRIG(CCU_AXI_GMAC0_BASE, 1),
+ [CCU_AXI_GMAC1_RST] = CCU_RST_TRIG(CCU_AXI_GMAC1_BASE, 1),
+ [CCU_AXI_XGMAC_RST] = CCU_RST_TRIG(CCU_AXI_XGMAC_BASE, 1),
+ [CCU_AXI_PCIE_M_RST] = CCU_RST_TRIG(CCU_AXI_PCIE_M_BASE, 1),
+ [CCU_AXI_PCIE_S_RST] = CCU_RST_TRIG(CCU_AXI_PCIE_S_BASE, 1),
+ [CCU_AXI_USB_RST] = CCU_RST_TRIG(CCU_AXI_USB_BASE, 1),
+ [CCU_AXI_HWA_RST] = CCU_RST_TRIG(CCU_AXI_HWA_BASE, 1),
+ [CCU_AXI_SRAM_RST] = CCU_RST_TRIG(CCU_AXI_SRAM_BASE, 1),
+};
+
+/*
+ * SATA reference clock domain and APB-bus domain are connected with the
+ * self-deasserted reset control, which can be activated via the corresponding
+ * clock divider register. DDR and PCIe sub-domains can be reset with directly
+ * controlled reset signals. Resetting the DDR controller though won't end up
+ * well while the Linux kernel is working.
+ */
+static const struct ccu_rst_info sys_rst_info[] = {
+ [CCU_SYS_SATA_REF_RST] = CCU_RST_TRIG(CCU_SYS_SATA_REF_BASE, 1),
+ [CCU_SYS_APB_RST] = CCU_RST_TRIG(CCU_SYS_APB_BASE, 1),
+ [CCU_SYS_DDR_FULL_RST] = CCU_RST_DIR(CCU_SYS_DDR_BASE, 1),
+ [CCU_SYS_DDR_INIT_RST] = CCU_RST_DIR(CCU_SYS_DDR_BASE, 2),
+ [CCU_SYS_PCIE_PCS_PHY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 0),
+ [CCU_SYS_PCIE_PIPE0_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 4),
+ [CCU_SYS_PCIE_CORE_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 8),
+ [CCU_SYS_PCIE_PWR_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 9),
+ [CCU_SYS_PCIE_STICKY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 10),
+ [CCU_SYS_PCIE_NSTICKY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 11),
+ [CCU_SYS_PCIE_HOT_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 12),
+};
+
+static int ccu_rst_reset(struct reset_controller_dev *rcdev, unsigned long idx)
+{
+ struct ccu_rst *rst = to_ccu_rst(rcdev);
+ const struct ccu_rst_info *info = &rst->rsts_info[idx];
+
+ if (info->type != CCU_RST_TRIG)
+ return -EOPNOTSUPP;
+
+ regmap_update_bits(rst->sys_regs, info->base, info->mask, info->mask);
+
+ /* The next delay must be enough to cover all the resets. */
+ udelay(CCU_RST_DELAY_US);
+
+ return 0;
+}
+
+static int ccu_rst_set(struct reset_controller_dev *rcdev,
+ unsigned long idx, bool high)
+{
+ struct ccu_rst *rst = to_ccu_rst(rcdev);
+ const struct ccu_rst_info *info = &rst->rsts_info[idx];
+
+ if (info->type != CCU_RST_DIR)
+ return high ? -EOPNOTSUPP : 0;
+
+ return regmap_update_bits(rst->sys_regs, info->base,
+ info->mask, high ? info->mask : 0);
+}
+
+static int ccu_rst_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ return ccu_rst_set(rcdev, idx, true);
+}
+
+static int ccu_rst_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ return ccu_rst_set(rcdev, idx, false);
+}
+
+static int ccu_rst_status(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ struct ccu_rst *rst = to_ccu_rst(rcdev);
+ const struct ccu_rst_info *info = &rst->rsts_info[idx];
+ u32 val;
+
+ if (info->type != CCU_RST_DIR)
+ return -EOPNOTSUPP;
+
+ regmap_read(rst->sys_regs, info->base, &val);
+
+ return !!(val & info->mask);
+}
+
+static const struct reset_control_ops ccu_rst_ops = {
+ .reset = ccu_rst_reset,
+ .assert = ccu_rst_assert,
+ .deassert = ccu_rst_deassert,
+ .status = ccu_rst_status,
+};
+
+struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *rst_init)
+{
+ struct ccu_rst *rst;
+ int ret;
+
+ if (!rst_init)
+ return ERR_PTR(-EINVAL);
+
+ rst = kzalloc(sizeof(*rst), GFP_KERNEL);
+ if (!rst)
+ return ERR_PTR(-ENOMEM);
+
+ rst->sys_regs = rst_init->sys_regs;
+ if (of_device_is_compatible(rst_init->np, "baikal,bt1-ccu-axi")) {
+ rst->rcdev.nr_resets = ARRAY_SIZE(axi_rst_info);
+ rst->rsts_info = axi_rst_info;
+ } else if (of_device_is_compatible(rst_init->np, "baikal,bt1-ccu-sys")) {
+ rst->rcdev.nr_resets = ARRAY_SIZE(sys_rst_info);
+ rst->rsts_info = sys_rst_info;
+ } else {
+ pr_err("Incompatible DT node '%s' specified\n",
+ of_node_full_name(rst_init->np));
+ ret = -EINVAL;
+ goto err_kfree_rst;
+ }
+
+ rst->rcdev.owner = THIS_MODULE;
+ rst->rcdev.ops = &ccu_rst_ops;
+ rst->rcdev.of_node = rst_init->np;
+
+ ret = reset_controller_register(&rst->rcdev);
+ if (ret) {
+ pr_err("Couldn't register '%s' reset controller\n",
+ of_node_full_name(rst_init->np));
+ goto err_kfree_rst;
+ }
+
+ return rst;
+
+err_kfree_rst:
+ kfree(rst);
+
+ return ERR_PTR(ret);
+}
+
+void ccu_rst_hw_unregister(struct ccu_rst *rst)
+{
+ reset_controller_unregister(&rst->rcdev);
+
+ kfree(rst);
+}
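
The CCU_RST_TRIG and CCU_RST_DIR lines exported by this controller are consumed through the generic reset framework. A minimal consumer-side sketch follows; it is illustrative only, not part of the patch, and the example_* function names are hypothetical:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	/* Hypothetical consumer: pulse a self-deasserted (CCU_RST_TRIG) line. */
	static int example_trigger_ccu_reset(struct device *dev)
	{
		struct reset_control *rstc;

		rstc = devm_reset_control_get_exclusive(dev, NULL);
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		/* Only the pulse-style .reset op is supported for trigger lines. */
		return reset_control_reset(rstc);
	}

	/* Hypothetical consumer: toggle a directly controlled (CCU_RST_DIR) line. */
	static int example_cycle_ccu_reset(struct reset_control *rstc)
	{
		int ret;

		ret = reset_control_assert(rstc);
		if (ret)
			return ret;

		return reset_control_deassert(rstc);
	}
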
diff --git a/drivers/clk/baikal-t1/ccu-rst.h b/drivers/clk/baikal-t1/ccu-rst.h
new file mode 100644
index 000000000000..d6e8b2f671f4
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-rst.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU Resets interface driver
+ */
+#ifndef __CLK_BT1_CCU_RST_H__
+#define __CLK_BT1_CCU_RST_H__
+
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+struct ccu_rst_info;
+
+/*
+ * enum ccu_rst_type - CCU Reset types
+ * @CCU_RST_TRIG: Self-deasserted reset signal.
+ * @CCU_RST_DIR: Directly controlled reset signal.
+ */
+enum ccu_rst_type {
+ CCU_RST_TRIG,
+ CCU_RST_DIR,
+};
+
+/*
+ * struct ccu_rst_init_data - CCU Resets initialization data
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @np: Pointer to the node with the System CCU block.
+ */
+struct ccu_rst_init_data {
+ struct regmap *sys_regs;
+ struct device_node *np;
+};
+
+/*
+ * struct ccu_rst - CCU Reset descriptor
+ * @rcdev: Reset controller descriptor.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @rsts_info: Reset flag info (base address and mask).
+ */
+struct ccu_rst {
+ struct reset_controller_dev rcdev;
+ struct regmap *sys_regs;
+ const struct ccu_rst_info *rsts_info;
+};
+#define to_ccu_rst(_rcdev) container_of(_rcdev, struct ccu_rst, rcdev)
+
+#ifdef CONFIG_CLK_BT1_CCU_RST
+
+struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *init);
+
+void ccu_rst_hw_unregister(struct ccu_rst *rst);
+
+#else
+
+static inline
+struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *init)
+{
+ return NULL;
+}
+
+static inline void ccu_rst_hw_unregister(struct ccu_rst *rst) {}
+
+#endif
+
+#endif /* __CLK_BT1_CCU_RST_H__ */
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
index f141fda12b09..0e772e034812 100644
--- a/drivers/clk/baikal-t1/clk-ccu-div.c
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "bt1-ccu-div: " fmt
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
@@ -24,9 +25,9 @@
#include <linux/regmap.h>
#include <dt-bindings/clock/bt1-ccu.h>
-#include <dt-bindings/reset/bt1-ccu.h>
#include "ccu-div.h"
+#include "ccu-rst.h"
#define CCU_AXI_MAIN_BASE 0x030
#define CCU_AXI_DDR_BASE 0x034
@@ -76,6 +77,16 @@
.divider = _divider \
}
+#define CCU_DIV_BUF_INFO(_id, _name, _pname, _base, _flags) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .type = CCU_DIV_BUF, \
+ .flags = _flags \
+ }
+
#define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \
{ \
.id = _id, \
@@ -85,12 +96,6 @@
.divider = _divider \
}
-#define CCU_DIV_RST_MAP(_rst_id, _clk_id) \
- { \
- .rst_id = _rst_id, \
- .clk_id = _clk_id \
- }
-
struct ccu_div_info {
unsigned int id;
const char *name;
@@ -105,11 +110,6 @@ struct ccu_div_info {
unsigned long features;
};
-struct ccu_div_rst_map {
- unsigned int rst_id;
- unsigned int clk_id;
-};
-
struct ccu_div_data {
struct device_node *np;
struct regmap *sys_regs;
@@ -118,11 +118,8 @@ struct ccu_div_data {
const struct ccu_div_info *divs_info;
struct ccu_div **divs;
- unsigned int rst_num;
- const struct ccu_div_rst_map *rst_map;
- struct reset_controller_dev rcdev;
+ struct ccu_rst *rsts;
};
-#define to_ccu_div_data(_rcdev) container_of(_rcdev, struct ccu_div_data, rcdev)
/*
* AXI Main Interconnect (axi_main_clk) and DDR AXI-bus (axi_ddr_clk) clocks
@@ -169,33 +166,22 @@ static const struct ccu_div_info axi_info[] = {
CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN)
};
-static const struct ccu_div_rst_map axi_rst_map[] = {
- CCU_DIV_RST_MAP(CCU_AXI_MAIN_RST, CCU_AXI_MAIN_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_DDR_RST, CCU_AXI_DDR_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_SATA_RST, CCU_AXI_SATA_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_GMAC0_RST, CCU_AXI_GMAC0_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_GMAC1_RST, CCU_AXI_GMAC1_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_XGMAC_RST, CCU_AXI_XGMAC_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_PCIE_M_RST, CCU_AXI_PCIE_M_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_PCIE_S_RST, CCU_AXI_PCIE_S_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_USB_RST, CCU_AXI_USB_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_HWA_RST, CCU_AXI_HWA_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_SRAM_RST, CCU_AXI_SRAM_CLK)
-};
-
/*
* APB-bus clock is marked as critical since it's a main communication bus
* for the SoC devices registers IO-operations.
*/
static const struct ccu_div_info sys_info[] = {
- CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
+ CCU_DIV_VAR_INFO(CCU_SYS_SATA_CLK, "sys_sata_clk",
"sata_clk", CCU_SYS_SATA_REF_BASE, 4,
CLK_SET_RATE_GATE,
CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED |
CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_BUF_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
+ "sys_sata_clk", CCU_SYS_SATA_REF_BASE,
+ CLK_SET_RATE_PARENT),
CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk",
"pcie_clk", CCU_SYS_APB_BASE, 5,
- CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN),
+ CLK_IS_CRITICAL, CCU_DIV_BASIC | CCU_DIV_RESET_DOMAIN),
CCU_DIV_GATE_INFO(CCU_SYS_GMAC0_TX_CLK, "sys_gmac0_tx_clk",
"eth_clk", CCU_SYS_GMAC0_BASE, 5),
CCU_DIV_FIXED_INFO(CCU_SYS_GMAC0_PTP_CLK, "sys_gmac0_ptp_clk",
@@ -204,10 +190,12 @@ static const struct ccu_div_info sys_info[] = {
"eth_clk", CCU_SYS_GMAC1_BASE, 5),
CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk",
"eth_clk", 10),
- CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
- "eth_clk", CCU_SYS_XGMAC_BASE, 8),
+ CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_CLK, "sys_xgmac_clk",
+ "eth_clk", CCU_SYS_XGMAC_BASE, 1),
+ CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
+ "sys_xgmac_clk", 8),
CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk",
- "eth_clk", 10),
+ "sys_xgmac_clk", 8),
CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk",
"eth_clk", CCU_SYS_USB_BASE, 10),
CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk",
@@ -227,74 +215,58 @@ static const struct ccu_div_info sys_info[] = {
"ref_clk", 25),
CCU_DIV_VAR_INFO(CCU_SYS_TIMER0_CLK, "sys_timer0_clk",
"ref_clk", CCU_SYS_TIMER0_BASE, 17,
- CLK_SET_RATE_GATE, 0),
+ CLK_SET_RATE_GATE, CCU_DIV_BASIC),
CCU_DIV_VAR_INFO(CCU_SYS_TIMER1_CLK, "sys_timer1_clk",
"ref_clk", CCU_SYS_TIMER1_BASE, 17,
- CLK_SET_RATE_GATE, 0),
+ CLK_SET_RATE_GATE, CCU_DIV_BASIC),
CCU_DIV_VAR_INFO(CCU_SYS_TIMER2_CLK, "sys_timer2_clk",
"ref_clk", CCU_SYS_TIMER2_BASE, 17,
- CLK_SET_RATE_GATE, 0),
+ CLK_SET_RATE_GATE, CCU_DIV_BASIC),
CCU_DIV_VAR_INFO(CCU_SYS_WDT_CLK, "sys_wdt_clk",
"eth_clk", CCU_SYS_WDT_BASE, 17,
CLK_SET_RATE_GATE, CCU_DIV_SKIP_ONE_TO_THREE)
};
-static const struct ccu_div_rst_map sys_rst_map[] = {
- CCU_DIV_RST_MAP(CCU_SYS_SATA_REF_RST, CCU_SYS_SATA_REF_CLK),
- CCU_DIV_RST_MAP(CCU_SYS_APB_RST, CCU_SYS_APB_CLK),
-};
+static struct ccu_div_data *axi_data;
+static struct ccu_div_data *sys_data;
-static struct ccu_div *ccu_div_find_desc(struct ccu_div_data *data,
- unsigned int clk_id)
+static void ccu_div_set_data(struct ccu_div_data *data)
{
- struct ccu_div *div;
- int idx;
-
- for (idx = 0; idx < data->divs_num; ++idx) {
- div = data->divs[idx];
- if (div && div->id == clk_id)
- return div;
- }
-
- return ERR_PTR(-EINVAL);
+ struct device_node *np = data->np;
+
+ if (of_device_is_compatible(np, "baikal,bt1-ccu-axi"))
+ axi_data = data;
+ else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys"))
+ sys_data = data;
+ else
+ pr_err("Invalid DT node '%s' specified\n", of_node_full_name(np));
}
-static int ccu_div_reset(struct reset_controller_dev *rcdev,
- unsigned long rst_id)
+static struct ccu_div_data *ccu_div_get_data(struct device_node *np)
{
- struct ccu_div_data *data = to_ccu_div_data(rcdev);
- const struct ccu_div_rst_map *map;
- struct ccu_div *div;
- int idx, ret;
+ if (of_device_is_compatible(np, "baikal,bt1-ccu-axi"))
+ return axi_data;
+ else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys"))
+ return sys_data;
- for (idx = 0, map = data->rst_map; idx < data->rst_num; ++idx, ++map) {
- if (map->rst_id == rst_id)
- break;
- }
- if (idx == data->rst_num) {
- pr_err("Invalid reset ID %lu specified\n", rst_id);
- return -EINVAL;
- }
+ pr_err("Invalid DT node '%s' specified\n", of_node_full_name(np));
- div = ccu_div_find_desc(data, map->clk_id);
- if (IS_ERR(div)) {
- pr_err("Invalid clock ID %d in mapping\n", map->clk_id);
- return PTR_ERR(div);
- }
+ return NULL;
+}
- ret = ccu_div_reset_domain(div);
- if (ret) {
- pr_err("Reset isn't supported by divider %s\n",
- clk_hw_get_name(ccu_div_get_clk_hw(div)));
+static struct ccu_div *ccu_div_find_desc(struct ccu_div_data *data,
+ unsigned int clk_id)
+{
+ int idx;
+
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ if (data->divs_info[idx].id == clk_id)
+ return data->divs[idx];
}
- return ret;
+ return ERR_PTR(-EINVAL);
}
-static const struct reset_control_ops ccu_div_rst_ops = {
- .reset = ccu_div_reset,
-};
-
static struct ccu_div_data *ccu_div_create_data(struct device_node *np)
{
struct ccu_div_data *data;
@@ -308,13 +280,9 @@ static struct ccu_div_data *ccu_div_create_data(struct device_node *np)
if (of_device_is_compatible(np, "baikal,bt1-ccu-axi")) {
data->divs_num = ARRAY_SIZE(axi_info);
data->divs_info = axi_info;
- data->rst_num = ARRAY_SIZE(axi_rst_map);
- data->rst_map = axi_rst_map;
} else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys")) {
data->divs_num = ARRAY_SIZE(sys_info);
data->divs_info = sys_info;
- data->rst_num = ARRAY_SIZE(sys_rst_map);
- data->rst_map = sys_rst_map;
} else {
pr_err("Incompatible DT node '%s' specified\n",
of_node_full_name(np));
@@ -365,14 +333,16 @@ static struct clk_hw *ccu_div_of_clk_hw_get(struct of_phandle_args *clkspec,
clk_id = clkspec->args[0];
div = ccu_div_find_desc(data, clk_id);
if (IS_ERR(div)) {
- pr_info("Invalid clock ID %d specified\n", clk_id);
+ if (div != ERR_PTR(-EPROBE_DEFER))
+ pr_info("Invalid clock ID %d specified\n", clk_id);
+
return ERR_CAST(div);
}
return ccu_div_get_clk_hw(div);
}
-static int ccu_div_clk_register(struct ccu_div_data *data)
+static int ccu_div_clk_register(struct ccu_div_data *data, bool defer)
{
int idx, ret;
@@ -380,6 +350,13 @@ static int ccu_div_clk_register(struct ccu_div_data *data)
const struct ccu_div_info *info = &data->divs_info[idx];
struct ccu_div_init_data init = {0};
+ if (!!(info->features & CCU_DIV_BASIC) ^ defer) {
+ if (!data->divs[idx])
+ data->divs[idx] = ERR_PTR(-EPROBE_DEFER);
+
+ continue;
+ }
+
init.id = info->id;
init.name = info->name;
init.parent_name = info->parent_name;
@@ -396,6 +373,9 @@ static int ccu_div_clk_register(struct ccu_div_data *data)
init.base = info->base;
init.sys_regs = data->sys_regs;
init.divider = info->divider;
+ } else if (init.type == CCU_DIV_BUF) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
} else {
init.divider = info->divider;
}
@@ -409,49 +389,104 @@ static int ccu_div_clk_register(struct ccu_div_data *data)
}
}
- ret = of_clk_add_hw_provider(data->np, ccu_div_of_clk_hw_get, data);
- if (ret) {
- pr_err("Couldn't register dividers '%s' clock provider\n",
- of_node_full_name(data->np));
- goto err_hw_unregister;
- }
-
return 0;
err_hw_unregister:
- for (--idx; idx >= 0; --idx)
+ for (--idx; idx >= 0; --idx) {
+ if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
+ continue;
+
ccu_div_hw_unregister(data->divs[idx]);
+ }
return ret;
}
-static void ccu_div_clk_unregister(struct ccu_div_data *data)
+static void ccu_div_clk_unregister(struct ccu_div_data *data, bool defer)
{
int idx;
- of_clk_del_provider(data->np);
+ /* Uninstall only the clocks registered on the specified stage */
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
+ continue;
- for (idx = 0; idx < data->divs_num; ++idx)
ccu_div_hw_unregister(data->divs[idx]);
+ }
}
-static int ccu_div_rst_register(struct ccu_div_data *data)
+static int ccu_div_of_register(struct ccu_div_data *data)
{
int ret;
- data->rcdev.ops = &ccu_div_rst_ops;
- data->rcdev.of_node = data->np;
- data->rcdev.nr_resets = data->rst_num;
+ ret = of_clk_add_hw_provider(data->np, ccu_div_of_clk_hw_get, data);
+ if (ret) {
+ pr_err("Couldn't register dividers '%s' clock provider\n",
+ of_node_full_name(data->np));
+ }
+
+ return ret;
+}
- ret = reset_controller_register(&data->rcdev);
- if (ret)
+static int ccu_div_rst_register(struct ccu_div_data *data)
+{
+ struct ccu_rst_init_data init = {0};
+
+ init.sys_regs = data->sys_regs;
+ init.np = data->np;
+
+ data->rsts = ccu_rst_hw_register(&init);
+ if (IS_ERR(data->rsts)) {
pr_err("Couldn't register divider '%s' reset controller\n",
of_node_full_name(data->np));
+ return PTR_ERR(data->rsts);
+ }
+
+ return 0;
+}
+
+static int ccu_div_probe(struct platform_device *pdev)
+{
+ struct ccu_div_data *data;
+ int ret;
+
+ data = ccu_div_get_data(dev_of_node(&pdev->dev));
+ if (!data)
+ return -EINVAL;
+
+ ret = ccu_div_clk_register(data, false);
+ if (ret)
+ return ret;
+
+ ret = ccu_div_rst_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ return 0;
+
+err_clk_unregister:
+ ccu_div_clk_unregister(data, false);
return ret;
}
-static void ccu_div_init(struct device_node *np)
+static const struct of_device_id ccu_div_of_match[] = {
+ { .compatible = "baikal,bt1-ccu-axi" },
+ { .compatible = "baikal,bt1-ccu-sys" },
+ { }
+};
+
+static struct platform_driver ccu_div_driver = {
+ .probe = ccu_div_probe,
+ .driver = {
+ .name = "clk-ccu-div",
+ .of_match_table = ccu_div_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(ccu_div_driver);
+
+static __init void ccu_div_init(struct device_node *np)
{
struct ccu_div_data *data;
int ret;
@@ -464,22 +499,23 @@ static void ccu_div_init(struct device_node *np)
if (ret)
goto err_free_data;
- ret = ccu_div_clk_register(data);
+ ret = ccu_div_clk_register(data, true);
if (ret)
goto err_free_data;
- ret = ccu_div_rst_register(data);
+ ret = ccu_div_of_register(data);
if (ret)
goto err_clk_unregister;
+ ccu_div_set_data(data);
+
return;
err_clk_unregister:
- ccu_div_clk_unregister(data);
+ ccu_div_clk_unregister(data, true);
err_free_data:
ccu_div_free_data(data);
}
-
-CLK_OF_DECLARE(ccu_axi, "baikal,bt1-ccu-axi", ccu_div_init);
-CLK_OF_DECLARE(ccu_sys, "baikal,bt1-ccu-sys", ccu_div_init);
+CLK_OF_DECLARE_DRIVER(ccu_axi, "baikal,bt1-ccu-axi", ccu_div_init);
+CLK_OF_DECLARE_DRIVER(ccu_sys, "baikal,bt1-ccu-sys", ccu_div_init);
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
index 2445d4b12baf..fce02ce77347 100644
--- a/drivers/clk/baikal-t1/clk-ccu-pll.c
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "bt1-ccu-pll: " fmt
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
@@ -31,13 +32,14 @@
#define CCU_PCIE_PLL_BASE 0x018
#define CCU_ETH_PLL_BASE 0x020
-#define CCU_PLL_INFO(_id, _name, _pname, _base, _flags) \
- { \
- .id = _id, \
- .name = _name, \
- .parent_name = _pname, \
- .base = _base, \
- .flags = _flags \
+#define CCU_PLL_INFO(_id, _name, _pname, _base, _flags, _features) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .flags = _flags, \
+ .features = _features, \
}
#define CCU_PLL_NUM ARRAY_SIZE(pll_info)
@@ -48,6 +50,7 @@ struct ccu_pll_info {
const char *parent_name;
unsigned int base;
unsigned long flags;
+ unsigned long features;
};
/*
@@ -61,15 +64,15 @@ struct ccu_pll_info {
*/
static const struct ccu_pll_info pll_info[] = {
CCU_PLL_INFO(CCU_CPU_PLL, "cpu_pll", "ref_clk", CCU_CPU_PLL_BASE,
- CLK_IS_CRITICAL),
+ CLK_IS_CRITICAL, CCU_PLL_BASIC),
CCU_PLL_INFO(CCU_SATA_PLL, "sata_pll", "ref_clk", CCU_SATA_PLL_BASE,
- CLK_IS_CRITICAL | CLK_SET_RATE_GATE),
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0),
CCU_PLL_INFO(CCU_DDR_PLL, "ddr_pll", "ref_clk", CCU_DDR_PLL_BASE,
- CLK_IS_CRITICAL | CLK_SET_RATE_GATE),
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0),
CCU_PLL_INFO(CCU_PCIE_PLL, "pcie_pll", "ref_clk", CCU_PCIE_PLL_BASE,
- CLK_IS_CRITICAL),
+ CLK_IS_CRITICAL, CCU_PLL_BASIC),
CCU_PLL_INFO(CCU_ETH_PLL, "eth_pll", "ref_clk", CCU_ETH_PLL_BASE,
- CLK_IS_CRITICAL | CLK_SET_RATE_GATE)
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0)
};
struct ccu_pll_data {
@@ -78,16 +81,16 @@ struct ccu_pll_data {
struct ccu_pll *plls[CCU_PLL_NUM];
};
+static struct ccu_pll_data *pll_data;
+
static struct ccu_pll *ccu_pll_find_desc(struct ccu_pll_data *data,
unsigned int clk_id)
{
- struct ccu_pll *pll;
int idx;
for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
- pll = data->plls[idx];
- if (pll && pll->id == clk_id)
- return pll;
+ if (pll_info[idx].id == clk_id)
+ return data->plls[idx];
}
return ERR_PTR(-EINVAL);
@@ -133,14 +136,16 @@ static struct clk_hw *ccu_pll_of_clk_hw_get(struct of_phandle_args *clkspec,
clk_id = clkspec->args[0];
pll = ccu_pll_find_desc(data, clk_id);
if (IS_ERR(pll)) {
- pr_info("Invalid PLL clock ID %d specified\n", clk_id);
+ if (pll != ERR_PTR(-EPROBE_DEFER))
+ pr_info("Invalid PLL clock ID %d specified\n", clk_id);
+
return ERR_CAST(pll);
}
return ccu_pll_get_clk_hw(pll);
}
-static int ccu_pll_clk_register(struct ccu_pll_data *data)
+static int ccu_pll_clk_register(struct ccu_pll_data *data, bool defer)
{
int idx, ret;
@@ -148,6 +153,14 @@ static int ccu_pll_clk_register(struct ccu_pll_data *data)
const struct ccu_pll_info *info = &pll_info[idx];
struct ccu_pll_init_data init = {0};
+ /* Defer non-basic PLLs allocation for the probe stage */
+ if (!!(info->features & CCU_PLL_BASIC) ^ defer) {
+ if (!data->plls[idx])
+ data->plls[idx] = ERR_PTR(-EPROBE_DEFER);
+
+ continue;
+ }
+
init.id = info->id;
init.name = info->name;
init.parent_name = info->parent_name;
@@ -155,6 +168,7 @@ static int ccu_pll_clk_register(struct ccu_pll_data *data)
init.sys_regs = data->sys_regs;
init.np = data->np;
init.flags = info->flags;
+ init.features = info->features;
data->plls[idx] = ccu_pll_hw_register(&init);
if (IS_ERR(data->plls[idx])) {
@@ -165,22 +179,70 @@ static int ccu_pll_clk_register(struct ccu_pll_data *data)
}
}
+ return 0;
+
+err_hw_unregister:
+ for (--idx; idx >= 0; --idx) {
+ if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
+ continue;
+
+ ccu_pll_hw_unregister(data->plls[idx]);
+ }
+
+ return ret;
+}
+
+static void ccu_pll_clk_unregister(struct ccu_pll_data *data, bool defer)
+{
+ int idx;
+
+ /* Uninstall only the clocks registered on the specified stage */
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
+ continue;
+
+ ccu_pll_hw_unregister(data->plls[idx]);
+ }
+}
+
+static int ccu_pll_of_register(struct ccu_pll_data *data)
+{
+ int ret;
+
ret = of_clk_add_hw_provider(data->np, ccu_pll_of_clk_hw_get, data);
if (ret) {
pr_err("Couldn't register PLL provider of '%s'\n",
of_node_full_name(data->np));
- goto err_hw_unregister;
}
- return 0;
+ return ret;
+}
-err_hw_unregister:
- for (--idx; idx >= 0; --idx)
- ccu_pll_hw_unregister(data->plls[idx]);
+static int ccu_pll_probe(struct platform_device *pdev)
+{
+ struct ccu_pll_data *data = pll_data;
- return ret;
+ if (!data)
+ return -EINVAL;
+
+ return ccu_pll_clk_register(data, false);
}
+static const struct of_device_id ccu_pll_of_match[] = {
+ { .compatible = "baikal,bt1-ccu-pll" },
+ { }
+};
+
+static struct platform_driver ccu_pll_driver = {
+ .probe = ccu_pll_probe,
+ .driver = {
+ .name = "clk-ccu-pll",
+ .of_match_table = ccu_pll_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(ccu_pll_driver);
+
static __init void ccu_pll_init(struct device_node *np)
{
struct ccu_pll_data *data;
@@ -194,13 +256,22 @@ static __init void ccu_pll_init(struct device_node *np)
if (ret)
goto err_free_data;
- ret = ccu_pll_clk_register(data);
+ ret = ccu_pll_clk_register(data, true);
if (ret)
goto err_free_data;
+ ret = ccu_pll_of_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ pll_data = data;
+
return;
+err_clk_unregister:
+ ccu_pll_clk_unregister(data, true);
+
err_free_data:
ccu_pll_free_data(data);
}
-CLK_OF_DECLARE(ccu_pll, "baikal,bt1-ccu-pll", ccu_pll_init);
+CLK_OF_DECLARE_DRIVER(ccu_pll, "baikal,bt1-ccu-pll", ccu_pll_init);
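
Both CCU drivers above use the same two-stage registration: the early OF declaration registers only the clocks marked CCU_*_BASIC, while the platform-driver probe registers the rest, with `!!(features & BASIC) ^ defer` selecting what to skip at each stage. A tiny standalone sketch of that predicate, assuming an illustrative EXAMPLE_BASIC flag:

	#include <stdbool.h>

	#define EXAMPLE_BASIC	0x1UL	/* stands in for CCU_PLL_BASIC / CCU_DIV_BASIC */

	/*
	 * Early OF init runs with defer=true and registers only BASIC clocks;
	 * the platform-driver probe runs with defer=false and registers the rest.
	 * Returns true when the clock must be skipped at the current stage.
	 */
	static bool example_skip_clock(unsigned long features, bool defer)
	{
		return (!!(features & EXAMPLE_BASIC)) ^ defer;
	}

	/*
	 * example_skip_clock(EXAMPLE_BASIC, true)  == false  (register early)
	 * example_skip_clock(EXAMPLE_BASIC, false) == true   (skip at probe)
	 * example_skip_clock(0, true)              == true   (skip early)
	 * example_skip_clock(0, false)             == false  (register at probe)
	 */
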
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 48a1eb9f2d55..e74fe6219d14 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -502,6 +503,8 @@ struct bcm2835_clock_data {
bool low_jitter;
u32 tcnt_mux;
+
+ bool round_up;
};
struct bcm2835_gate_data {
@@ -966,9 +969,9 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
return div;
}
-static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
- unsigned long parent_rate,
- u32 div)
+static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
+ unsigned long parent_rate,
+ u32 div)
{
const struct bcm2835_clock_data *data = clock->data;
u64 temp;
@@ -993,12 +996,34 @@ static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
return temp;
}
+static unsigned long bcm2835_round_rate(unsigned long rate)
+{
+ unsigned long scaler;
+ unsigned long limit;
+
+ limit = rate / 100000;
+
+ scaler = 1;
+ while (scaler < limit)
+ scaler *= 10;
+
+ /*
+ * If increasing a clock by less than 0.1% changes it
+ * from ..999.. to ..000.., round up.
+ */
+ if ((rate + scaler - 1) / scaler % 1000 == 0)
+ rate = roundup(rate, scaler);
+
+ return rate;
+}
+
static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct bcm2835_cprman *cprman = clock->cprman;
const struct bcm2835_clock_data *data = clock->data;
+ unsigned long rate;
u32 div;
if (data->int_bits == 0 && data->frac_bits == 0)
@@ -1006,7 +1031,12 @@ static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw,
div = cprman_read(cprman, data->div_reg);
- return bcm2835_clock_rate_from_divisor(clock, parent_rate, div);
+ rate = bcm2835_clock_rate_from_divisor(clock, parent_rate, div);
+
+ if (data->round_up)
+ rate = bcm2835_round_rate(rate);
+
+ return rate;
}
static void bcm2835_clock_wait_busy(struct bcm2835_clock *clock)
@@ -1784,7 +1814,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.load_mask = CM_PLLC_LOADPER,
.hold_mask = CM_PLLC_HOLDPER,
.fixed_divider = 1,
- .flags = CLK_SET_RATE_PARENT),
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
/*
* PLLD is the display PLL, used to drive DSI display panels.
@@ -2143,7 +2173,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.div_reg = CM_UARTDIV,
.int_bits = 10,
.frac_bits = 12,
- .tcnt_mux = 28),
+ .tcnt_mux = 28,
+ .round_up = true),
/* TV encoder clock. Only operating frequency is 108Mhz. */
[BCM2835_CLOCK_VEC] = REGISTER_PER_CLK(
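
As a worked example of the round_up behaviour added above (illustrative numbers): for a computed rate of 47999999 Hz, limit is 479, so the scaler grows 1 -> 10 -> 100 -> 1000; (47999999 + 999) / 1000 = 48000, which is divisible by 1000, so the rate is rounded up to 48000000 Hz. A rate such as 48123456 Hz fails that test and is returned unchanged.
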
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 876b37b8683c..679f4649a7ef 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -33,6 +33,7 @@ enum rpi_firmware_clk_id {
RPI_FIRMWARE_EMMC2_CLK_ID,
RPI_FIRMWARE_M2MC_CLK_ID,
RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
+ RPI_FIRMWARE_VEC_CLK_ID,
RPI_FIRMWARE_NUM_CLK_ID,
};
@@ -51,6 +52,7 @@ static char *rpi_firmware_clk_names[] = {
[RPI_FIRMWARE_EMMC2_CLK_ID] = "emmc2",
[RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc",
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb",
+ [RPI_FIRMWARE_VEC_CLK_ID] = "vec",
};
#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0)
@@ -129,9 +131,18 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
},
+ [RPI_FIRMWARE_PIXEL_CLK_ID] = {
+ .export = true,
+ },
+ [RPI_FIRMWARE_HEVC_CLK_ID] = {
+ .export = true,
+ },
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
},
+ [RPI_FIRMWARE_VEC_CLK_ID] = {
+ .export = true,
+ },
};
/*
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index bccdfa00fd37..67a9edbba29c 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -500,12 +500,15 @@ static void __init berlin2_clock_setup(struct device_node *np)
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ of_node_put(parent_np);
return;
+ }
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
if (!gbase)
return;
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index e9518d35f262..dd2784bb75b6 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -286,19 +286,23 @@ static void __init berlin2q_clock_setup(struct device_node *np)
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ of_node_put(parent_np);
return;
+ }
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
if (!gbase) {
+ of_node_put(parent_np);
pr_err("%pOF: Unable to map global base\n", np);
return;
}
/* BG2Q CPU PLL is not part of global registers */
cpupll_base = of_iomap(parent_np, 1);
+ of_node_put(parent_np);
if (!cpupll_base) {
pr_err("%pOF: Unable to map cpupll base\n", np);
iounmap(gbase);
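
The berlin fixes above apply the usual OF reference-counting rule: of_get_parent() returns the parent node with an elevated refcount, so every exit path must drop it with of_node_put(). A minimal sketch of the pattern, with an illustrative helper name:

	#include <linux/of.h>
	#include <linux/of_address.h>

	/* Illustrative helper: map the first register window of np's parent. */
	static void __iomem *example_map_parent_regs(struct device_node *np)
	{
		struct device_node *parent_np;
		void __iomem *base;

		parent_np = of_get_parent(np);		/* takes a reference */
		if (!parent_np)
			return NULL;

		base = of_iomap(parent_np, 0);
		of_node_put(parent_np);			/* drop it on every path */

		return base;				/* NULL if the mapping failed */
	}
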
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index bacebd457e6f..8b3c059e19a1 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -80,7 +80,7 @@ struct asm9260_mux_clock {
u8 mask;
u32 *table;
const char *name;
- const char **parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long offset;
unsigned long flags;
@@ -232,10 +232,10 @@ static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
HW_AHBCLKCTRL1, 16 },
};
-static const char __initdata *main_mux_p[] = { NULL, NULL };
-static const char __initdata *i2s0_mux_p[] = { NULL, NULL, "i2s0m_div"};
-static const char __initdata *i2s1_mux_p[] = { NULL, NULL, "i2s1m_div"};
-static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
+static struct clk_parent_data __initdata main_mux_p[] = { { .index = 0, }, { .name = "pll" } };
+static struct clk_parent_data __initdata i2s0_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "i2s0m_div"} };
+static struct clk_parent_data __initdata i2s1_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "i2s1m_div"} };
+static struct clk_parent_data __initdata clkout_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "rtc"} };
static u32 three_mux_table[] = {0, 1, 3};
static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
@@ -255,9 +255,10 @@ static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
static void __init asm9260_acc_init(struct device_node *np)
{
- struct clk_hw *hw;
+ struct clk_hw *hw, *pll_hw;
struct clk_hw **hws;
- const char *ref_clk, *pll_clk = "pll";
+ const char *pll_clk = "pll";
+ struct clk_parent_data pll_parent_data = { .index = 0 };
u32 rate;
int n;
@@ -274,21 +275,15 @@ static void __init asm9260_acc_init(struct device_node *np)
/* register pll */
rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
- /* TODO: Convert to DT parent scheme */
- ref_clk = of_clk_get_parent_name(np, 0);
- hw = __clk_hw_register_fixed_rate(NULL, NULL, pll_clk,
- ref_clk, NULL, NULL, 0, rate, 0,
- CLK_FIXED_RATE_PARENT_ACCURACY);
-
- if (IS_ERR(hw))
+ pll_hw = clk_hw_register_fixed_rate_parent_accuracy(NULL, pll_clk, &pll_parent_data,
+ 0, rate);
+ if (IS_ERR(pll_hw))
panic("%pOFn: can't register REFCLK. Check DT!", np);
for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
- mc->parent_names[0] = ref_clk;
- mc->parent_names[1] = pll_clk;
- hw = clk_hw_register_mux_table(NULL, mc->name, mc->parent_names,
+ hw = clk_hw_register_mux_table_parent_data(NULL, mc->name, mc->parent_data,
mc->num_parents, mc->flags, base + mc->offset,
0, mc->mask, 0, mc->table, &asm9260_clk_lock);
}
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 24dab2312bc6..9c3305bcb27a 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -622,7 +622,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */
/* P-Bus (BCLK) clock divider */
- hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0,
+ hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index b9c5f904f535..edfa94641bbf 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -85,10 +85,11 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
req->best_parent_hw = NULL;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
- struct clk_rate_request tmp_req = *req;
+ struct clk_rate_request tmp_req;
parent = clk_hw_get_parent(mux_hw);
+ clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate);
ret = clk_composite_determine_rate_for_parent(rate_hw,
&tmp_req,
parent,
@@ -104,12 +105,13 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
}
for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
- struct clk_rate_request tmp_req = *req;
+ struct clk_rate_request tmp_req;
parent = clk_hw_get_parent_by_index(mux_hw, i);
if (!parent)
continue;
+ clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate);
ret = clk_composite_determine_rate_for_parent(rate_hw,
&tmp_req,
parent,
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index f6b2bf558486..a2c2b5203b0a 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -386,13 +386,13 @@ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
const struct clk_div_table *table,
u8 width, unsigned long flags)
{
- struct clk_rate_request req = {
- .rate = rate,
- .best_parent_rate = *prate,
- .best_parent_hw = parent,
- };
+ struct clk_rate_request req;
int ret;
+ clk_hw_init_rate_request(hw, &req, rate);
+ req.best_parent_rate = *prate;
+ req.best_parent_hw = parent;
+
ret = divider_determine_rate(hw, &req, table, width, flags);
if (ret)
return ret;
@@ -408,13 +408,13 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val)
{
- struct clk_rate_request req = {
- .rate = rate,
- .best_parent_rate = *prate,
- .best_parent_hw = parent,
- };
+ struct clk_rate_request req;
int ret;
+ clk_hw_init_rate_request(hw, &req, rate);
+ req.best_parent_rate = *prate;
+ req.best_parent_hw = parent;
+
ret = divider_ro_determine_rate(hw, &req, table, width, flags, val);
if (ret)
return ret;
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index ac68a6b40f0e..7d775954e26d 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -49,12 +49,24 @@ const struct clk_ops clk_fixed_rate_ops = {
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
+static void devm_clk_hw_register_fixed_rate_release(struct device *dev, void *res)
+{
+ struct clk_fixed_rate *fix = res;
+
+ /*
+ * We cannot use clk_hw_unregister_fixed_rate, since it will kfree()
+ * the hw, resulting in double free. Just unregister the hw and let
+ * devres code kfree() it.
+ */
+ clk_hw_unregister(&fix->hw);
+}
+
struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy,
- unsigned long clk_fixed_flags)
+ unsigned long clk_fixed_flags, bool devm)
{
struct clk_fixed_rate *fixed;
struct clk_hw *hw;
@@ -62,7 +74,11 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
int ret = -EINVAL;
/* allocate fixed-rate clock */
- fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+ if (devm)
+ fixed = devres_alloc(devm_clk_hw_register_fixed_rate_release,
+ sizeof(*fixed), GFP_KERNEL);
+ else
+ fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
if (!fixed)
return ERR_PTR(-ENOMEM);
@@ -90,9 +106,13 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
else
ret = of_clk_hw_register(np, hw);
if (ret) {
- kfree(fixed);
+ if (devm)
+ devres_free(fixed);
+ else
+ kfree(fixed);
hw = ERR_PTR(ret);
- }
+ } else if (devm)
+ devres_add(dev, fixed);
return hw;
}
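
The devres release hook added above is the standard pattern for device-managed registration: allocate the wrapper with devres_alloc() so a release callback runs on driver detach, devres_add() it on success, and devres_free() it on failure. A generic sketch under that assumption, with placeholder foo_* names that are not kernel API:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int dummy; };

	static void foo_devres_release(struct device *dev, void *res)
	{
		/* Undo whatever the registration did; devres kfree()s 'res' itself. */
	}

	static struct foo *devm_foo_register(struct device *dev, bool fail)
	{
		struct foo *foo;

		foo = devres_alloc(foo_devres_release, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return ERR_PTR(-ENOMEM);

		if (fail) {			/* stand-in for a failed registration */
			devres_free(foo);	/* plain free, release is not called */
			return ERR_PTR(-EIO);
		}

		devres_add(dev, foo);		/* release will run on driver unbind */
		return foo;
	}
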
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index 81cb90955d68..460e7216bfa1 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -286,7 +286,7 @@ static struct platform_driver lan966x_clk_driver = {
.of_match_table = lan966x_clk_dt_ids,
},
};
-builtin_platform_driver(lan966x_clk_driver);
+module_platform_driver(lan966x_clk_driver);
MODULE_AUTHOR("Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>");
MODULE_DESCRIPTION("LAN966X clock driver");
diff --git a/drivers/clk/clk-lochnagar.c b/drivers/clk/clk-lochnagar.c
index 565bcd0cdde9..80944bf482e9 100644
--- a/drivers/clk/clk-lochnagar.c
+++ b/drivers/clk/clk-lochnagar.c
@@ -19,7 +19,7 @@
#include <linux/mfd/lochnagar1_regs.h>
#include <linux/mfd/lochnagar2_regs.h>
-#include <dt-bindings/clk/lochnagar.h>
+#include <dt-bindings/clock/lochnagar.h>
#define LOCHNAGAR_NUM_CLOCKS (LOCHNAGAR_SPDIF_CLKOUT + 1)
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index bad2677e11ae..71fbe687fa7b 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -99,7 +99,7 @@ static void __init nomadik_src_init(void)
if (!src_base) {
pr_err("%s: must have src parent node with REGS (%pOFn)\n",
__func__, np);
- return;
+ goto out_put;
}
/* Set all timers to use the 2.4 MHz TIMCLK */
@@ -132,6 +132,9 @@ static void __init nomadik_src_init(void)
}
writel(val, src_base + SRC_XTALCR);
register_reboot_notifier(&nomadik_clk_reboot_notifier);
+
+out_put:
+ of_node_put(np);
}
/**
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index e677bb5a784b..e319cfa51a8a 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -129,20 +129,6 @@ npcm7xx_clk_register_pll(void __iomem *pllcon, const char *name,
#define NPCM7XX_SECCNT (0x68)
#define NPCM7XX_CNTR25M (0x6C)
-struct npcm7xx_clk_gate_data {
- u32 reg;
- u8 bit_idx;
- const char *name;
- const char *parent_name;
- unsigned long flags;
- /*
- * If this clock is exported via DT, set onecell_idx to constant
- * defined in include/dt-bindings/clock/nuvoton, NPCM7XX-clock.h for
- * this specific clock. Otherwise, set to -1.
- */
- int onecell_idx;
-};
-
struct npcm7xx_clk_mux_data {
u8 shift;
u8 mask;
@@ -160,21 +146,6 @@ struct npcm7xx_clk_mux_data {
};
-struct npcm7xx_clk_div_fixed_data {
- u8 mult;
- u8 div;
- const char *name;
- const char *parent_name;
- u8 clk_divider_flags;
- /*
- * If this clock is exported via DT, set onecell_idx to constant
- * defined in include/dt-bindings/clock/nuvoton, NPCM7XX-clock.h for
- * this specific clock. Otherwise, set to -1.
- */
- int onecell_idx;
-};
-
-
struct npcm7xx_clk_div_data {
u32 reg;
u8 shift;
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index cda5e258355b..584e293156ad 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -207,7 +207,7 @@ static const struct of_device_id oxnas_stdclk_dt_ids[] = {
static int oxnas_stdclk_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *parent_np;
const struct oxnas_stdclk_data *data;
struct regmap *regmap;
int ret;
@@ -215,7 +215,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "failed to have parent regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 88898b97a443..5eddb9f0d6bd 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1063,8 +1063,13 @@ static void __init _clockgen_init(struct device_node *np, bool legacy);
*/
static void __init legacy_init_clockgen(struct device_node *np)
{
- if (!clockgen.node)
- _clockgen_init(of_get_parent(np), true);
+ if (!clockgen.node) {
+ struct device_node *parent_np;
+
+ parent_np = of_get_parent(np);
+ _clockgen_init(parent_np, true);
+ of_node_put(parent_np);
+ }
}
/* Legacy node */
@@ -1159,6 +1164,7 @@ static struct clk * __init create_sysclk(const char *name)
sysclk = of_get_child_by_name(clockgen.node, "sysclk");
if (sysclk) {
clk = sysclk_from_fixed(sysclk, name);
+ of_node_put(sysclk);
if (!IS_ERR(clk))
return clk;
}
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 657493ecce4c..88689415aff9 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -24,7 +24,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <dt-bindings/clk/versaclock.h>
+#include <dt-bindings/clock/versaclock.h>
/* VersaClock5 registers */
#define VC5_OTP_CONTROL 0x00
@@ -153,6 +153,7 @@ enum vc5_model {
IDT_VC5_5P49V5935,
IDT_VC6_5P49V6901,
IDT_VC6_5P49V6965,
+ IDT_VC6_5P49V6975,
};
/* Structure to describe features of a particular VC5 model */
@@ -230,8 +231,12 @@ static unsigned char vc5_mux_get_parent(struct clk_hw *hw)
container_of(hw, struct vc5_driver_data, clk_mux);
const u8 mask = VC5_PRIM_SRC_SHDN_EN_XTAL | VC5_PRIM_SRC_SHDN_EN_CLKIN;
unsigned int src;
+ int ret;
+
+ ret = regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &src);
+ if (ret)
+ return 0;
- regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &src);
src &= mask;
if (src == VC5_PRIM_SRC_SHDN_EN_XTAL)
@@ -286,8 +291,12 @@ static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_mul);
unsigned int premul;
+ int ret;
+
+ ret = regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &premul);
+ if (ret)
+ return 0;
- regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &premul);
if (premul & VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ)
parent_rate *= 2;
@@ -315,11 +324,9 @@ static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
else
mask = 0;
- regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN,
- VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ,
- mask);
-
- return 0;
+ return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN,
+ VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ,
+ mask);
}
static const struct clk_ops vc5_dbl_ops = {
@@ -334,14 +341,19 @@ static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_pfd);
unsigned int prediv, div;
+ int ret;
- regmap_read(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, &prediv);
+ ret = regmap_read(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, &prediv);
+ if (ret)
+ return 0;
/* The bypass_prediv is set, PLL fed from Ref_in directly. */
if (prediv & VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV)
return parent_rate;
- regmap_read(vc5->regmap, VC5_REF_DIVIDER, &div);
+ ret = regmap_read(vc5->regmap, VC5_REF_DIVIDER, &div);
+ if (ret)
+ return 0;
/* The Sel_prediv2 is set, PLL fed from prediv2 (Ref_in / 2) */
if (div & VC5_REF_DIVIDER_SEL_PREDIV2)
@@ -376,15 +388,17 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_pfd);
unsigned long idiv;
+ int ret;
u8 div;
/* CLKIN within range of PLL input, feed directly to PLL. */
if (parent_rate <= 50000000) {
- regmap_update_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
- VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV,
- VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
- regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, 0x00);
- return 0;
+ ret = regmap_set_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
+ VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, 0x00);
}
idiv = DIV_ROUND_UP(parent_rate, rate);
@@ -395,11 +409,12 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
else
div = VC5_REF_DIVIDER_REF_DIV(idiv);
- regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, div);
- regmap_update_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
- VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV, 0);
+ ret = regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, div);
+ if (ret)
+ return ret;
- return 0;
+ return regmap_clear_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
+ VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
}
static const struct clk_ops vc5_pfd_ops = {
@@ -551,9 +566,12 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
hwdata->div_int >> 4, hwdata->div_int << 4,
0
};
+ int ret;
- regmap_bulk_write(vc5->regmap, VC5_OUT_DIV_FRAC(hwdata->num, 0),
- data, 14);
+ ret = regmap_bulk_write(vc5->regmap, VC5_OUT_DIV_FRAC(hwdata->num, 0),
+ data, 14);
+ if (ret)
+ return ret;
/*
* Toggle magic bit in undocumented register for unknown reason.
@@ -561,12 +579,13 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
* datasheet somewhat implies this is needed, but the register
* and the bit is not documented.
*/
- regmap_update_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
- VC5_GLOBAL_REGISTER_GLOBAL_RESET, 0);
- regmap_update_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
- VC5_GLOBAL_REGISTER_GLOBAL_RESET,
- VC5_GLOBAL_REGISTER_GLOBAL_RESET);
- return 0;
+ ret = regmap_clear_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
+ VC5_GLOBAL_REGISTER_GLOBAL_RESET);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
+ VC5_GLOBAL_REGISTER_GLOBAL_RESET);
}
static const struct clk_ops vc5_fod_ops = {
@@ -594,10 +613,9 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
* registers.
*/
if (vc5->chip_info->flags & VC5_HAS_BYPASS_SYNC_BIT) {
- ret = regmap_update_bits(vc5->regmap,
- VC5_RESERVED_X0(hwdata->num),
- VC5_RESERVED_X0_BYPASS_SYNC,
- VC5_RESERVED_X0_BYPASS_SYNC);
+ ret = regmap_set_bits(vc5->regmap,
+ VC5_RESERVED_X0(hwdata->num),
+ VC5_RESERVED_X0_BYPASS_SYNC);
if (ret)
return ret;
}
@@ -606,7 +624,10 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
* If the input mux is disabled, enable it first and
* select source from matching FOD.
*/
- regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
+ ret = regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
+ if (ret)
+ return ret;
+
if ((src & mask) == 0) {
src = VC5_OUT_DIV_CONTROL_RESET | VC5_OUT_DIV_CONTROL_EN_FOD;
ret = regmap_update_bits(vc5->regmap,
@@ -617,18 +638,22 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
}
/* Enable the clock buffer */
- regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
- VC5_CLK_OUTPUT_CFG1_EN_CLKBUF,
- VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
+ ret = regmap_set_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
+ VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
+ if (ret)
+ return ret;
+
if (hwdata->clk_output_cfg0_mask) {
dev_dbg(&vc5->client->dev, "Update output %d mask 0x%0X val 0x%0X\n",
hwdata->num, hwdata->clk_output_cfg0_mask,
hwdata->clk_output_cfg0);
- regmap_update_bits(vc5->regmap,
- VC5_CLK_OUTPUT_CFG(hwdata->num, 0),
- hwdata->clk_output_cfg0_mask,
- hwdata->clk_output_cfg0);
+ ret = regmap_update_bits(vc5->regmap,
+ VC5_CLK_OUTPUT_CFG(hwdata->num, 0),
+ hwdata->clk_output_cfg0_mask,
+ hwdata->clk_output_cfg0);
+ if (ret)
+ return ret;
}
return 0;
@@ -640,8 +665,8 @@ static void vc5_clk_out_unprepare(struct clk_hw *hw)
struct vc5_driver_data *vc5 = hwdata->vc5;
/* Disable the clock buffer */
- regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
- VC5_CLK_OUTPUT_CFG1_EN_CLKBUF, 0);
+ regmap_clear_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
+ VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
}
static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
@@ -656,8 +681,12 @@ static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
const u8 extclk = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT;
unsigned int src;
+ int ret;
+
+ ret = regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
+ if (ret)
+ return 0;
- regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
src &= mask;
if (src == 0) /* Input mux set to DISABLED */
@@ -725,6 +754,7 @@ static int vc5_map_index_to_output(const enum vc5_model model,
case IDT_VC5_5P49V5935:
case IDT_VC6_5P49V6901:
case IDT_VC6_5P49V6965:
+ case IDT_VC6_5P49V6975:
default:
return n;
}
@@ -819,22 +849,27 @@ static int vc5_update_cap_load(struct device_node *node, struct vc5_driver_data
{
u32 value;
int mapped_value;
+ int ret;
- if (!of_property_read_u32(node, "idt,xtal-load-femtofarads", &value)) {
- mapped_value = vc5_map_cap_value(value);
- if (mapped_value < 0)
- return mapped_value;
-
- /*
- * The mapped_value is really the high 6 bits of
- * VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP, so
- * shift the value 2 places.
- */
- regmap_update_bits(vc5->regmap, VC5_XTAL_X1_LOAD_CAP, ~0x03, mapped_value << 2);
- regmap_update_bits(vc5->regmap, VC5_XTAL_X2_LOAD_CAP, ~0x03, mapped_value << 2);
- }
+ if (of_property_read_u32(node, "idt,xtal-load-femtofarads", &value))
+ return 0;
- return 0;
+ mapped_value = vc5_map_cap_value(value);
+ if (mapped_value < 0)
+ return mapped_value;
+
+ /*
+ * The mapped_value is really the high 6 bits of
+ * VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP, so
+ * shift the value 2 places.
+ */
+ ret = regmap_update_bits(vc5->regmap, VC5_XTAL_X1_LOAD_CAP, ~0x03,
+ mapped_value << 2);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(vc5->regmap, VC5_XTAL_X2_LOAD_CAP, ~0x03,
+ mapped_value << 2);
}
static int vc5_update_slew(struct device_node *np_output,
@@ -956,7 +991,10 @@ static int vc5_probe(struct i2c_client *client)
"could not read idt,output-enable-active\n");
}
- regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, src_mask, src_val);
+ ret = regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, src_mask,
+ src_val);
+ if (ret)
+ return ret;
/* Register clock input mux */
memset(&init, 0, sizeof(init));
@@ -1202,7 +1240,7 @@ static const struct vc5_chip_info idt_5p49v6901_info = {
.model = IDT_VC6_5P49V6901,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
- .flags = VC5_HAS_PFD_FREQ_DBL,
+ .flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT,
};
static const struct vc5_chip_info idt_5p49v6965_info = {
@@ -1212,6 +1250,13 @@ static const struct vc5_chip_info idt_5p49v6965_info = {
.flags = VC5_HAS_BYPASS_SYNC_BIT,
};
+static const struct vc5_chip_info idt_5p49v6975_info = {
+ .model = IDT_VC6_5P49V6975,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = VC5_HAS_BYPASS_SYNC_BIT | VC5_HAS_INTERNAL_XTAL,
+};
+
static const struct i2c_device_id vc5_id[] = {
{ "5p49v5923", .driver_data = IDT_VC5_5P49V5923 },
{ "5p49v5925", .driver_data = IDT_VC5_5P49V5925 },
@@ -1219,6 +1264,7 @@ static const struct i2c_device_id vc5_id[] = {
{ "5p49v5935", .driver_data = IDT_VC5_5P49V5935 },
{ "5p49v6901", .driver_data = IDT_VC6_5P49V6901 },
{ "5p49v6965", .driver_data = IDT_VC6_5P49V6965 },
+ { "5p49v6975", .driver_data = IDT_VC6_5P49V6975 },
{ }
};
MODULE_DEVICE_TABLE(i2c, vc5_id);
@@ -1230,6 +1276,7 @@ static const struct of_device_id clk_vc5_of_match[] = {
{ .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info },
{ .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info },
{ .compatible = "idt,5p49v6965", .data = &idt_5p49v6965_info },
+ { .compatible = "idt,5p49v6975", .data = &idt_5p49v6975_info },
{ },
};
MODULE_DEVICE_TABLE(of, clk_vc5_of_match);
diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
new file mode 100644
index 000000000000..8e4f86e852aa
--- /dev/null
+++ b/drivers/clk/clk-versaclock7.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common clock framework driver for the Versaclock7 family of timing devices.
+ *
+ * Copyright (c) 2022 Renesas Electronics Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/i2c.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/swab.h>
+
+/*
+ * 16-bit register address: the lower 8 bits of the register address come
+ * from the offset addr byte and the upper 8 bits come from the page register.
+ */
+#define VC7_PAGE_ADDR 0xFD
+#define VC7_PAGE_WINDOW 256
+#define VC7_MAX_REG 0x364
+
+/* Maximum number of banks supported by VC7 */
+#define VC7_NUM_BANKS 7
+
+/* Maximum number of FODs supported by VC7 */
+#define VC7_NUM_FOD 3
+
+/* Maximum number of IODs supported by VC7 */
+#define VC7_NUM_IOD 4
+
+/* Maximum number of outputs supported by VC7 */
+#define VC7_NUM_OUT 12
+
+/* VCO valid range is 9.5 GHz to 10.7 GHz */
+#define VC7_APLL_VCO_MIN 9500000000UL
+#define VC7_APLL_VCO_MAX 10700000000UL
+
+/* APLL denominator is fixed at 2^27 */
+#define VC7_APLL_DENOMINATOR_BITS 27
+
+/* FOD 1st stage denominator is fixed at 2^34 */
+#define VC7_FOD_DENOMINATOR_BITS 34
+
+/* IOD can operate between 1kHz and 650MHz */
+#define VC7_IOD_RATE_MIN 1000UL
+#define VC7_IOD_RATE_MAX 650000000UL
+#define VC7_IOD_MIN_DIVISOR 14
+#define VC7_IOD_MAX_DIVISOR 0x1ffffff /* 25-bit */
+
+#define VC7_FOD_RATE_MIN 1000UL
+#define VC7_FOD_RATE_MAX 650000000UL
+#define VC7_FOD_1ST_STAGE_RATE_MIN 33000000UL /* 33 MHz */
+#define VC7_FOD_1ST_STAGE_RATE_MAX 650000000UL /* 650 MHz */
+#define VC7_FOD_1ST_INT_MAX 324
+#define VC7_FOD_2ND_INT_MIN 2
+#define VC7_FOD_2ND_INT_MAX 0x1ffff /* 17-bit */
+
+/* VC7 Registers */
+
+#define VC7_REG_XO_CNFG 0x2C
+#define VC7_REG_XO_CNFG_COUNT 4
+#define VC7_REG_XO_IB_H_DIV_SHIFT 24
+#define VC7_REG_XO_IB_H_DIV_MASK GENMASK(28, VC7_REG_XO_IB_H_DIV_SHIFT)
+
+#define VC7_REG_APLL_FB_DIV_FRAC 0x120
+#define VC7_REG_APLL_FB_DIV_FRAC_COUNT 4
+#define VC7_REG_APLL_FB_DIV_FRAC_MASK GENMASK(26, 0)
+
+#define VC7_REG_APLL_FB_DIV_INT 0x124
+#define VC7_REG_APLL_FB_DIV_INT_COUNT 2
+#define VC7_REG_APLL_FB_DIV_INT_MASK GENMASK(9, 0)
+
+#define VC7_REG_APLL_CNFG 0x127
+#define VC7_REG_APLL_EN_DOUBLER BIT(0)
+
+#define VC7_REG_OUT_BANK_CNFG(idx) (0x280 + (0x4 * (idx)))
+#define VC7_REG_OUTPUT_BANK_SRC_MASK GENMASK(2, 0)
+
+#define VC7_REG_FOD_INT_CNFG(idx) (0x1E0 + (0x10 * (idx)))
+#define VC7_REG_FOD_INT_CNFG_COUNT 8
+#define VC7_REG_FOD_1ST_INT_MASK GENMASK(8, 0)
+#define VC7_REG_FOD_2ND_INT_SHIFT 9
+#define VC7_REG_FOD_2ND_INT_MASK GENMASK(25, VC7_REG_FOD_2ND_INT_SHIFT)
+#define VC7_REG_FOD_FRAC_SHIFT 26
+#define VC7_REG_FOD_FRAC_MASK GENMASK_ULL(59, VC7_REG_FOD_FRAC_SHIFT)
+
+#define VC7_REG_IOD_INT_CNFG(idx) (0x1C0 + (0x8 * (idx)))
+#define VC7_REG_IOD_INT_CNFG_COUNT 4
+#define VC7_REG_IOD_INT_MASK GENMASK(24, 0)
+
+#define VC7_REG_ODRV_EN(idx) (0x240 + (0x4 * (idx)))
+#define VC7_REG_OUT_DIS BIT(0)
+
+struct vc7_driver_data;
+static const struct regmap_config vc7_regmap_config;
+
+/* Supported Renesas VC7 models */
+enum vc7_model {
+ VC7_RC21008A,
+};
+
+struct vc7_chip_info {
+ const enum vc7_model model;
+ const unsigned int banks[VC7_NUM_BANKS];
+ const unsigned int num_banks;
+ const unsigned int outputs[VC7_NUM_OUT];
+ const unsigned int num_outputs;
+};
+
+/*
+ * Changing the APLL frequency is currently not supported.
+ * The APLL will consist of an opaque block between the XO and FOD/IODs and
+ * its frequency will be computed based on the current state of the device.
+ */
+struct vc7_apll_data {
+ struct clk *clk;
+ struct vc7_driver_data *vc7;
+ u8 xo_ib_h_div;
+ u8 en_doubler;
+ u16 apll_fb_div_int;
+ u32 apll_fb_div_frac;
+};
+
+struct vc7_fod_data {
+ struct clk_hw hw;
+ struct vc7_driver_data *vc7;
+ unsigned int num;
+ u32 fod_1st_int;
+ u32 fod_2nd_int;
+ u64 fod_frac;
+};
+
+struct vc7_iod_data {
+ struct clk_hw hw;
+ struct vc7_driver_data *vc7;
+ unsigned int num;
+ u32 iod_int;
+};
+
+struct vc7_out_data {
+ struct clk_hw hw;
+ struct vc7_driver_data *vc7;
+ unsigned int num;
+ unsigned int out_dis;
+};
+
+struct vc7_driver_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ const struct vc7_chip_info *chip_info;
+
+ struct clk *pin_xin;
+ struct vc7_apll_data clk_apll;
+ struct vc7_fod_data clk_fod[VC7_NUM_FOD];
+ struct vc7_iod_data clk_iod[VC7_NUM_IOD];
+ struct vc7_out_data clk_out[VC7_NUM_OUT];
+};
+
+struct vc7_bank_src_map {
+ enum vc7_bank_src_type {
+ VC7_FOD,
+ VC7_IOD,
+ } type;
+ union _divider {
+ struct vc7_iod_data *iod;
+ struct vc7_fod_data *fod;
+ } src;
+};
+
+static struct clk_hw *vc7_of_clk_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct vc7_driver_data *vc7 = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= vc7->chip_info->num_outputs)
+ return ERR_PTR(-EINVAL);
+
+ return &vc7->clk_out[idx].hw;
+}
+
+static const unsigned int RC21008A_index_to_output_mapping[] = {
+ 1, 2, 3, 6, 7, 8, 10, 11
+};
+
+static int vc7_map_index_to_output(const enum vc7_model model, const unsigned int i)
+{
+ switch (model) {
+ case VC7_RC21008A:
+ return RC21008A_index_to_output_mapping[i];
+ default:
+ return i;
+ }
+}
+
+/* bank to output mapping, same across all variants */
+static const unsigned int output_bank_mapping[] = {
+ 0, /* Output 0 */
+ 1, /* Output 1 */
+ 2, /* Output 2 */
+ 2, /* Output 3 */
+ 3, /* Output 4 */
+ 3, /* Output 5 */
+ 3, /* Output 6 */
+ 3, /* Output 7 */
+ 4, /* Output 8 */
+ 4, /* Output 9 */
+ 5, /* Output 10 */
+ 6 /* Output 11 */
+};
+
+/**
+ * vc7_64_mul_64_to_128() - Multiply two u64 and return an unsigned 128-bit integer
+ * as an upper and lower part.
+ *
+ * @left: The left argument.
+ * @right: The right argument.
+ * @hi: The upper 64-bits of the 128-bit product.
+ * @lo: The lower 64-bits of the 128-bit product.
+ *
+ * From mul_64_64 in crypto/ecc.c:350 in the linux kernel, accessed in v5.17.2.
+ */
+static void vc7_64_mul_64_to_128(u64 left, u64 right, u64 *hi, u64 *lo)
+{
+ u64 a0 = left & 0xffffffffull;
+ u64 a1 = left >> 32;
+ u64 b0 = right & 0xffffffffull;
+ u64 b1 = right >> 32;
+ u64 m0 = a0 * b0;
+ u64 m1 = a0 * b1;
+ u64 m2 = a1 * b0;
+ u64 m3 = a1 * b1;
+
+ m2 += (m0 >> 32);
+ m2 += m1;
+
+ /* Overflow */
+ if (m2 < m1)
+ m3 += 0x100000000ull;
+
+ *lo = (m0 & 0xffffffffull) | (m2 << 32);
+ *hi = m3 + (m2 >> 32);
+}
+
+/**
+ * vc7_128_div_64_to_64() - Divides a 128-bit uint by a 64-bit divisor, returning a 64-bit quotient.
+ *
+ * @numhi: The upper 64-bits of the dividend.
+ * @numlo: The lower 64-bits of the dividend.
+ * @den: The denominator (divisor).
+ * @r: The remainder, pass NULL if the remainder is not needed.
+ *
+ * Originally from libdivide, modified to use kernel u64/u32 types.
+ *
+ * See https://github.com/ridiculousfish/libdivide/blob/master/libdivide.h#L471.
+ *
+ * Return: The 64-bit quotient of the division.
+ *
+ * In case of overflow or division by zero, max(u64) is returned.
+ */
+static u64 vc7_128_div_64_to_64(u64 numhi, u64 numlo, u64 den, u64 *r)
+{
+ /*
+ * We work in base 2**32.
+ * A uint32 holds a single digit. A uint64 holds two digits.
+ * Our numerator is conceptually [num3, num2, num1, num0].
+ * Our denominator is [den1, den0].
+ */
+ const u64 b = ((u64)1 << 32);
+
+ /* The high and low digits of our computed quotient. */
+ u32 q1, q0;
+
+ /* The normalization shift factor */
+ int shift;
+
+ /*
+ * The high and low digits of our denominator (after normalizing).
+ * Also the low 2 digits of our numerator (after normalizing).
+ */
+ u32 den1, den0, num1, num0;
+
+ /* A partial remainder. */
+ u64 rem;
+
+ /*
+ * The estimated quotient, and its corresponding remainder (unrelated
+ * to true remainder).
+ */
+ u64 qhat, rhat;
+
+ /* Variables used to correct the estimated quotient. */
+ u64 c1, c2;
+
+ /* Check for overflow and divide by 0. */
+ if (numhi >= den) {
+ if (r)
+ *r = ~0ull;
+ return ~0ull;
+ }
+
+ /*
+ * Determine the normalization factor. We multiply den by this, so that
+ * its leading digit is at least half b. In binary this means just
+ * shifting left by the number of leading zeros, so that there's a 1 in
+ * the MSB.
+ *
+ * We also shift numer by the same amount. This cannot overflow because
+ * numhi < den. The expression (-shift & 63) is the same as (64 -
+ * shift), except it avoids the UB of shifting by 64. The funny bitwise
+ * 'and' ensures that numlo does not get shifted into numhi if shift is
+ * 0. clang 11 has an x86 codegen bug here: see LLVM bug 50118. The
+ * sequence below avoids it.
+ */
+ shift = __builtin_clzll(den);
+ den <<= shift;
+ numhi <<= shift;
+ numhi |= (numlo >> (-shift & 63)) & (-(s64)shift >> 63);
+ numlo <<= shift;
+
+ /*
+ * Extract the low digits of the numerator and both digits of the
+ * denominator.
+ */
+ num1 = (u32)(numlo >> 32);
+ num0 = (u32)(numlo & 0xFFFFFFFFu);
+ den1 = (u32)(den >> 32);
+ den0 = (u32)(den & 0xFFFFFFFFu);
+
+ /*
+ * We wish to compute q1 = [n3 n2 n1] / [d1 d0].
+ * Estimate q1 as [n3 n2] / [d1], and then correct it.
+ * Note while qhat may be 2 digits, q1 is always 1 digit.
+ */
+ qhat = div64_u64_rem(numhi, den1, &rhat);
+ c1 = qhat * den0;
+ c2 = rhat * b + num1;
+ if (c1 > c2)
+ qhat -= (c1 - c2 > den) ? 2 : 1;
+ q1 = (u32)qhat;
+
+ /* Compute the true (partial) remainder. */
+ rem = numhi * b + num1 - q1 * den;
+
+ /*
+ * We wish to compute q0 = [rem1 rem0 n0] / [d1 d0].
+ * Estimate q0 as [rem1 rem0] / [d1] and correct it.
+ */
+ qhat = div64_u64_rem(rem, den1, &rhat);
+ c1 = qhat * den0;
+ c2 = rhat * b + num0;
+ if (c1 > c2)
+ qhat -= (c1 - c2 > den) ? 2 : 1;
+ q0 = (u32)qhat;
+
+ /* Return remainder if requested. */
+ if (r)
+ *r = (rem * b + num0 - q0 * den) >> shift;
+ return ((u64)q1 << 32) | q0;
+}
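
Taken together, the two helpers above implement a full-precision multiply-then-divide: the product is kept as a 128-bit hi/lo pair and only then divided, so nothing is truncated along the way. A minimal sketch of how they combine (the wrapper name vc7_mul_div_u64 is illustrative and not part of the driver):

static u64 vc7_mul_div_u64(u64 a, u64 b, u64 c)
{
	u64 hi, lo;

	/* a * b held as a 128-bit value split into hi:lo */
	vc7_64_mul_64_to_128(a, b, &hi, &lo);

	/* (hi:lo) / c, remainder discarded */
	return vc7_128_div_64_to_64(hi, lo, c, NULL);
}

This is exactly the pattern vc7_calc_fod_1st_stage_rate() uses further down to evaluate parent_rate * 2^34 / (fod_1st_int * 2^34 + fod_frac).
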
+
+static int vc7_get_bank_clk(struct vc7_driver_data *vc7,
+ unsigned int bank_idx,
+ unsigned int output_bank_src,
+ struct vc7_bank_src_map *map)
+{
+ /* Mapping from Table 38 in datasheet */
+ if (bank_idx == 0 || bank_idx == 1) {
+ switch (output_bank_src) {
+ case 0:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[0];
+ return 0;
+ case 1:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[1];
+ return 0;
+ case 4:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[0];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ default:
+ break;
+ }
+ } else if (bank_idx == 2) {
+ switch (output_bank_src) {
+ case 1:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[1];
+ return 0;
+ case 4:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[0];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ default:
+ break;
+ }
+ } else if (bank_idx == 3) {
+ switch (output_bank_src) {
+ case 4:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[0];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ case 6:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[2];
+ return 0;
+ default:
+ break;
+ }
+ } else if (bank_idx == 4) {
+ switch (output_bank_src) {
+ case 0:
+ /* CLKIN1 not supported in this driver */
+ break;
+ case 2:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[2];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ case 6:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[2];
+ return 0;
+ case 7:
+ /* CLKIN0 not supported in this driver */
+ break;
+ default:
+ break;
+ }
+ } else if (bank_idx == 5) {
+ switch (output_bank_src) {
+ case 0:
+ /* CLKIN1 not supported in this driver */
+ break;
+ case 1:
+ /* XIN_REFIN not supported in this driver */
+ break;
+ case 2:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[2];
+ return 0;
+ case 3:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[3];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ case 6:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[2];
+ return 0;
+ case 7:
+ /* CLKIN0 not supported in this driver */
+ break;
+ default:
+ break;
+ }
+ } else if (bank_idx == 6) {
+ switch (output_bank_src) {
+ case 0:
+ /* CLKIN1 not supported in this driver */
+ break;
+ case 2:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[2];
+ return 0;
+ case 3:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[3];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ case 6:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[2];
+ return 0;
+ case 7:
+ /* CLKIN0 not supported in this driver */
+ break;
+ default:
+ break;
+ }
+ }
+
+ pr_warn("bank_src%d = %d is not supported\n", bank_idx, output_bank_src);
+ return -1;
+}
+
+static int vc7_read_apll(struct vc7_driver_data *vc7)
+{
+ int err;
+ u32 val32;
+ u16 val16;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_XO_CNFG,
+ (u32 *)&val32,
+ VC7_REG_XO_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read XO_CNFG\n");
+ return err;
+ }
+
+ vc7->clk_apll.xo_ib_h_div = (val32 & VC7_REG_XO_IB_H_DIV_MASK)
+ >> VC7_REG_XO_IB_H_DIV_SHIFT;
+
+ err = regmap_read(vc7->regmap,
+ VC7_REG_APLL_CNFG,
+ &val32);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read APLL_CNFG\n");
+ return err;
+ }
+
+ vc7->clk_apll.en_doubler = val32 & VC7_REG_APLL_EN_DOUBLER;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_APLL_FB_DIV_FRAC,
+ (u32 *)&val32,
+ VC7_REG_APLL_FB_DIV_FRAC_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read APLL_FB_DIV_FRAC\n");
+ return err;
+ }
+
+ vc7->clk_apll.apll_fb_div_frac = val32 & VC7_REG_APLL_FB_DIV_FRAC_MASK;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_APLL_FB_DIV_INT,
+ (u16 *)&val16,
+ VC7_REG_APLL_FB_DIV_INT_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read APLL_FB_DIV_INT\n");
+ return err;
+ }
+
+ vc7->clk_apll.apll_fb_div_int = val16 & VC7_REG_APLL_FB_DIV_INT_MASK;
+
+ return 0;
+}
+
+static int vc7_read_fod(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ u64 val;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_FOD_INT_CNFG(idx),
+ (u64 *)&val,
+ VC7_REG_FOD_INT_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read FOD%d\n", idx);
+ return err;
+ }
+
+ vc7->clk_fod[idx].fod_1st_int = (val & VC7_REG_FOD_1ST_INT_MASK);
+ vc7->clk_fod[idx].fod_2nd_int =
+ (val & VC7_REG_FOD_2ND_INT_MASK) >> VC7_REG_FOD_2ND_INT_SHIFT;
+ vc7->clk_fod[idx].fod_frac = (val & VC7_REG_FOD_FRAC_MASK)
+ >> VC7_REG_FOD_FRAC_SHIFT;
+
+ return 0;
+}
+
+static int vc7_write_fod(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ u64 val;
+
+ /*
+ * FOD dividers are part of an atomic group where fod_1st_int,
+ * fod_2nd_int, and fod_frac must be written together. The new divider
+ * is applied when the MSB of fod_frac is written.
+ */
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_FOD_INT_CNFG(idx),
+ (u64 *)&val,
+ VC7_REG_FOD_INT_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read FOD%d\n", idx);
+ return err;
+ }
+
+ val = u64_replace_bits(val,
+ vc7->clk_fod[idx].fod_1st_int,
+ VC7_REG_FOD_1ST_INT_MASK);
+ val = u64_replace_bits(val,
+ vc7->clk_fod[idx].fod_2nd_int,
+ VC7_REG_FOD_2ND_INT_MASK);
+ val = u64_replace_bits(val,
+ vc7->clk_fod[idx].fod_frac,
+ VC7_REG_FOD_FRAC_MASK);
+
+ err = regmap_bulk_write(vc7->regmap,
+ VC7_REG_FOD_INT_CNFG(idx),
+ (u64 *)&val,
+ sizeof(u64));
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to write FOD%d\n", idx);
+ return err;
+ }
+
+ return 0;
+}
+
+static int vc7_read_iod(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ u32 val;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_IOD_INT_CNFG(idx),
+ (u32 *)&val,
+ VC7_REG_IOD_INT_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read IOD%d\n", idx);
+ return err;
+ }
+
+ vc7->clk_iod[idx].iod_int = (val & VC7_REG_IOD_INT_MASK);
+
+ return 0;
+}
+
+static int vc7_write_iod(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ u32 val;
+
+ /*
+ * IOD divider field is atomic and all bits must be written.
+ * The new divider is applied when the MSB of iod_int is written.
+ */
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_IOD_INT_CNFG(idx),
+ (u32 *)&val,
+ VC7_REG_IOD_INT_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read IOD%d\n", idx);
+ return err;
+ }
+
+ val = u32_replace_bits(val,
+ vc7->clk_iod[idx].iod_int,
+ VC7_REG_IOD_INT_MASK);
+
+ err = regmap_bulk_write(vc7->regmap,
+ VC7_REG_IOD_INT_CNFG(idx),
+ (u32 *)&val,
+ sizeof(u32));
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to write IOD%d\n", idx);
+ return err;
+ }
+
+ return 0;
+}
+
+static int vc7_read_output(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ unsigned int val, out_num;
+
+ out_num = vc7_map_index_to_output(vc7->chip_info->model, idx);
+ err = regmap_read(vc7->regmap,
+ VC7_REG_ODRV_EN(out_num),
+ &val);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read ODRV_EN[%d]\n", idx);
+ return err;
+ }
+
+ vc7->clk_out[idx].out_dis = val & VC7_REG_OUT_DIS;
+
+ return 0;
+}
+
+static int vc7_write_output(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ unsigned int out_num;
+
+ out_num = vc7_map_index_to_output(vc7->chip_info->model, idx);
+ err = regmap_write_bits(vc7->regmap,
+ VC7_REG_ODRV_EN(out_num),
+ VC7_REG_OUT_DIS,
+ vc7->clk_out[idx].out_dis);
+
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to write ODRV_EN[%d]\n", idx);
+ return err;
+ }
+
+ return 0;
+}
+
+static unsigned long vc7_get_apll_rate(struct vc7_driver_data *vc7)
+{
+ int err;
+ unsigned long xtal_rate;
+ u64 refin_div, apll_rate;
+
+ xtal_rate = clk_get_rate(vc7->pin_xin);
+ err = vc7_read_apll(vc7);
+ if (err) {
+ dev_err(&vc7->client->dev, "unable to read apll\n");
+ return err;
+ }
+
+ /* 0 is bypassed, 1 is reserved */
+ if (vc7->clk_apll.xo_ib_h_div < 2)
+ refin_div = xtal_rate;
+ else
+ refin_div = div64_u64(xtal_rate, vc7->clk_apll.xo_ib_h_div);
+
+ if (vc7->clk_apll.en_doubler)
+ refin_div *= 2;
+
+ /* divider = int + (frac / 2^27) */
+ apll_rate = (refin_div * vc7->clk_apll.apll_fb_div_int) +
+ ((refin_div * vc7->clk_apll.apll_fb_div_frac) >> VC7_APLL_DENOMINATOR_BITS);
+
+ pr_debug("%s - xo_ib_h_div: %u, apll_fb_div_int: %u, apll_fb_div_frac: %u\n",
+ __func__, vc7->clk_apll.xo_ib_h_div, vc7->clk_apll.apll_fb_div_int,
+ vc7->clk_apll.apll_fb_div_frac);
+ pr_debug("%s - refin_div: %llu, apll rate: %llu\n",
+ __func__, refin_div, apll_rate);
+
+ return apll_rate;
+}
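
As a worked example of the computation above (the numbers are illustrative, not taken from the patch): with a 38.4 MHz crystal, the input divider bypassed (xo_ib_h_div < 2) and the doubler disabled, apll_fb_div_int = 260 and apll_fb_div_frac = 0 give 38,400,000 * (260 + 0 / 2^27) = 9,984,000,000 Hz, i.e. 9.984 GHz, inside the 9.5 GHz to 10.7 GHz VCO range.
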
+
+static void vc7_calc_iod_divider(unsigned long rate, unsigned long parent_rate,
+ u32 *divider)
+{
+ *divider = DIV_ROUND_UP(parent_rate, rate);
+ if (*divider < VC7_IOD_MIN_DIVISOR)
+ *divider = VC7_IOD_MIN_DIVISOR;
+ if (*divider > VC7_IOD_MAX_DIVISOR)
+ *divider = VC7_IOD_MAX_DIVISOR;
+}
+
+static void vc7_calc_fod_1st_stage(unsigned long rate, unsigned long parent_rate,
+ u32 *div_int, u64 *div_frac)
+{
+ u64 rem;
+
+ *div_int = (u32)div64_u64_rem(parent_rate, rate, &rem);
+ *div_frac = div64_u64(rem << VC7_FOD_DENOMINATOR_BITS, rate);
+}
+
+static unsigned long vc7_calc_fod_1st_stage_rate(unsigned long parent_rate,
+ u32 fod_1st_int, u64 fod_frac)
+{
+ u64 numer, denom, hi, lo, divisor;
+
+ numer = fod_frac;
+ denom = BIT_ULL(VC7_FOD_DENOMINATOR_BITS);
+
+ if (fod_frac) {
+ vc7_64_mul_64_to_128(parent_rate, denom, &hi, &lo);
+ divisor = ((u64)fod_1st_int * denom) + numer;
+ return vc7_128_div_64_to_64(hi, lo, divisor, NULL);
+ }
+
+ return div64_u64(parent_rate, fod_1st_int);
+}
+
+static unsigned long vc7_calc_fod_2nd_stage_rate(unsigned long parent_rate,
+ u32 fod_1st_int, u32 fod_2nd_int, u64 fod_frac)
+{
+ unsigned long fod_1st_stage_rate;
+
+ fod_1st_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate, fod_1st_int, fod_frac);
+
+ if (fod_2nd_int < 2)
+ return fod_1st_stage_rate;
+
+ /*
+ * There is a div-by-2 preceding the 2nd stage integer divider
+ * (not shown on block diagram) so the actual 2nd stage integer
+ * divisor is 2 * N.
+ */
+ return div64_u64(fod_1st_stage_rate >> 1, fod_2nd_int);
+}
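
For instance (illustrative numbers): a first-stage rate of 500 MHz combined with fod_2nd_int = 25 yields 500 MHz / (2 * 25) = 10 MHz, while any fod_2nd_int below 2 bypasses the second stage and the 500 MHz first-stage rate is used directly.
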
+
+static void vc7_calc_fod_divider(unsigned long rate, unsigned long parent_rate,
+ u32 *fod_1st_int, u32 *fod_2nd_int, u64 *fod_frac)
+{
+ unsigned int allow_frac, i, best_frac_i;
+ unsigned long first_stage_rate;
+
+ vc7_calc_fod_1st_stage(rate, parent_rate, fod_1st_int, fod_frac);
+ first_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate, *fod_1st_int, *fod_frac);
+
+ *fod_2nd_int = 0;
+
+ /* Do we need the second stage integer divider? */
+ if (first_stage_rate < VC7_FOD_1ST_STAGE_RATE_MIN) {
+ allow_frac = 0;
+ best_frac_i = VC7_FOD_2ND_INT_MIN;
+
+ for (i = VC7_FOD_2ND_INT_MIN; i <= VC7_FOD_2ND_INT_MAX; i++) {
+ /*
+ * 1) There is a div-by-2 preceding the 2nd stage integer divider
+ * (not shown on block diagram) so the actual 2nd stage integer
+ * divisor is 2 * N.
+ * 2) Attempt to find an integer solution first. This means stepping
+ * through each 2nd stage integer and recalculating the 1st stage
+ * until the 1st stage frequency is out of bounds. If no integer
+ * solution is found, use the best fractional solution.
+ */
+ vc7_calc_fod_1st_stage(parent_rate, rate * 2 * i, fod_1st_int, fod_frac);
+ first_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate,
+ *fod_1st_int,
+ *fod_frac);
+
+ /* Remember the first viable fractional solution */
+ if (best_frac_i == VC7_FOD_2ND_INT_MIN &&
+ first_stage_rate > VC7_FOD_1ST_STAGE_RATE_MIN) {
+ best_frac_i = i;
+ }
+
+ /* Is the divider viable? Prefer integer solutions over fractional. */
+ if (*fod_1st_int < VC7_FOD_1ST_INT_MAX &&
+ first_stage_rate >= VC7_FOD_1ST_STAGE_RATE_MIN &&
+ (allow_frac || *fod_frac == 0)) {
+ *fod_2nd_int = i;
+ break;
+ }
+
+ /* Ran out of divisors or the 1st stage frequency is out of range */
+ if (i >= VC7_FOD_2ND_INT_MAX ||
+ first_stage_rate > VC7_FOD_1ST_STAGE_RATE_MAX) {
+ allow_frac = 1;
+ i = best_frac_i;
+
+ /* Restore the best frac and rerun the loop for the last time */
+ if (best_frac_i != VC7_FOD_2ND_INT_MIN)
+ i--;
+
+ continue;
+ }
+ }
+ }
+}
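
To make the search concrete, assume (purely for illustration) an APLL rate of exactly 10 GHz and a requested rate of 1 MHz. A single first-stage divisor would have to be 10,000, which both exceeds VC7_FOD_1ST_INT_MAX and leaves the first stage far below its 33 MHz floor, so the loop steps through second-stage divisors. The first integer solution turns up at fod_2nd_int = 20: the first stage divides 10 GHz by 250 (fod_frac = 0, and 40 MHz lies within the 33 MHz to 650 MHz window), and 40 MHz / (2 * 20) = 1 MHz, the requested rate.
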
+
+static unsigned long vc7_fod_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
+ struct vc7_driver_data *vc7 = fod->vc7;
+ int err;
+ unsigned long fod_rate;
+
+ err = vc7_read_fod(vc7, fod->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error reading registers for %s\n",
+ clk_hw_get_name(hw));
+ return err;
+ }
+
+ pr_debug("%s - %s: parent_rate: %lu\n", __func__, clk_hw_get_name(hw), parent_rate);
+
+ fod_rate = vc7_calc_fod_2nd_stage_rate(parent_rate, fod->fod_1st_int,
+ fod->fod_2nd_int, fod->fod_frac);
+
+ pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
+ __func__, clk_hw_get_name(hw),
+ fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
+ pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
+
+ return fod_rate;
+}
+
+static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+{
+ struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
+ unsigned long fod_rate;
+
+ pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
+ __func__, clk_hw_get_name(hw), rate, *parent_rate);
+
+ vc7_calc_fod_divider(rate, *parent_rate,
+ &fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac);
+ fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int,
+ fod->fod_2nd_int, fod->fod_frac);
+
+ pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
+ __func__, clk_hw_get_name(hw),
+ fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
+ pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
+
+ return fod_rate;
+}
+
+static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
+{
+ struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
+ struct vc7_driver_data *vc7 = fod->vc7;
+ unsigned long fod_rate;
+
+ pr_debug("%s - %s: rate: %lu, parent_rate: %lu\n",
+ __func__, clk_hw_get_name(hw), rate, parent_rate);
+
+ if (rate < VC7_FOD_RATE_MIN || rate > VC7_FOD_RATE_MAX) {
+ dev_err(&vc7->client->dev,
+ "requested frequency %lu Hz for %s is out of range\n",
+ rate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ vc7_write_fod(vc7, fod->num);
+
+ fod_rate = vc7_calc_fod_2nd_stage_rate(parent_rate, fod->fod_1st_int,
+ fod->fod_2nd_int, fod->fod_frac);
+
+ pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
+ __func__, clk_hw_get_name(hw),
+ fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
+ pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
+
+ return 0;
+}
+
+static const struct clk_ops vc7_fod_ops = {
+ .recalc_rate = vc7_fod_recalc_rate,
+ .round_rate = vc7_fod_round_rate,
+ .set_rate = vc7_fod_set_rate,
+};
+
+static unsigned long vc7_iod_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
+ struct vc7_driver_data *vc7 = iod->vc7;
+ int err;
+ unsigned long iod_rate;
+
+ err = vc7_read_iod(vc7, iod->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error reading registers for %s\n",
+ clk_hw_get_name(hw));
+ return err;
+ }
+
+ iod_rate = div64_u64(parent_rate, iod->iod_int);
+
+ pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
+ pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), iod_rate);
+
+ return iod_rate;
+}
+
+static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+{
+ struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
+ unsigned long iod_rate;
+
+ pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
+ __func__, clk_hw_get_name(hw), rate, *parent_rate);
+
+ vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int);
+ iod_rate = div64_u64(*parent_rate, iod->iod_int);
+
+ pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
+ pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
+
+ return iod_rate;
+}
+
+static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
+{
+ struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
+ struct vc7_driver_data *vc7 = iod->vc7;
+ unsigned long iod_rate;
+
+ pr_debug("%s - %s: rate: %lu, parent_rate: %lu\n",
+ __func__, clk_hw_get_name(hw), rate, parent_rate);
+
+ if (rate < VC7_IOD_RATE_MIN || rate > VC7_IOD_RATE_MAX) {
+ dev_err(&vc7->client->dev,
+ "requested frequency %lu Hz for %s is out of range\n",
+ rate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ vc7_write_iod(vc7, iod->num);
+
+ iod_rate = div64_u64(parent_rate, iod->iod_int);
+
+ pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
+ pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
+
+ return 0;
+}
+
+static const struct clk_ops vc7_iod_ops = {
+ .recalc_rate = vc7_iod_recalc_rate,
+ .round_rate = vc7_iod_round_rate,
+ .set_rate = vc7_iod_set_rate,
+};
+
+static int vc7_clk_out_prepare(struct clk_hw *hw)
+{
+ struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
+ struct vc7_driver_data *vc7 = out->vc7;
+ int err;
+
+ out->out_dis = 0;
+
+ err = vc7_write_output(vc7, out->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error writing registers for %s\n",
+ clk_hw_get_name(hw));
+ return err;
+ }
+
+ pr_debug("%s - %s: clk prepared\n", __func__, clk_hw_get_name(hw));
+
+ return 0;
+}
+
+static void vc7_clk_out_unprepare(struct clk_hw *hw)
+{
+ struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
+ struct vc7_driver_data *vc7 = out->vc7;
+ int err;
+
+ out->out_dis = 1;
+
+ err = vc7_write_output(vc7, out->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error writing registers for %s\n",
+ clk_hw_get_name(hw));
+ return;
+ }
+
+ pr_debug("%s - %s: clk unprepared\n", __func__, clk_hw_get_name(hw));
+}
+
+static int vc7_clk_out_is_enabled(struct clk_hw *hw)
+{
+ struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
+ struct vc7_driver_data *vc7 = out->vc7;
+ int err, is_enabled;
+
+ err = vc7_read_output(vc7, out->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error reading registers for %s\n",
+ clk_hw_get_name(hw));
+ return err;
+ }
+
+ is_enabled = !out->out_dis;
+
+ pr_debug("%s - %s: is_enabled=%d\n", __func__, clk_hw_get_name(hw), is_enabled);
+
+ return is_enabled;
+}
+
+static const struct clk_ops vc7_clk_out_ops = {
+ .prepare = vc7_clk_out_prepare,
+ .unprepare = vc7_clk_out_unprepare,
+ .is_enabled = vc7_clk_out_is_enabled,
+};
+
+static int vc7_probe(struct i2c_client *client)
+{
+ struct vc7_driver_data *vc7;
+ struct clk_init_data clk_init;
+ struct vc7_bank_src_map bank_src_map;
+ const char *node_name, *apll_name;
+ const char *parent_names[1];
+ unsigned int i, val, bank_idx, out_num;
+ unsigned long apll_rate;
+ int ret;
+
+ vc7 = devm_kzalloc(&client->dev, sizeof(*vc7), GFP_KERNEL);
+ if (!vc7)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, vc7);
+ vc7->client = client;
+ vc7->chip_info = of_device_get_match_data(&client->dev);
+
+ vc7->pin_xin = devm_clk_get(&client->dev, "xin");
+ if (PTR_ERR(vc7->pin_xin) == -EPROBE_DEFER) {
+ return dev_err_probe(&client->dev, -EPROBE_DEFER,
+ "xin not specified\n");
+ }
+
+ vc7->regmap = devm_regmap_init_i2c(client, &vc7_regmap_config);
+ if (IS_ERR(vc7->regmap)) {
+ return dev_err_probe(&client->dev, PTR_ERR(vc7->regmap),
+ "failed to allocate register map\n");
+ }
+
+ if (of_property_read_string(client->dev.of_node, "clock-output-names",
+ &node_name))
+ node_name = client->dev.of_node->name;
+
+ /* Register APLL */
+ apll_rate = vc7_get_apll_rate(vc7);
+ apll_name = kasprintf(GFP_KERNEL, "%s_apll", node_name);
+ vc7->clk_apll.clk = clk_register_fixed_rate(&client->dev, apll_name,
+ __clk_get_name(vc7->pin_xin),
+ 0, apll_rate);
+ kfree(apll_name); /* ccf made a copy of the name */
+ if (IS_ERR(vc7->clk_apll.clk)) {
+ return dev_err_probe(&client->dev, PTR_ERR(vc7->clk_apll.clk),
+ "failed to register apll\n");
+ }
+
+ /* Register FODs */
+ for (i = 0; i < VC7_NUM_FOD; i++) {
+ memset(&clk_init, 0, sizeof(clk_init));
+ clk_init.name = kasprintf(GFP_KERNEL, "%s_fod%d", node_name, i);
+ clk_init.ops = &vc7_fod_ops;
+ clk_init.parent_names = parent_names;
+ parent_names[0] = __clk_get_name(vc7->clk_apll.clk);
+ clk_init.num_parents = 1;
+ vc7->clk_fod[i].num = i;
+ vc7->clk_fod[i].vc7 = vc7;
+ vc7->clk_fod[i].hw.init = &clk_init;
+ ret = devm_clk_hw_register(&client->dev, &vc7->clk_fod[i].hw);
+ if (ret)
+ goto err_clk_register;
+ kfree(clk_init.name); /* ccf made a copy of the name */
+ }
+
+ /* Register IODs */
+ for (i = 0; i < VC7_NUM_IOD; i++) {
+ memset(&clk_init, 0, sizeof(clk_init));
+ clk_init.name = kasprintf(GFP_KERNEL, "%s_iod%d", node_name, i);
+ clk_init.ops = &vc7_iod_ops;
+ clk_init.parent_names = parent_names;
+ parent_names[0] = __clk_get_name(vc7->clk_apll.clk);
+ clk_init.num_parents = 1;
+ vc7->clk_iod[i].num = i;
+ vc7->clk_iod[i].vc7 = vc7;
+ vc7->clk_iod[i].hw.init = &clk_init;
+ ret = devm_clk_hw_register(&client->dev, &vc7->clk_iod[i].hw);
+ if (ret)
+ goto err_clk_register;
+ kfree(clk_init.name); /* ccf made a copy of the name */
+ }
+
+ /* Register outputs */
+ for (i = 0; i < vc7->chip_info->num_outputs; i++) {
+ out_num = vc7_map_index_to_output(vc7->chip_info->model, i);
+
+ /*
+ * This driver does not support remapping FOD/IOD to banks.
+ * The device state is read and the driver is setup to match
+ * the device's existing mapping.
+ */
+ bank_idx = output_bank_mapping[out_num];
+
+ regmap_read(vc7->regmap, VC7_REG_OUT_BANK_CNFG(bank_idx), &val);
+ val &= VC7_REG_OUTPUT_BANK_SRC_MASK;
+
+ memset(&bank_src_map, 0, sizeof(bank_src_map));
+ ret = vc7_get_bank_clk(vc7, bank_idx, val, &bank_src_map);
+ if (ret) {
+ dev_err_probe(&client->dev, ret,
+ "unable to register output %d\n", i);
+ return ret;
+ }
+
+ switch (bank_src_map.type) {
+ case VC7_FOD:
+ parent_names[0] = clk_hw_get_name(&bank_src_map.src.fod->hw);
+ break;
+ case VC7_IOD:
+ parent_names[0] = clk_hw_get_name(&bank_src_map.src.iod->hw);
+ break;
+ }
+
+ memset(&clk_init, 0, sizeof(clk_init));
+ clk_init.name = kasprintf(GFP_KERNEL, "%s_out%d", node_name, i);
+ clk_init.ops = &vc7_clk_out_ops;
+ clk_init.flags = CLK_SET_RATE_PARENT;
+ clk_init.parent_names = parent_names;
+ clk_init.num_parents = 1;
+ vc7->clk_out[i].num = i;
+ vc7->clk_out[i].vc7 = vc7;
+ vc7->clk_out[i].hw.init = &clk_init;
+ ret = devm_clk_hw_register(&client->dev, &vc7->clk_out[i].hw);
+ if (ret)
+ goto err_clk_register;
+ kfree(clk_init.name); /* ccf made a copy of the name */
+ }
+
+ ret = of_clk_add_hw_provider(client->dev.of_node, vc7_of_clk_get, vc7);
+ if (ret) {
+ dev_err_probe(&client->dev, ret, "unable to add clk provider\n");
+ goto err_clk;
+ }
+
+ return ret;
+
+err_clk_register:
+ dev_err_probe(&client->dev, ret,
+ "unable to register %s\n", clk_init.name);
+ kfree(clk_init.name); /* ccf made a copy of the name */
+err_clk:
+ clk_unregister_fixed_rate(vc7->clk_apll.clk);
+ return ret;
+}
+
+static void vc7_remove(struct i2c_client *client)
+{
+ struct vc7_driver_data *vc7 = i2c_get_clientdata(client);
+
+ of_clk_del_provider(client->dev.of_node);
+ clk_unregister_fixed_rate(vc7->clk_apll.clk);
+}
+
+static bool vc7_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == VC7_PAGE_ADDR)
+ return false;
+
+ return true;
+}
+
+static const struct vc7_chip_info vc7_rc21008a_info = {
+ .model = VC7_RC21008A,
+ .num_banks = 6,
+ .num_outputs = 8,
+};
+
+static struct regmap_range_cfg vc7_range_cfg[] = {
+{
+ .range_min = 0,
+ .range_max = VC7_MAX_REG,
+ .selector_reg = VC7_PAGE_ADDR,
+ .selector_mask = 0xFF,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = VC7_PAGE_WINDOW,
+}};
+
+static const struct regmap_config vc7_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = VC7_MAX_REG,
+ .ranges = vc7_range_cfg,
+ .num_ranges = ARRAY_SIZE(vc7_range_cfg),
+ .volatile_reg = vc7_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+ .can_multi_write = true,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
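
The range config above is what lets the rest of the driver use flat 16-bit register numbers while the bus only sees 8-bit offsets: regmap selects the page by writing reg >> 8 to VC7_PAGE_ADDR and then accesses reg & 0xff inside the 256-byte window. A rough hand-rolled equivalent for VC7_REG_APLL_FB_DIV_INT (0x124), shown only as a sketch since the driver itself always goes through regmap:

	/* select page 1, then access offset 0x24 within the window */
	i2c_smbus_write_byte_data(client, VC7_PAGE_ADDR, 0x124 >> 8);
	i2c_smbus_read_byte_data(client, 0x124 & 0xff);
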
+
+static const struct i2c_device_id vc7_i2c_id[] = {
+ { "rc21008a", VC7_RC21008A },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, vc7_i2c_id);
+
+static const struct of_device_id vc7_of_match[] = {
+ { .compatible = "renesas,rc21008a", .data = &vc7_rc21008a_info },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vc7_of_match);
+
+static struct i2c_driver vc7_i2c_driver = {
+ .driver = {
+ .name = "vc7",
+ .of_match_table = vc7_of_match,
+ },
+ .probe_new = vc7_probe,
+ .remove = vc7_remove,
+ .id_table = vc7_i2c_id,
+};
+module_i2c_driver(vc7_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Helms <alexander.helms.jy@renesas.com");
+MODULE_DESCRIPTION("Renesas Versaclock7 common clock framework driver");
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 857217cbcef8..0c3d0cee98c8 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -522,10 +522,10 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
data & pclk->param.reg_clk_mask ? "enabled" :
"disabled");
+ } else {
+ return 1;
}
- if (!pclk->param.csr_reg)
- return 1;
return data & pclk->param.reg_clk_mask ? 1 : 0;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index bd0b35cac83e..c3c3f8c07258 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -536,6 +536,53 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
return now <= rate && now > best;
}
+static void clk_core_init_rate_req(struct clk_core * const core,
+ struct clk_rate_request *req,
+ unsigned long rate);
+
+static int clk_core_round_rate_nolock(struct clk_core *core,
+ struct clk_rate_request *req);
+
+static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
+{
+ struct clk_core *tmp;
+ unsigned int i;
+
+ /* Optimize for the case where the parent is already the parent. */
+ if (core->parent == parent)
+ return true;
+
+ for (i = 0; i < core->num_parents; i++) {
+ tmp = clk_core_get_parent_by_index(core, i);
+ if (!tmp)
+ continue;
+
+ if (tmp == parent)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+clk_core_forward_rate_req(struct clk_core *core,
+ const struct clk_rate_request *old_req,
+ struct clk_core *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate)
+{
+ if (WARN_ON(!clk_core_has_parent(core, parent)))
+ return;
+
+ clk_core_init_rate_req(parent, req, parent_rate);
+
+ if (req->min_rate < old_req->min_rate)
+ req->min_rate = old_req->min_rate;
+
+ if (req->max_rate > old_req->max_rate)
+ req->max_rate = old_req->max_rate;
+}
+
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags)
@@ -543,14 +590,20 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
int i, num_parents, ret;
unsigned long best = 0;
- struct clk_rate_request parent_req = *req;
/* if NO_REPARENT flag set, pass through to current parent */
if (core->flags & CLK_SET_RATE_NO_REPARENT) {
parent = core->parent;
if (core->flags & CLK_SET_RATE_PARENT) {
- ret = __clk_determine_rate(parent ? parent->hw : NULL,
- &parent_req);
+ struct clk_rate_request parent_req;
+
+ if (!parent) {
+ req->rate = 0;
+ return 0;
+ }
+
+ clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+ ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
return ret;
@@ -567,23 +620,29 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
/* find the parent that can provide the fastest rate <= rate */
num_parents = core->num_parents;
for (i = 0; i < num_parents; i++) {
+ unsigned long parent_rate;
+
parent = clk_core_get_parent_by_index(core, i);
if (!parent)
continue;
if (core->flags & CLK_SET_RATE_PARENT) {
- parent_req = *req;
- ret = __clk_determine_rate(parent->hw, &parent_req);
+ struct clk_rate_request parent_req;
+
+ clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+ ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
continue;
+
+ parent_rate = parent_req.rate;
} else {
- parent_req.rate = clk_core_get_rate_nolock(parent);
+ parent_rate = clk_core_get_rate_nolock(parent);
}
- if (mux_is_better_rate(req->rate, parent_req.rate,
+ if (mux_is_better_rate(req->rate, parent_rate,
best, flags)) {
best_parent = parent;
- best = parent_req.rate;
+ best = parent_rate;
}
}
@@ -625,6 +684,22 @@ static void clk_core_get_boundaries(struct clk_core *core,
*max_rate = min(*max_rate, clk_user->max_rate);
}
+/*
+ * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
+ * @hw: the hw clk we want to get the range from
+ * @min_rate: pointer to the variable that will hold the minimum
+ * @max_rate: pointer to the variable that will hold the maximum
+ *
+ * Fills the @min_rate and @max_rate variables with the minimum and
+ * maximum rates that the clock can reach.
+ */
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate)
+{
+ clk_core_get_boundaries(hw->core, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);
+
static bool clk_core_check_boundaries(struct clk_core *core,
unsigned long min_rate,
unsigned long max_rate)
@@ -1340,7 +1415,19 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
if (!core)
return 0;
- req->rate = clamp(req->rate, req->min_rate, req->max_rate);
+ /*
+ * Some clock providers hand-craft their clk_rate_requests and
+ * might not fill min_rate and max_rate.
+ *
+ * If it's the case, clamping the rate is equivalent to setting
+ * the rate to 0 which is bad. Skip the clamping but complain so
+ * that it gets fixed, hopefully.
+ */
+ if (!req->min_rate && !req->max_rate)
+ pr_warn("%s: %s: clk_rate_request has initialized min or max rate.\n",
+ __func__, core->name);
+ else
+ req->rate = clamp(req->rate, req->min_rate, req->max_rate);
/*
* At this point, core protection will be disabled
@@ -1367,13 +1454,19 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
}
static void clk_core_init_rate_req(struct clk_core * const core,
- struct clk_rate_request *req)
+ struct clk_rate_request *req,
+ unsigned long rate)
{
struct clk_core *parent;
if (WARN_ON(!core || !req))
return;
+ memset(req, 0, sizeof(*req));
+
+ req->rate = rate;
+ clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
+
parent = core->parent;
if (parent) {
req->best_parent_hw = parent->hw;
@@ -1384,6 +1477,51 @@ static void clk_core_init_rate_req(struct clk_core * const core,
}
}
+/**
+ * clk_hw_init_rate_request - Initializes a clk_rate_request
+ * @hw: the clk for which we want to submit a rate request
+ * @req: the clk_rate_request structure we want to initialise
+ * @rate: the rate which is to be requested
+ *
+ * Initializes a clk_rate_request structure to submit to
+ * __clk_determine_rate() or similar functions.
+ */
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate)
+{
+ if (WARN_ON(!hw || !req))
+ return;
+
+ clk_core_init_rate_req(hw->core, req, rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);
+
+/**
+ * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
+ * @hw: the original clock that got the rate request
+ * @old_req: the original clk_rate_request structure we want to forward
+ * @parent: the clk we want to forward @old_req to
+ * @req: the clk_rate_request structure we want to initialise
+ * @parent_rate: The rate which is to be requested to @parent
+ *
+ * Initializes a clk_rate_request structure to submit to a clock parent
+ * in __clk_determine_rate() or similar functions.
+ */
+void clk_hw_forward_rate_request(const struct clk_hw *hw,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate)
+{
+ if (WARN_ON(!hw || !old_req || !parent || !req))
+ return;
+
+ clk_core_forward_rate_req(hw->core, old_req,
+ parent->core, req,
+ parent_rate);
+}
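
Together with clk_hw_init_rate_request() above, this gives providers a supported way to build parent requests inside their own .determine_rate implementations instead of hand-crafting struct clk_rate_request. A minimal sketch for a hypothetical fixed divide-by-two clock (foo_determine_rate and the /2 topology are assumptions for illustration, not something in this patch):

static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	struct clk_rate_request parent_req;
	int ret;

	if (!parent)
		return -EINVAL;

	/* ask the parent for twice the requested rate, keeping the boundaries */
	clk_hw_forward_rate_request(hw, req, parent, &parent_req, req->rate * 2);
	ret = __clk_determine_rate(parent, &parent_req);
	if (ret)
		return ret;

	req->best_parent_hw = parent;
	req->best_parent_rate = parent_req.rate;
	req->rate = parent_req.rate / 2;

	return 0;
}
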
+
static bool clk_core_can_round(struct clk_core * const core)
{
return core->ops->determine_rate || core->ops->round_rate;
@@ -1392,6 +1530,8 @@ static bool clk_core_can_round(struct clk_core * const core)
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
+ int ret;
+
lockdep_assert_held(&prepare_lock);
if (!core) {
@@ -1399,12 +1539,22 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
return 0;
}
- clk_core_init_rate_req(core, req);
-
if (clk_core_can_round(core))
return clk_core_determine_round_nolock(core, req);
- else if (core->flags & CLK_SET_RATE_PARENT)
- return clk_core_round_rate_nolock(core->parent, req);
+
+ if (core->flags & CLK_SET_RATE_PARENT) {
+ struct clk_rate_request parent_req;
+
+ clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
+ ret = clk_core_round_rate_nolock(core->parent, &parent_req);
+ if (ret)
+ return ret;
+
+ req->best_parent_rate = parent_req.rate;
+ req->rate = parent_req.rate;
+
+ return 0;
+ }
req->rate = core->rate;
return 0;
@@ -1448,8 +1598,7 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
int ret;
struct clk_rate_request req;
- clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
- req.rate = rate;
+ clk_core_init_rate_req(hw->core, &req, rate);
ret = clk_core_round_rate_nolock(hw->core, &req);
if (ret)
@@ -1481,8 +1630,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
- clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
- req.rate = rate;
+ clk_core_init_rate_req(clk->core, &req, rate);
ret = clk_core_round_rate_nolock(clk->core, &req);
@@ -1611,6 +1759,7 @@ static unsigned long clk_recalc(struct clk_core *core,
/**
* __clk_recalc_rates
* @core: first clk in the subtree
+ * @update_req: Whether req_rate should be updated with the new rate
* @msg: notification type (see include/linux/clk.h)
*
* Walks the subtree of clks starting with clk and recalculates rates as it
@@ -1620,7 +1769,8 @@ static unsigned long clk_recalc(struct clk_core *core,
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
*/
-static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *core, bool update_req,
+ unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
@@ -1634,6 +1784,8 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
parent_rate = core->parent->rate;
core->rate = clk_recalc(core, parent_rate);
+ if (update_req)
+ core->req_rate = core->rate;
/*
* ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
@@ -1643,13 +1795,13 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
__clk_notify(core, msg, old_rate, core->rate);
hlist_for_each_entry(child, &core->children, child_node)
- __clk_recalc_rates(child, msg);
+ __clk_recalc_rates(child, update_req, msg);
}
static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
if (core && (core->flags & CLK_GET_RATE_NOCACHE))
- __clk_recalc_rates(core, 0);
+ __clk_recalc_rates(core, false, 0);
return clk_core_get_rate_nolock(core);
}
@@ -1659,8 +1811,9 @@ static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
- * is set, which means a recalc_rate will be issued.
- * If clk is NULL then returns 0.
+ * is set, which means a recalc_rate will be issued. Can be called regardless of
+ * whether the clock is enabled. If clk is NULL, or if an error occurred, then
+ * returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
@@ -1864,6 +2017,7 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
flags = clk_enable_lock();
clk_reparent(core, old_parent);
clk_enable_unlock(flags);
+
__clk_set_parent_after(core, old_parent, parent);
return ret;
@@ -1969,11 +2123,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
if (clk_core_can_round(core)) {
struct clk_rate_request req;
- req.rate = rate;
- req.min_rate = min_rate;
- req.max_rate = max_rate;
-
- clk_core_init_rate_req(core, &req);
+ clk_core_init_rate_req(core, &req, rate);
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
@@ -2172,8 +2322,7 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
if (cnt < 0)
return cnt;
- clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
- req.rate = req_rate;
+ clk_core_init_rate_req(core, &req, req_rate);
ret = clk_core_round_rate_nolock(core, &req);
@@ -2188,7 +2337,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
{
struct clk_core *top, *fail_clk;
unsigned long rate;
- int ret = 0;
+ int ret;
if (!core)
return 0;
@@ -2324,19 +2473,15 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
-/**
- * clk_set_rate_range - set a rate range for a clock source
- * @clk: clock source
- * @min: desired minimum clock rate in Hz, inclusive
- * @max: desired maximum clock rate in Hz, inclusive
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+static int clk_set_rate_range_nolock(struct clk *clk,
+ unsigned long min,
+ unsigned long max)
{
int ret = 0;
unsigned long old_min, old_max, rate;
+ lockdep_assert_held(&prepare_lock);
+
if (!clk)
return 0;
@@ -2349,8 +2494,6 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
return -EINVAL;
}
- clk_prepare_lock();
-
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
@@ -2365,6 +2508,10 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
goto out;
}
+ rate = clk->core->req_rate;
+ if (clk->core->flags & CLK_GET_RATE_NOCACHE)
+ rate = clk_core_get_rate_recalc(clk->core);
+
/*
* Since the boundaries have been changed, let's give the
* opportunity to the provider to adjust the clock rate based on
@@ -2382,7 +2529,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
* - the determine_rate() callback does not really check for
* this corner case when determining the rate
*/
- rate = clamp(clk->core->req_rate, min, max);
+ rate = clamp(rate, min, max);
ret = clk_core_set_rate_nolock(clk->core, rate);
if (ret) {
/* rollback the changes */
@@ -2394,6 +2541,28 @@ out:
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
+ return ret;
+}
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Return: 0 for success or negative errno on failure.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+ int ret;
+
+ if (!clk)
+ return 0;
+
+ clk_prepare_lock();
+
+ ret = clk_set_rate_range_nolock(clk, min, max);
+
clk_prepare_unlock();
return ret;
@@ -2473,7 +2642,7 @@ static void clk_core_reparent(struct clk_core *core,
{
clk_reparent(core, new_parent);
__clk_recalc_accuracies(core);
- __clk_recalc_rates(core, POST_RATE_CHANGE);
+ __clk_recalc_rates(core, true, POST_RATE_CHANGE);
}
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
@@ -2494,27 +2663,13 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(const struct clk *clk, const struct clk *parent)
{
- struct clk_core *core, *parent_core;
- int i;
-
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
return true;
- core = clk->core;
- parent_core = parent->core;
-
- /* Optimize for the case where the parent is already the parent. */
- if (core->parent == parent_core)
- return true;
-
- for (i = 0; i < core->num_parents; i++)
- if (!strcmp(core->parents[i].name, parent_core->name))
- return true;
-
- return false;
+ return clk_core_has_parent(clk->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_has_parent);
@@ -2571,9 +2726,9 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
/* propagate rate and accuracy recalculation accordingly */
if (ret) {
- __clk_recalc_rates(core, ABORT_RATE_CHANGE);
+ __clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
} else {
- __clk_recalc_rates(core, POST_RATE_CHANGE);
+ __clk_recalc_rates(core, true, POST_RATE_CHANGE);
__clk_recalc_accuracies(core);
}
@@ -3461,7 +3616,7 @@ static void clk_core_reparent_orphans_nolock(void)
/*
* We need to use __clk_set_parent_before() and _after() to
- * to properly migrate any prepare/enable count of the orphan
+ * properly migrate any prepare/enable count of the orphan
* clock. This is important for CLK_IS_CRITICAL clocks, which
* are enabled during init but might not have a parent yet.
*/
@@ -3470,7 +3625,7 @@ static void clk_core_reparent_orphans_nolock(void)
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
- __clk_recalc_rates(orphan, 0);
+ __clk_recalc_rates(orphan, true, 0);
/*
* __clk_init_parent() will set the initial req_rate to
@@ -3671,7 +3826,6 @@ static int __clk_core_init(struct clk_core *core)
clk_core_reparent_orphans_nolock();
-
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
@@ -4347,9 +4501,10 @@ void __clk_put(struct clk *clk)
}
hlist_del(&clk->clks_node);
- if (clk->min_rate > clk->core->req_rate ||
- clk->max_rate < clk->core->req_rate)
- clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+ /* If we had any boundaries on that clock, let's drop them. */
+ if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
+ clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
owner = clk->core->owner;
kref_put(&clk->core->ref, __clk_release);
@@ -4750,32 +4905,6 @@ void of_clk_del_provider(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
-static int devm_clk_provider_match(struct device *dev, void *res, void *data)
-{
- struct device_node **np = res;
-
- if (WARN_ON(!np || !*np))
- return 0;
-
- return *np == data;
-}
-
-/**
- * devm_of_clk_del_provider() - Remove clock provider registered using devm
- * @dev: Device to whose lifetime the clock provider was bound
- */
-void devm_of_clk_del_provider(struct device *dev)
-{
- int ret;
- struct device_node *np = get_clk_provider_node(dev);
-
- ret = devres_release(dev, devm_of_clk_release_provider,
- devm_clk_provider_match, np);
-
- WARN_ON(ret);
-}
-EXPORT_SYMBOL(devm_of_clk_del_provider);
-
/**
* of_parse_clkspec() - Parse a DT clock specifier for a given device node
* @np: device node to parse clock specifier from
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index 6731a822f4e3..f9a5c2964c65 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -108,6 +108,39 @@ static const struct clk_ops clk_dummy_single_parent_ops = {
.get_parent = clk_dummy_single_get_parent,
};
+struct clk_multiple_parent_ctx {
+ struct clk_dummy_context parents_ctx[2];
+ struct clk_hw hw;
+ u8 current_parent;
+};
+
+static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_multiple_parent_ctx *ctx =
+ container_of(hw, struct clk_multiple_parent_ctx, hw);
+
+ if (index >= clk_hw_get_num_parents(hw))
+ return -EINVAL;
+
+ ctx->current_parent = index;
+
+ return 0;
+}
+
+static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_multiple_parent_ctx *ctx =
+ container_of(hw, struct clk_multiple_parent_ctx, hw);
+
+ return ctx->current_parent;
+}
+
+static const struct clk_ops clk_multiple_parents_mux_ops = {
+ .get_parent = clk_multiple_parents_mux_get_parent,
+ .set_parent = clk_multiple_parents_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate_closest,
+};
+
static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
{
struct clk_dummy_context *ctx;
@@ -160,12 +193,14 @@ static void clk_test_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, ctx->rate);
+
+ clk_put(clk);
}
/*
@@ -179,7 +214,7 @@ static void clk_test_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -189,6 +224,8 @@ static void clk_test_set_get_rate(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(clk);
}
/*
@@ -202,7 +239,7 @@ static void clk_test_set_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -216,6 +253,8 @@ static void clk_test_set_set_get_rate(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -226,7 +265,7 @@ static void clk_test_round_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rounded_rate, set_rate;
rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
@@ -240,6 +279,8 @@ static void clk_test_round_set_get_rate(struct kunit *test)
set_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, set_rate, 0);
KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
+
+ clk_put(clk);
}
static struct kunit_case clk_test_cases[] = {
@@ -250,6 +291,11 @@ static struct kunit_case clk_test_cases[] = {
{}
};
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate API with simple scenarios
+ */
static struct kunit_suite clk_test_suite = {
.name = "clk-test",
.init = clk_test_init,
@@ -257,11 +303,912 @@ static struct kunit_suite clk_test_suite = {
.test_cases = clk_test_cases,
};
+static int clk_uncached_test_init(struct kunit *test)
+{
+ struct clk_dummy_context *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->rate = DUMMY_CLOCK_INIT_RATE;
+ ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
+ &clk_dummy_rate_ops,
+ CLK_GET_RATE_NOCACHE);
+
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Test that for an uncached clock, the clock framework doesn't cache
+ * the rate and clk_get_rate() will return the underlying clock rate
+ * even if it changed.
+ */
+static void clk_test_uncached_get_rate(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
+
+ /* We change the rate behind the clock framework's back */
+ ctx->rate = DUMMY_CLOCK_RATE_1;
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for an uncached clock, clk_set_rate_range() will work
+ * properly if the rate hasn't changed.
+ */
+static void clk_test_uncached_set_range(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(clk,
+ DUMMY_CLOCK_RATE_1,
+ DUMMY_CLOCK_RATE_2),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for an uncached clock, clk_set_rate_range() will work
+ * properly if the rate has changed in hardware.
+ *
+ * In this case, it means that if the rate wasn't initially in the range
+ * we're trying to set, but got changed at some point into the range
+ * without the kernel knowing about it, its rate shouldn't be affected.
+ */
+static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+
+ /* We change the rate behind the clock framework's back */
+ ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(clk,
+ DUMMY_CLOCK_RATE_1,
+ DUMMY_CLOCK_RATE_2),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+
+ clk_put(clk);
+}
+
+static struct kunit_case clk_uncached_test_cases[] = {
+ KUNIT_CASE(clk_test_uncached_get_rate),
+ KUNIT_CASE(clk_test_uncached_set_range),
+ KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
+ {}
+};
+
+/*
+ * Test suite for a basic, uncached, rate clock, without any parent.
+ *
+ * These tests exercise the rate API with simple scenarios
+ */
+static struct kunit_suite clk_uncached_test_suite = {
+ .name = "clk-uncached-test",
+ .init = clk_uncached_test_init,
+ .exit = clk_test_exit,
+ .test_cases = clk_uncached_test_cases,
+};
+
+static int
+clk_multiple_parents_mux_test_init(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx;
+ const char *parents[2] = { "parent-0", "parent-1"};
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
+ ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
+ if (ret)
+ return ret;
+
+ ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
+ ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
+ if (ret)
+ return ret;
+
+ ctx->current_parent = 0;
+ ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
+ &clk_multiple_parents_mux_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+clk_multiple_parents_mux_test_exit(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->parents_ctx[0].hw);
+ clk_hw_unregister(&ctx->parents_ctx[1].hw);
+}
+
+/*
+ * Test that for a clock with multiple parents, clk_get_parent()
+ * actually returns the current one.
+ */
+static void
+clk_test_multiple_parents_mux_get_parent(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock with multiple parents, clk_has_parent()
+ * actually reports all of them as parents.
+ */
+static void
+clk_test_multiple_parents_mux_has_parent(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
+ KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
+ clk_put(parent);
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
+ clk_put(parent);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock with multiple parents, if we set a range on
+ * that clock and the parent is changed, its rate after the reparenting
+ * is still within the range we asked for.
+ *
+ * FIXME: clk_set_parent() only does the reparenting but doesn't
+ * reevaluate whether the new clock rate is within its boundaries or
+ * not.
+ */
+static void
+clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent1, *parent2;
+ unsigned long rate;
+ int ret;
+
+ kunit_skip(test, "This needs to be fixed in the core.");
+
+ parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
+ KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
+
+ parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
+
+ ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk,
+ DUMMY_CLOCK_RATE_1 - 1000,
+ DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_parent(clk, parent2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+
+ clk_put(parent2);
+ clk_put(parent1);
+ clk_put(clk);
+}
+
+static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
+ KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
+ KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
+ KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
+ {}
+};
+
+/*
+ * Test suite for a basic mux clock with two parents, with
+ * CLK_SET_RATE_PARENT on the child.
+ *
+ * These tests exercise the consumer API and check that the state of the
+ * child and parents are sane and consistent.
+ */
+static struct kunit_suite
+clk_multiple_parents_mux_test_suite = {
+ .name = "clk-multiple-parents-mux-test",
+ .init = clk_multiple_parents_mux_test_init,
+ .exit = clk_multiple_parents_mux_test_exit,
+ .test_cases = clk_multiple_parents_mux_test_cases,
+};
+
+static int
+clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx;
+ const char *parents[2] = { "missing-parent", "proper-parent"};
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
+ ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
+ if (ret)
+ return ret;
+
+ ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
+ &clk_multiple_parents_mux_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->parents_ctx[1].hw);
+}
+
+/*
+ * Test that, for a mux whose current parent hasn't been registered yet and is
+ * thus orphan, clk_get_parent() will return NULL.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+
+ KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux whose current parent hasn't been registered yet,
+ * calling clk_set_parent() to a valid parent will properly update the
+ * mux parent and its orphan status.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent, *new_parent;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_parent = clk_get_parent(clk);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_parent);
+ KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, calling clk_drop_range() on the mux won't affect the parent
+ * rate.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long parent_rate, new_parent_rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, parent_rate, 0);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_drop_range(clk);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_parent_rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, new_parent_rate, 0);
+ KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, the rate of the mux and its new parent are consistent.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long parent_rate, rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, parent_rate, 0);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, parent_rate, rate);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, calling clk_put() on the mux won't affect the parent rate.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk *clk, *parent;
+ unsigned long parent_rate, new_parent_rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ clk = clk_hw_get_clk(&ctx->hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, parent_rate, 0);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ clk_put(clk);
+
+ new_parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, new_parent_rate, 0);
+ KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
+
+ clk_put(parent);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, calling clk_set_rate_range() will affect the parent state if
+ * its rate is out of range.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, calling clk_set_rate_range() won't affect the parent state if
+ * its rate is within range.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long parent_rate, new_parent_rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, parent_rate, 0);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk,
+ DUMMY_CLOCK_INIT_RATE - 1000,
+ DUMMY_CLOCK_INIT_RATE + 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, new_parent_rate, 0);
+ KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux whose current parent hasn't been registered yet,
+ * calling clk_set_rate_range() will succeed, and will be taken into
+ * account when rounding a rate.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+ int ret;
+
+ ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan, was assigned a rate and
+ * then got switched to a valid parent, its rate is eventually within
+ * range.
+ *
+ * FIXME: Even though we update the rate as part of clk_set_parent(), we
+ * don't evaluate whether that new rate is within range and needs to be
+ * adjusted.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ kunit_skip(test, "This needs to be fixed in the core.");
+
+ clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
+ {}
+};
+
+/*
+ * Test suite for a basic mux clock with two parents. The default parent
+ * isn't registered, only the second parent is. By default, the clock
+ * will thus be orphan.
+ *
+ * These tests exercise the behaviour of the consumer API when dealing
+ * with an orphan clock, and how we deal with the transition to a valid
+ * parent.
+ */
+static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
+ .name = "clk-orphan-transparent-multiple-parent-mux-test",
+ .init = clk_orphan_transparent_multiple_parent_mux_test_init,
+ .exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
+ .test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
+};
+
struct clk_single_parent_ctx {
struct clk_dummy_context parent_ctx;
struct clk_hw hw;
};
+static int clk_single_parent_mux_test_init(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
+ ctx->parent_ctx.hw.init =
+ CLK_HW_INIT_NO_PARENT("parent-clk",
+ &clk_dummy_rate_ops,
+ 0);
+
+ ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
+ if (ret)
+ return ret;
+
+ ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
+ &clk_dummy_single_parent_ops,
+ CLK_SET_RATE_PARENT);
+
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+clk_single_parent_mux_test_exit(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->parent_ctx.hw);
+}
+
+/*
+ * Test that for a clock with a single parent, clk_get_parent() actually
+ * returns the parent.
+ */
+static void
+clk_test_single_parent_mux_get_parent(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock with a single parent, clk_has_parent() actually
+ * reports it as a parent.
+ */
+static void
+clk_test_single_parent_mux_has_parent(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
+
+ KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set disjoint ranges on the parent and then the child,
+ * the second call will return an error.
+ *
+ * FIXME: clk_set_rate_range() only considers the current clock when
+ * evaluating whether ranges are disjoint, and not the upstream clocks'
+ * ranges.
+ */
+static void
+clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ int ret;
+
+ kunit_skip(test, "This needs to be fixed in the core.");
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(parent, 1000, 2000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk, 3000, 4000);
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set disjoint ranges on the child and then the parent,
+ * the second call will return an error.
+ *
+ * FIXME: clk_set_rate_range() only considers the current clock when
+ * evaluating whether ranges are disjoint, and not the downstream clocks'
+ * ranges.
+ */
+static void
+clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ int ret;
+
+ kunit_skip(test, "This needs to be fixed in the core.");
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(clk, 1000, 2000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(parent, 3000, 4000);
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set a range on the parent and then call
+ * clk_round_rate(), the boundaries of the parent are taken into
+ * account.
+ */
+static void
+clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set a range on the parent and a more restrictive one on
+ * the child, and then call clk_round_rate(), the boundaries of the
+ * two clocks are taken into account.
+ */
+static void
+clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set a range on the child and a more restrictive one on
+ * the parent, and then call clk_round_rate(), the boundaries of the
+ * two clocks are taken into account.
+ */
+static void
+clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+ clk_put(clk);
+}
+
+static struct kunit_case clk_single_parent_mux_test_cases[] = {
+ KUNIT_CASE(clk_test_single_parent_mux_get_parent),
+ KUNIT_CASE(clk_test_single_parent_mux_has_parent),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
+ {}
+};
+
+/*
+ * Test suite for a basic mux clock with one parent, with
+ * CLK_SET_RATE_PARENT on the child.
+ *
+ * These tests exercise the consumer API and check that the state of the
+ * child and parent are sane and consistent.
+ */
+static struct kunit_suite
+clk_single_parent_mux_test_suite = {
+ .name = "clk-single-parent-mux-test",
+ .init = clk_single_parent_mux_test_init,
+ .exit = clk_single_parent_mux_test_exit,
+ .test_cases = clk_single_parent_mux_test_cases,
+};
+
static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
{
struct clk_single_parent_ctx *ctx;
@@ -298,23 +1245,18 @@ static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test
return 0;
}
-static void clk_orphan_transparent_single_parent_mux_test_exit(struct kunit *test)
-{
- struct clk_single_parent_ctx *ctx = test->priv;
-
- clk_hw_unregister(&ctx->hw);
- clk_hw_unregister(&ctx->parent_ctx.hw);
-}
-
/*
* Test that a mux-only clock, with an initial rate within a range,
* will still have the same rate after the range has been enforced.
+ *
+ * See:
+ * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
*/
static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate, new_rate;
rate = clk_get_rate(clk);
@@ -329,6 +1271,8 @@ static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
new_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, new_rate, 0);
KUNIT_EXPECT_EQ(test, rate, new_rate);
+
+ clk_put(clk);
}
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
@@ -336,13 +1280,151 @@ static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] =
{}
};
+/*
+ * Test suite for a basic mux clock with one parent. The parent is
+ * registered after its child. The clock will thus be an orphan when
+ * registered, but will no longer be when the tests run.
+ *
+ * These tests make sure a clock that used to be orphan has a sane,
+ * consistent behaviour.
+ */
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
.name = "clk-orphan-transparent-single-parent-test",
.init = clk_orphan_transparent_single_parent_mux_test_init,
- .exit = clk_orphan_transparent_single_parent_mux_test_exit,
+ .exit = clk_single_parent_mux_test_exit,
.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
+struct clk_single_parent_two_lvl_ctx {
+ struct clk_dummy_context parent_parent_ctx;
+ struct clk_dummy_context parent_ctx;
+ struct clk_hw hw;
+};
+
+static int
+clk_orphan_two_level_root_last_test_init(struct kunit *test)
+{
+ struct clk_single_parent_two_lvl_ctx *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->parent_ctx.hw.init =
+ CLK_HW_INIT("intermediate-parent",
+ "root-parent",
+ &clk_dummy_single_parent_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
+ if (ret)
+ return ret;
+
+ ctx->hw.init =
+ CLK_HW_INIT("test-clk", "intermediate-parent",
+ &clk_dummy_single_parent_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
+ ctx->parent_parent_ctx.hw.init =
+ CLK_HW_INIT_NO_PARENT("root-parent",
+ &clk_dummy_rate_ops,
+ 0);
+ ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+clk_orphan_two_level_root_last_test_exit(struct kunit *test)
+{
+ struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->parent_ctx.hw);
+ clk_hw_unregister(&ctx->parent_parent_ctx.hw);
+}
+
+/*
+ * Test that, for a clock whose parent used to be orphan, clk_get_rate()
+ * will return the proper rate.
+ */
+static void
+clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
+{
+ struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+
+ rate = clk_get_rate(clk);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a clock whose parent used to be orphan,
+ * clk_set_rate_range() won't affect its rate if it is already within
+ * range.
+ *
+ * See (for Exynos 4210):
+ * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
+ */
+static void
+clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
+{
+ struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+ int ret;
+
+ ret = clk_set_rate_range(clk,
+ DUMMY_CLOCK_INIT_RATE - 1000,
+ DUMMY_CLOCK_INIT_RATE + 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
+
+ clk_put(clk);
+}
+
+static struct kunit_case
+clk_orphan_two_level_root_last_test_cases[] = {
+ KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
+ KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
+ {}
+};
+
+/*
+ * Test suite for a basic, transparent, clock with a parent that is also
+ * such a clock. The parent's parent is registered last, while the
+ * parent and its child are registered in that order. The intermediate
+ * and leaf clocks will thus be orphan when registered, but the leaf
+ * clock itself will always have its parent and will never be
+ * reparented. Indeed, it's only orphan because its parent is.
+ *
+ * These tests exercise the behaviour of the consumer API when dealing
+ * with an orphan clock, and how we deal with the transition to a valid
+ * parent.
+ */
+static struct kunit_suite
+clk_orphan_two_level_root_last_test_suite = {
+ .name = "clk-orphan-two-level-root-last-test",
+ .init = clk_orphan_two_level_root_last_test_init,
+ .exit = clk_orphan_two_level_root_last_test_exit,
+ .test_cases = clk_orphan_two_level_root_last_test_cases,
+};
+
/*
* Test that clk_set_rate_range won't return an error for a valid range
* and that it will make sure the rate of the clock is within the
@@ -352,7 +1434,7 @@ static void clk_range_test_set_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -365,6 +1447,8 @@ static void clk_range_test_set_range(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -375,13 +1459,15 @@ static void clk_range_test_set_range_invalid(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
KUNIT_EXPECT_LT(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1 + 1000,
DUMMY_CLOCK_RATE_1),
0);
+
+ clk_put(clk);
}
/*
@@ -420,7 +1506,7 @@ static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
KUNIT_ASSERT_EQ(test,
@@ -433,6 +1519,8 @@ static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -443,7 +1531,7 @@ static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -460,6 +1548,8 @@ static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -472,7 +1562,7 @@ static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kuni
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
long rounded;
KUNIT_ASSERT_EQ(test,
@@ -489,6 +1579,8 @@ static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kuni
0);
KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
+
+ clk_put(clk);
}
/*
@@ -499,7 +1591,7 @@ static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
KUNIT_ASSERT_EQ(test,
@@ -512,6 +1604,8 @@ static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -522,7 +1616,7 @@ static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -539,6 +1633,8 @@ static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -551,7 +1647,7 @@ static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kun
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
long rounded;
KUNIT_ASSERT_EQ(test,
@@ -568,6 +1664,8 @@ static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kun
0);
KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
+
+ clk_put(clk);
}
/*
@@ -582,7 +1680,7 @@ static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -598,6 +1696,8 @@ static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(clk);
}
/*
@@ -612,7 +1712,7 @@ static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -628,6 +1728,8 @@ static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
static struct kunit_case clk_range_test_cases[] = {
@@ -645,6 +1747,12 @@ static struct kunit_case clk_range_test_cases[] = {
{}
};
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate range API: clk_set_rate_range(),
+ * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
+ */
static struct kunit_suite clk_range_test_suite = {
.name = "clk-range-test",
.init = clk_test_init,
@@ -664,7 +1772,7 @@ static void clk_range_test_set_range_rate_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -700,6 +1808,8 @@ static void clk_range_test_set_range_rate_maximized(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -714,7 +1824,7 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
@@ -758,14 +1868,79 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
clk_put(user2);
clk_put(user1);
+ clk_put(clk);
+}
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), across multiple users, the core will reevaluate
+ * whether a new rate is needed, including when a user drops its clock.
+ *
+ * With clk_dummy_maximize_rate_ops, this means that the rate will
+ * trail along the maximum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *user1, *user2;
+ unsigned long rate;
+
+ user1 = clk_hw_get_clk(hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+ user2 = clk_hw_get_clk(hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+ 0);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(user1,
+ 0,
+ DUMMY_CLOCK_RATE_2),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(user2,
+ 0,
+ DUMMY_CLOCK_RATE_1),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(user2);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(user1);
+ clk_put(clk);
}
static struct kunit_case clk_range_maximize_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range_rate_maximized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
+ KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
{}
};
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate range API: clk_set_rate_range(),
+ * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
+ * driver that will always try to run at the highest possible rate.
+ */
static struct kunit_suite clk_range_maximize_test_suite = {
.name = "clk-range-maximize-test",
.init = clk_maximize_test_init,
@@ -785,7 +1960,7 @@ static void clk_range_test_set_range_rate_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -821,6 +1996,8 @@ static void clk_range_test_set_range_rate_minimized(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(clk);
}
/*
@@ -835,7 +2012,7 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
@@ -875,14 +2052,75 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
clk_put(user2);
clk_put(user1);
+ clk_put(clk);
+}
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), across multiple users, the core will reevaluate
+ * whether a new rate is needed, including when a user drops its clock.
+ *
+ * With clk_dummy_minimize_rate_ops, this means that the rate will
+ * trail along the minimum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *user1, *user2;
+ unsigned long rate;
+
+ user1 = clk_hw_get_clk(hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+ user2 = clk_hw_get_clk(hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(user1,
+ DUMMY_CLOCK_RATE_1,
+ ULONG_MAX),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(user2,
+ DUMMY_CLOCK_RATE_2,
+ ULONG_MAX),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(user2);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(user1);
+ clk_put(clk);
}
static struct kunit_case clk_range_minimize_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range_rate_minimized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
+ KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
{}
};
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate range API: clk_set_rate_range(),
+ * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
+ * driver that will always try to run at the lowest possible rate.
+ */
static struct kunit_suite clk_range_minimize_test_suite = {
.name = "clk-range-minimize-test",
.init = clk_minimize_test_init,
@@ -890,11 +2128,284 @@ static struct kunit_suite clk_range_minimize_test_suite = {
.test_cases = clk_range_minimize_test_cases,
};
+struct clk_leaf_mux_ctx {
+ struct clk_multiple_parent_ctx mux_ctx;
+ struct clk_hw hw;
+};
+
+static int
+clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
+{
+ struct clk_leaf_mux_ctx *ctx;
+ const char *top_parents[2] = { "parent-0", "parent-1" };
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
+ if (ret)
+ return ret;
+
+ ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
+ if (ret)
+ return ret;
+
+ ctx->mux_ctx.current_parent = 0;
+ ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
+ &clk_multiple_parents_mux_ops,
+ 0);
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
+ if (ret)
+ return ret;
+
+ ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
+ &clk_dummy_single_parent_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
+{
+ struct clk_leaf_mux_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->mux_ctx.hw);
+ clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
+ clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
+}
+
+/*
+ * Test that, for a clock that will forward any rate request to its
+ * parent, the rate request structure returned by __clk_determine_rate
+ * is sane and will be what we expect.
+ */
+static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
+{
+ struct clk_leaf_mux_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk_rate_request req;
+ unsigned long rate;
+ int ret;
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);
+
+ ret = __clk_determine_rate(hw, &req);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
+ KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
+ KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
+
+ clk_put(clk);
+}
+
+static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
+ KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
+ {}
+};
+
+/*
+ * Test suite for a clock whose parent is a mux with multiple parents.
+ * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
+ * requests to the mux, which will then select which parent is the best
+ * fit for a given rate.
+ *
+ * These tests exercise the behaviour of muxes, and the proper selection
+ * of parents.
+ */
+static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
+ .name = "clk-leaf-mux-set-rate-parent",
+ .init = clk_leaf_mux_set_rate_parent_test_init,
+ .exit = clk_leaf_mux_set_rate_parent_test_exit,
+ .test_cases = clk_leaf_mux_set_rate_parent_test_cases,
+};
+
+struct clk_mux_notifier_rate_change {
+ bool done;
+ unsigned long old_rate;
+ unsigned long new_rate;
+ wait_queue_head_t wq;
+};
+
+struct clk_mux_notifier_ctx {
+ struct clk_multiple_parent_ctx mux_ctx;
+ struct clk *clk;
+ struct notifier_block clk_nb;
+ struct clk_mux_notifier_rate_change pre_rate_change;
+ struct clk_mux_notifier_rate_change post_rate_change;
+};
+
+#define NOTIFIER_TIMEOUT_MS 100
+
+static int clk_mux_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct clk_notifier_data *clk_data = data;
+ struct clk_mux_notifier_ctx *ctx = container_of(nb,
+ struct clk_mux_notifier_ctx,
+ clk_nb);
+
+ if (action & PRE_RATE_CHANGE) {
+ ctx->pre_rate_change.old_rate = clk_data->old_rate;
+ ctx->pre_rate_change.new_rate = clk_data->new_rate;
+ ctx->pre_rate_change.done = true;
+ wake_up_interruptible(&ctx->pre_rate_change.wq);
+ }
+
+ if (action & POST_RATE_CHANGE) {
+ ctx->post_rate_change.old_rate = clk_data->old_rate;
+ ctx->post_rate_change.new_rate = clk_data->new_rate;
+ ctx->post_rate_change.done = true;
+ wake_up_interruptible(&ctx->post_rate_change.wq);
+ }
+
+ return 0;
+}
+
+static int clk_mux_notifier_test_init(struct kunit *test)
+{
+ struct clk_mux_notifier_ctx *ctx;
+ const char *top_parents[2] = { "parent-0", "parent-1" };
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+ ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
+ init_waitqueue_head(&ctx->pre_rate_change.wq);
+ init_waitqueue_head(&ctx->post_rate_change.wq);
+
+ ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
+ if (ret)
+ return ret;
+
+ ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
+ if (ret)
+ return ret;
+
+ ctx->mux_ctx.current_parent = 0;
+ ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
+ &clk_multiple_parents_mux_ops,
+ 0);
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
+ if (ret)
+ return ret;
+
+ ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
+ ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void clk_mux_notifier_test_exit(struct kunit *test)
+{
+ struct clk_mux_notifier_ctx *ctx = test->priv;
+ struct clk *clk = ctx->clk;
+
+ clk_notifier_unregister(clk, &ctx->clk_nb);
+ clk_put(clk);
+
+ clk_hw_unregister(&ctx->mux_ctx.hw);
+ clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
+ clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
+}
+
+/*
+ * Test that if we have a notifier registered on a mux, the core
+ * will notify us when we switch to another parent, and with the proper
+ * old and new rates.
+ */
+static void clk_mux_notifier_set_parent_test(struct kunit *test)
+{
+ struct clk_mux_notifier_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->mux_ctx.hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
+ int ret;
+
+ ret = clk_set_parent(clk, new_parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
+ ctx->pre_rate_change.done,
+ msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
+
+ ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
+ ctx->post_rate_change.done,
+ msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(new_parent);
+ clk_put(clk);
+}
+
+static struct kunit_case clk_mux_notifier_test_cases[] = {
+ KUNIT_CASE(clk_mux_notifier_set_parent_test),
+ {}
+};
+
+/*
+ * Test suite for a mux with multiple parents, and a notifier registered
+ * on the mux.
+ *
+ * These tests exercise the behaviour of notifiers.
+ */
+static struct kunit_suite clk_mux_notifier_test_suite = {
+ .name = "clk-mux-notifier",
+ .init = clk_mux_notifier_test_init,
+ .exit = clk_mux_notifier_test_exit,
+ .test_cases = clk_mux_notifier_test_cases,
+};
+
kunit_test_suites(
+ &clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
+ &clk_multiple_parents_mux_test_suite,
+ &clk_mux_notifier_test_suite,
+ &clk_orphan_transparent_multiple_parent_mux_test_suite,
&clk_orphan_transparent_single_parent_test_suite,
+ &clk_orphan_two_level_root_last_test_suite,
&clk_range_test_suite,
&clk_range_maximize_test_suite,
- &clk_range_minimize_test_suite
+ &clk_range_minimize_test_suite,
+ &clk_single_parent_mux_test_suite,
+ &clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");
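
The recurring change in the clk_test.c hunks above replaces direct use of hw->clk with clk_hw_get_clk()/clk_put(), so each test takes and releases its own consumer reference instead of borrowing the framework-internal one. A minimal sketch of that consumer-handle pattern outside the test suite; the surrounding example_read_rate() helper and the anonymous (NULL) con_id are assumptions for illustration only:

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

static int example_read_rate(struct clk_hw *hw, unsigned long *rate)
{
        /* Take a consumer reference for this clk_hw (NULL con_id). */
        struct clk *clk = clk_hw_get_clk(hw, NULL);

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        *rate = clk_get_rate(clk);

        /* Drop the reference so the provider can later be unregistered. */
        clk_put(clk);

        return 0;
}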
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 67f601a41023..ee37d0be6877 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -165,7 +165,7 @@ vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
cla->cl.clk_hw = hw;
if (con_id) {
- strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
+ strscpy(cla->con_id, con_id, sizeof(cla->con_id));
cla->cl.con_id = cla->con_id;
}
@@ -346,46 +346,12 @@ int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
}
EXPORT_SYMBOL(clk_hw_register_clkdev);
-static void devm_clkdev_release(struct device *dev, void *res)
+static void devm_clkdev_release(void *res)
{
- clkdev_drop(*(struct clk_lookup **)res);
-}
-
-static int devm_clk_match_clkdev(struct device *dev, void *res, void *data)
-{
- struct clk_lookup **l = res;
-
- return *l == data;
+ clkdev_drop(res);
}
/**
- * devm_clk_release_clkdev - Resource managed clkdev lookup release
- * @dev: device this lookup is bound
- * @con_id: connection ID string on device
- * @dev_id: format string describing device name
- *
- * Drop the clkdev lookup created with devm_clk_hw_register_clkdev.
- * Normally this function will not need to be called and the resource
- * management code will ensure that the resource is freed.
- */
-void devm_clk_release_clkdev(struct device *dev, const char *con_id,
- const char *dev_id)
-{
- struct clk_lookup *cl;
- int rval;
-
- mutex_lock(&clocks_mutex);
- cl = clk_find(dev_id, con_id);
- mutex_unlock(&clocks_mutex);
-
- WARN_ON(!cl);
- rval = devres_release(dev, devm_clkdev_release,
- devm_clk_match_clkdev, cl);
- WARN_ON(rval);
-}
-EXPORT_SYMBOL(devm_clk_release_clkdev);
-
-/**
* devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw
* @dev: device this lookup is bound
* @hw: struct clk_hw to associate with all clk_lookups
@@ -403,17 +369,13 @@ EXPORT_SYMBOL(devm_clk_release_clkdev);
int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
const char *con_id, const char *dev_id)
{
- int rval = -ENOMEM;
- struct clk_lookup **cl;
-
- cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL);
- if (cl) {
- rval = do_clk_register_clkdev(hw, cl, con_id, dev_id);
- if (!rval)
- devres_add(dev, cl);
- else
- devres_free(cl);
- }
- return rval;
+ struct clk_lookup *cl;
+ int rval;
+
+ rval = do_clk_register_clkdev(hw, &cl, con_id, dev_id);
+ if (rval)
+ return rval;
+
+ return devm_add_action_or_reset(dev, devm_clkdev_release, cl);
}
EXPORT_SYMBOL(devm_clk_hw_register_clkdev);
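
The devm_clk_hw_register_clkdev() rework above drops the devres_alloc()/devres_add() boilerplate in favour of devm_add_action_or_reset(), which either schedules the release callback for device teardown or, if registration fails, runs it immediately. A generic sketch of that idiom under the same assumptions; struct my_resource and the my_resource_*() helpers are hypothetical:

#include <linux/device.h>
#include <linux/err.h>

struct my_resource;                                             /* hypothetical */
struct my_resource *my_resource_create(struct device *dev);     /* hypothetical */
void my_resource_destroy(struct my_resource *res);              /* hypothetical */

static void my_resource_release(void *res)
{
        my_resource_destroy(res);
}

static int my_driver_setup(struct device *dev)
{
        struct my_resource *res;

        res = my_resource_create(dev);
        if (IS_ERR(res))
                return PTR_ERR(res);

        /*
         * On failure the release callback runs right away; otherwise it
         * runs automatically when the device is unbound.
         */
        return devm_add_action_or_reset(dev, my_resource_release, res);
}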
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
index 11178b79b483..be6f55d37b49 100644
--- a/drivers/clk/davinci/Makefile
+++ b/drivers/clk/davinci/Makefile
@@ -8,14 +8,10 @@ obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
obj-$(CONFIG_ARCH_DAVINCI_DM355) += pll-dm355.o
obj-$(CONFIG_ARCH_DAVINCI_DM365) += pll-dm365.o
-obj-$(CONFIG_ARCH_DAVINCI_DM644x) += pll-dm644x.o
-obj-$(CONFIG_ARCH_DAVINCI_DM646x) += pll-dm646x.o
obj-y += psc.o
obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
obj-$(CONFIG_ARCH_DAVINCI_DM355) += psc-dm355.o
obj-$(CONFIG_ARCH_DAVINCI_DM365) += psc-dm365.o
-obj-$(CONFIG_ARCH_DAVINCI_DM644x) += psc-dm644x.o
-obj-$(CONFIG_ARCH_DAVINCI_DM646x) += psc-dm646x.o
endif
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index 77d18276bfe8..4103d605e804 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -510,8 +510,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev,
fck_clk = devm_clk_get(dev, "fck");
if (IS_ERR(fck_clk)) {
- if (PTR_ERR(fck_clk) != -EPROBE_DEFER)
- dev_err(dev, "Missing fck clock\n");
+ dev_err_probe(dev, PTR_ERR(fck_clk), "Missing fck clock\n");
return ERR_CAST(fck_clk);
}
diff --git a/drivers/clk/davinci/pll-dm644x.c b/drivers/clk/davinci/pll-dm644x.c
deleted file mode 100644
index 7650fadfaac8..000000000000
--- a/drivers/clk/davinci/pll-dm644x.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DM644X
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/bitops.h>
-#include <linux/clk/davinci.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info dm644x_pll1_info = {
- .name = "pll1",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 1,
- .pllm_max = 32,
- .pllout_min_rate = 400000000,
- .pllout_max_rate = 600000000, /* 810MHz @ 1.3V, -810 only */
- .flags = PLL_HAS_CLKMODE | PLL_HAS_POSTDIV,
-};
-
-SYSCLK(1, pll1_sysclk1, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-
-int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm644x_pll1_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
- clk_register_clkdev(clk, "pll1_sysclk1", "dm644x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm644x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
- clk_register_clkdev(clk, "pll1_sysclk3", "dm644x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
- clk_register_clkdev(clk, "pll1_sysclk5", "dm644x-psc");
-
- clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
- clk_register_clkdev(clk, "pll1_auxclk", "dm644x-psc");
-
- davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
-
- return 0;
-}
-
-static const struct davinci_pll_clk_info dm644x_pll2_info = {
- .name = "pll2",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 1,
- .pllm_max = 32,
- .pllout_min_rate = 400000000,
- .pllout_max_rate = 900000000,
- .flags = PLL_HAS_POSTDIV | PLL_POSTDIV_FIXED_DIV,
-};
-
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, 0);
-SYSCLK(2, pll2_sysclk2, pll2_pllen, 4, 0);
-
-int dm644x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- davinci_pll_clk_register(dev, &dm644x_pll2_info, "oscin", base, cfgchip);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
-
- davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll-dm646x.c b/drivers/clk/davinci/pll-dm646x.c
deleted file mode 100644
index 26982970df0e..000000000000
--- a/drivers/clk/davinci/pll-dm646x.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DM646X
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info dm646x_pll1_info = {
- .name = "pll1",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 14,
- .pllm_max = 32,
- .flags = PLL_HAS_CLKMODE,
-};
-
-SYSCLK(1, pll1_sysclk1, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(4, pll1_sysclk4, pll1_pllen, 4, 0);
-SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, 0);
-SYSCLK(6, pll1_sysclk6, pll1_pllen, 4, 0);
-SYSCLK(8, pll1_sysclk8, pll1_pllen, 4, 0);
-SYSCLK(9, pll1_sysclk9, pll1_pllen, 4, 0);
-
-int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm646x_pll1_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
- clk_register_clkdev(clk, "pll1_sysclk1", "dm646x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm646x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
- clk_register_clkdev(clk, "pll1_sysclk3", "dm646x-psc");
- clk_register_clkdev(clk, NULL, "davinci-wdt");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
- clk_register_clkdev(clk, "pll1_sysclk4", "dm646x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
- clk_register_clkdev(clk, "pll1_sysclk5", "dm646x-psc");
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk6, base);
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk8, base);
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk9, base);
-
- davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
-
- davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
-
- return 0;
-}
-
-static const struct davinci_pll_clk_info dm646x_pll2_info = {
- .name = "pll2",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 14,
- .pllm_max = 32,
- .flags = 0,
-};
-
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, SYSCLK_ALWAYS_ENABLED);
-
-int dm646x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- davinci_pll_clk_register(dev, &dm646x_pll2_info, "oscin", base, cfgchip);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 0d750433eb42..f862f5e2b3fc 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -98,7 +98,7 @@
* @hw: clk_hw for the pll
* @base: Base memory address
* @pllm_min: The minimum allowable PLLM[PLLM] value
- * @pllm_max: The maxiumum allowable PLLM[PLLM] value
+ * @pllm_max: The maximum allowable PLLM[PLLM] value
* @pllm_mask: Bitmask for PLLM[PLLM] value
*/
struct davinci_pll_clk {
@@ -890,14 +890,6 @@ static const struct platform_device_id davinci_pll_id_table[] = {
{ .name = "dm365-pll1", .driver_data = (kernel_ulong_t)dm365_pll1_init },
{ .name = "dm365-pll2", .driver_data = (kernel_ulong_t)dm365_pll2_init },
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
- { .name = "dm644x-pll1", .driver_data = (kernel_ulong_t)dm644x_pll1_init },
- { .name = "dm644x-pll2", .driver_data = (kernel_ulong_t)dm644x_pll2_init },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
- { .name = "dm646x-pll1", .driver_data = (kernel_ulong_t)dm646x_pll1_init },
- { .name = "dm646x-pll2", .driver_data = (kernel_ulong_t)dm646x_pll2_init },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index c2a453caa131..1773277bc690 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -130,11 +130,5 @@ int of_da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cf
#ifdef CONFIG_ARCH_DAVINCI_DM355
int dm355_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
-int dm644x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
-int dm646x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
#endif /* __CLK_DAVINCI_PLL_H___ */
diff --git a/drivers/clk/davinci/psc-dm644x.c b/drivers/clk/davinci/psc-dm644x.c
deleted file mode 100644
index 0cea6e0bd5f0..000000000000
--- a/drivers/clk/davinci/psc-dm644x.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DaVinci DM644x
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
-LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
-LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
-LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
- NULL, "ti-aemif");
-LPSC_CLKDEV1(mmcsd_clkdev, NULL, "dm6441-mmc.0");
-LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
-LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
-LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
-
-static const struct davinci_lpsc_clk_info dm644x_psc_info[] = {
- LPSC(0, 0, vpss_master, pll1_sysclk3, vpss_master_clkdev, 0),
- LPSC(1, 0, vpss_slave, pll1_sysclk3, vpss_slave_clkdev, 0),
- LPSC(6, 0, emac, pll1_sysclk5, emac_clkdev, 0),
- LPSC(9, 0, usb, pll1_sysclk5, usb_clkdev, 0),
- LPSC(10, 0, ide, pll1_sysclk5, ide_clkdev, 0),
- LPSC(11, 0, vlynq, pll1_sysclk5, NULL, 0),
- LPSC(14, 0, aemif, pll1_sysclk5, aemif_clkdev, 0),
- LPSC(15, 0, mmcsd, pll1_sysclk5, mmcsd_clkdev, 0),
- LPSC(17, 0, asp0, pll1_sysclk5, asp0_clkdev, 0),
- LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
- LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
- LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
- LPSC(21, 0, uart2, pll1_auxclk, uart2_clkdev, 0),
- LPSC(22, 0, spi, pll1_sysclk5, NULL, 0),
- LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
- LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
- LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
- LPSC(26, 0, gpio, pll1_sysclk5, gpio_clkdev, 0),
- LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
- /* REVISIT: why can't this be disabled? */
- LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(31, 0, arm, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- /* REVISIT how to disable? */
- LPSC(39, 1, dsp, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
- /* REVISIT how to disable? */
- LPSC(40, 1, vicp, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- { }
-};
-
-int dm644x_psc_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, dm644x_psc_info, 41, base);
-}
-
-static struct clk_bulk_data dm644x_psc_parent_clks[] = {
- { .id = "pll1_sysclk1" },
- { .id = "pll1_sysclk2" },
- { .id = "pll1_sysclk3" },
- { .id = "pll1_sysclk5" },
- { .id = "pll1_auxclk" },
-};
-
-const struct davinci_psc_init_data dm644x_psc_init_data = {
- .parent_clks = dm644x_psc_parent_clks,
- .num_parent_clks = ARRAY_SIZE(dm644x_psc_parent_clks),
- .psc_init = &dm644x_psc_init,
-};
diff --git a/drivers/clk/davinci/psc-dm646x.c b/drivers/clk/davinci/psc-dm646x.c
deleted file mode 100644
index 20012dc7471a..000000000000
--- a/drivers/clk/davinci/psc-dm646x.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DaVinci DM646x
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
- NULL, "ti-aemif");
-LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
-LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
-
-static const struct davinci_lpsc_clk_info dm646x_psc_info[] = {
- LPSC(0, 0, arm, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- /* REVISIT how to disable? */
- LPSC(1, 0, dsp, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(4, 0, edma_cc, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(5, 0, edma_tc0, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(6, 0, edma_tc1, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(7, 0, edma_tc2, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(8, 0, edma_tc3, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(10, 0, ide, pll1_sysclk4, ide_clkdev, 0),
- LPSC(14, 0, emac, pll1_sysclk3, emac_clkdev, 0),
- LPSC(16, 0, vpif0, ref_clk, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(17, 0, vpif1, ref_clk, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(21, 0, aemif, pll1_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(22, 0, mcasp0, pll1_sysclk3, mcasp0_clkdev, 0),
- LPSC(23, 0, mcasp1, pll1_sysclk3, mcasp1_clkdev, 0),
- LPSC(26, 0, uart0, aux_clkin, uart0_clkdev, 0),
- LPSC(27, 0, uart1, aux_clkin, uart1_clkdev, 0),
- LPSC(28, 0, uart2, aux_clkin, uart2_clkdev, 0),
- /* REVIST: disabling hangs system */
- LPSC(29, 0, pwm0, pll1_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
- /* REVIST: disabling hangs system */
- LPSC(30, 0, pwm1, pll1_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(31, 0, i2c, pll1_sysclk3, i2c_clkdev, 0),
- LPSC(33, 0, gpio, pll1_sysclk3, gpio_clkdev, 0),
- LPSC(34, 0, timer0, pll1_sysclk3, timer0_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(35, 0, timer1, pll1_sysclk3, NULL, 0),
- { }
-};
-
-int dm646x_psc_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, dm646x_psc_info, 46, base);
-}
-
-static struct clk_bulk_data dm646x_psc_parent_clks[] = {
- { .id = "ref_clk" },
- { .id = "aux_clkin" },
- { .id = "pll1_sysclk1" },
- { .id = "pll1_sysclk2" },
- { .id = "pll1_sysclk3" },
- { .id = "pll1_sysclk4" },
- { .id = "pll1_sysclk5" },
-};
-
-const struct davinci_psc_init_data dm646x_psc_init_data = {
- .parent_clks = dm646x_psc_parent_clks,
- .num_parent_clks = ARRAY_SIZE(dm646x_psc_parent_clks),
- .psc_init = &dm646x_psc_init,
-};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 7387e7f6276e..42a59dbd49c8 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -517,12 +517,6 @@ static const struct platform_device_id davinci_psc_id_table[] = {
#ifdef CONFIG_ARCH_DAVINCI_DM365
{ .name = "dm365-psc", .driver_data = (kernel_ulong_t)&dm365_psc_init_data },
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
- { .name = "dm644x-psc", .driver_data = (kernel_ulong_t)&dm644x_psc_init_data },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
- { .name = "dm646x-psc", .driver_data = (kernel_ulong_t)&dm646x_psc_init_data },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 69070f834391..5e382b675518 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -110,11 +110,5 @@ extern const struct davinci_psc_init_data dm355_psc_init_data;
#ifdef CONFIG_ARCH_DAVINCI_DM365
extern const struct davinci_psc_init_data dm365_psc_init_data;
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
-extern const struct davinci_psc_init_data dm644x_psc_init_data;
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
-extern const struct davinci_psc_init_data dm646x_psc_init_data;
-#endif
#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 88b9b9285d22..e8aacb0ee6ac 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -12,6 +12,7 @@ mxc-clk-objs += clk-fixup-div.o
mxc-clk-objs += clk-fixup-mux.o
mxc-clk-objs += clk-frac-pll.o
mxc-clk-objs += clk-gate2.o
+mxc-clk-objs += clk-gate-93.o
mxc-clk-objs += clk-gate-exclusive.o
mxc-clk-objs += clk-pfd.o
mxc-clk-objs += clk-pfdv2.o
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index b44619aa5ca5..74a66b0203e4 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -9,22 +9,180 @@
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include "clk.h"
+#define TIMEOUT_US 500U
+
#define CCM_DIV_SHIFT 0
#define CCM_DIV_WIDTH 8
#define CCM_MUX_SHIFT 8
#define CCM_MUX_MASK 3
#define CCM_OFF_SHIFT 24
+#define CCM_BUSY_SHIFT 28
+#define STAT_OFFSET 0x4
#define AUTHEN_OFFSET 0x30
#define TZ_NS_SHIFT 9
#define TZ_NS_MASK BIT(9)
+#define WHITE_LIST_SHIFT 16
+
+static int imx93_clk_composite_wait_ready(struct clk_hw *hw, void __iomem *reg)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout_atomic(reg + STAT_OFFSET, val, !(val & BIT(CCM_BUSY_SHIFT)),
+ 0, TIMEOUT_US);
+ if (ret)
+ pr_err("Slice[%s] busy timeout\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static void imx93_clk_composite_gate_endisable(struct clk_hw *hw, int enable)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags;
+ u32 reg;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ reg = readl(gate->reg);
+
+ if (enable)
+ reg &= ~BIT(gate->bit_idx);
+ else
+ reg |= BIT(gate->bit_idx);
+
+ writel(reg, gate->reg);
+
+ imx93_clk_composite_wait_ready(hw, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
+{
+ imx93_clk_composite_gate_endisable(hw, 1);
+
+ return 0;
+}
+
+static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
+{
+ imx93_clk_composite_gate_endisable(hw, 0);
+}
+
+static const struct clk_ops imx93_clk_composite_gate_ops = {
+ .enable = imx93_clk_composite_gate_enable,
+ .disable = imx93_clk_composite_gate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static unsigned long
+imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long
+imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+{
+ return clk_divider_ops.round_rate(hw, rate, prate);
+}
+
+static int
+imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int value;
+ unsigned long flags = 0;
+ u32 val;
+ int ret;
+
+ value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
+ val = readl(divider->reg);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
+ val |= (u32)value << divider->shift;
+ writel(val, divider->reg);
+
+ ret = imx93_clk_composite_wait_ready(hw, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops imx93_clk_composite_divider_ops = {
+ .recalc_rate = imx93_clk_composite_divider_recalc_rate,
+ .round_rate = imx93_clk_composite_divider_round_rate,
+ .determine_rate = imx93_clk_composite_divider_determine_rate,
+ .set_rate = imx93_clk_composite_divider_set_rate,
+};
+
+static u8 imx93_clk_composite_mux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int imx93_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+ unsigned long flags = 0;
+ u32 reg;
+ int ret;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ reg = readl(mux->reg);
+ reg &= ~(mux->mask << mux->shift);
+ val = val << mux->shift;
+ reg |= val;
+ writel(reg, mux->reg);
+
+ ret = imx93_clk_composite_wait_ready(hw, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return ret;
+}
+
+static int
+imx93_clk_composite_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ return clk_mux_ops.determine_rate(hw, req);
+}
+
+static const struct clk_ops imx93_clk_composite_mux_ops = {
+ .get_parent = imx93_clk_composite_mux_get_parent,
+ .set_parent = imx93_clk_composite_mux_set_parent,
+ .determine_rate = imx93_clk_composite_mux_determine_rate,
+};
+
struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
- int num_parents, void __iomem *reg,
+ int num_parents, void __iomem *reg, u32 domain_id,
unsigned long flags)
{
struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
@@ -33,6 +191,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
bool clk_ro = false;
+ u32 authen;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -55,7 +214,8 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
div->lock = &imx_ccm_lock;
div->flags = CLK_DIVIDER_ROUND_CLOSEST;
- if (!(readl(reg + AUTHEN_OFFSET) & TZ_NS_MASK))
+ authen = readl(reg + AUTHEN_OFFSET);
+ if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
clk_ro = true;
if (clk_ro) {
@@ -74,9 +234,10 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
gate->flags = CLK_GATE_SET_TO_DISABLE;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
- mux_hw, &clk_mux_ops, div_hw,
- &clk_divider_ops, gate_hw,
- &clk_gate_ops, flags | CLK_SET_RATE_NO_REPARENT);
+ mux_hw, &imx93_clk_composite_mux_ops, div_hw,
+ &imx93_clk_composite_divider_ops, gate_hw,
+ &imx93_clk_composite_gate_ops,
+ flags | CLK_SET_RATE_NO_REPARENT);
}
if (IS_ERR(hw))
diff --git a/drivers/clk/imx/clk-gate-93.c b/drivers/clk/imx/clk-gate-93.c
new file mode 100644
index 000000000000..ceb56b290394
--- /dev/null
+++ b/drivers/clk/imx/clk-gate-93.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2022 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define DIRECT_OFFSET 0x0
+
+/*
+ * 0b000 - LPCG will be OFF in any CPU mode.
+ * 0b100 - LPCG will be ON in any CPU mode.
+ */
+#define LPM_SETTING_OFF 0x0
+#define LPM_SETTING_ON 0x4
+
+#define LPM_CUR_OFFSET 0x1c
+
+#define AUTHEN_OFFSET 0x30
+#define CPULPM_EN BIT(2)
+#define TZ_NS_SHIFT 9
+#define TZ_NS_MASK BIT(9)
+
+#define WHITE_LIST_SHIFT 16
+
+struct imx93_clk_gate {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u32 bit_idx;
+ u32 val;
+ u32 mask;
+ spinlock_t *lock;
+ unsigned int *share_count;
+};
+
+#define to_imx93_clk_gate(_hw) container_of(_hw, struct imx93_clk_gate, hw)
+
+static void imx93_clk_gate_do_hardware(struct clk_hw *hw, bool enable)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ u32 val;
+
+ val = readl(gate->reg + AUTHEN_OFFSET);
+ if (val & CPULPM_EN) {
+ val = enable ? LPM_SETTING_ON : LPM_SETTING_OFF;
+ writel(val, gate->reg + LPM_CUR_OFFSET);
+ } else {
+ val = readl(gate->reg + DIRECT_OFFSET);
+ val &= ~(gate->mask << gate->bit_idx);
+ if (enable)
+ val |= (gate->val & gate->mask) << gate->bit_idx;
+ writel(val, gate->reg + DIRECT_OFFSET);
+ }
+}
+
+static int imx93_clk_gate_enable(struct clk_hw *hw)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (gate->share_count && (*gate->share_count)++ > 0)
+ goto out;
+
+ imx93_clk_gate_do_hardware(hw, true);
+out:
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void imx93_clk_gate_disable(struct clk_hw *hw)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (gate->share_count) {
+ if (WARN_ON(*gate->share_count == 0))
+ goto out;
+ else if (--(*gate->share_count) > 0)
+ goto out;
+ }
+
+ imx93_clk_gate_do_hardware(hw, false);
+out:
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int imx93_clk_gate_reg_is_enabled(struct imx93_clk_gate *gate)
+{
+ u32 val = readl(gate->reg + AUTHEN_OFFSET);
+
+ if (val & CPULPM_EN) {
+ val = readl(gate->reg + LPM_CUR_OFFSET);
+ if (val == LPM_SETTING_ON)
+ return 1;
+ } else {
+ val = readl(gate->reg);
+ if (((val >> gate->bit_idx) & gate->mask) == gate->val)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int imx93_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ ret = imx93_clk_gate_reg_is_enabled(gate);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return ret;
+}
+
+static void imx93_clk_gate_disable_unused(struct clk_hw *hw)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (!gate->share_count || *gate->share_count == 0)
+ imx93_clk_gate_do_hardware(hw, false);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static const struct clk_ops imx93_clk_gate_ops = {
+ .enable = imx93_clk_gate_enable,
+ .disable = imx93_clk_gate_disable,
+ .disable_unused = imx93_clk_gate_disable_unused,
+ .is_enabled = imx93_clk_gate_is_enabled,
+};
+
+static const struct clk_ops imx93_clk_gate_ro_ops = {
+ .is_enabled = imx93_clk_gate_is_enabled,
+};
+
+struct clk_hw *imx93_clk_gate(struct device *dev, const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, u32 bit_idx, u32 val,
+ u32 mask, u32 domain_id, unsigned int *share_count)
+{
+ struct imx93_clk_gate *gate;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+ u32 authen;
+
+ gate = kzalloc(sizeof(struct imx93_clk_gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = reg;
+ gate->lock = &imx_ccm_lock;
+ gate->bit_idx = bit_idx;
+ gate->val = val;
+ gate->mask = mask;
+ gate->share_count = share_count;
+
+ init.name = name;
+ init.ops = &imx93_clk_gate_ops;
+ init.flags = flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ gate->hw.init = &init;
+ hw = &gate->hw;
+
+ authen = readl(reg + AUTHEN_OFFSET);
+ if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
+ init.ops = &imx93_clk_gate_ro_ops;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(gate);
+ return ERR_PTR(ret);
+ }
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(imx93_clk_gate);
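Both the new gate and the reworked composite fall back to read-only ops based on the same AUTHEN word. As a rough illustration (the helper name is hypothetical and not in the patch), with the domain_id of 3 that clk-imx93.c passes, the white-list test lands on AUTHEN bit 16 + 3 = 19:

	/*
	 * Sketch only: mirrors the checks in imx93_clk_gate() and
	 * imx93_clk_composite_flags(). A clock is treated as read-only when
	 * either the TrustZone non-secure bit or the per-domain white-list
	 * bit is clear.
	 */
	static bool imx93_clk_is_writable(void __iomem *reg, u32 domain_id)
	{
		u32 authen = readl(reg + AUTHEN_OFFSET);

		if (!(authen & TZ_NS_MASK))	/* bit 9 */
			return false;

		/* bit 19 when domain_id == 3 */
		return authen & BIT(WHITE_LIST_SHIFT + domain_id);
	}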
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index e89db568f5a8..652ae58c2735 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -665,8 +665,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0);
hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0);
hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
- hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0);
hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
+ hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0);
hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", ccm_base + 0x4450, 0);
hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", ccm_base + 0x4460, 0);
hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0);
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index dcc41d178238..99cff1fd108b 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -28,6 +28,11 @@ enum clk_sel {
MAX_SEL
};
+static u32 share_count_sai1;
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+static u32 share_count_mub;
+
static const char *parent_names[MAX_SEL][4] = {
{"osc_24m", "sys_pll_pfd0_div2", "sys_pll_pfd1_div2", "video_pll"},
{"osc_24m", "sys_pll_pfd0_div2", "sys_pll_pfd1_div2", "sys_pll_pfd2_div2"},
@@ -146,6 +151,7 @@ static const struct imx93_clk_ccgr {
char *parent_name;
u32 off;
unsigned long flags;
+ u32 *shared_count;
} ccgr_array[] = {
{ IMX93_CLK_A55_GATE, "a55", "a55_root", 0x8000, },
/* M33 critical clk for system run */
@@ -158,8 +164,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_WDOG5_GATE, "wdog5", "osc_24m", 0x8400, },
{ IMX93_CLK_SEMA1_GATE, "sema1", "bus_aon_root", 0x8440, },
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
- { IMX93_CLK_MU_A_GATE, "mu_a", "bus_aon_root", 0x84c0, },
- { IMX93_CLK_MU_B_GATE, "mu_b", "bus_aon_root", 0x8500, },
+ { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED },
+ { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED },
+ { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub },
+ { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub },
{ IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi", "flexspi_root", 0x8640, },
@@ -210,9 +218,12 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_USDHC1_GATE, "usdhc1", "usdhc1_root", 0x9380, },
{ IMX93_CLK_USDHC2_GATE, "usdhc2", "usdhc2_root", 0x93c0, },
{ IMX93_CLK_USDHC3_GATE, "usdhc3", "usdhc3_root", 0x9400, },
- { IMX93_CLK_SAI1_GATE, "sai1", "sai1_root", 0x9440, },
- { IMX93_CLK_SAI2_GATE, "sai2", "sai2_root", 0x9480, },
- { IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, },
+ { IMX93_CLK_SAI1_GATE, "sai1", "sai1_root", 0x9440, 0, &share_count_sai1},
+ { IMX93_CLK_SAI1_IPG, "sai1_ipg_clk", "bus_aon_root", 0x9440, 0, &share_count_sai1},
+ { IMX93_CLK_SAI2_GATE, "sai2", "sai2_root", 0x9480, 0, &share_count_sai2},
+ { IMX93_CLK_SAI2_IPG, "sai2_ipg_clk", "bus_wakeup_root", 0x9480, 0, &share_count_sai2},
+ { IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, 0, &share_count_sai3},
+ { IMX93_CLK_SAI3_IPG, "sai3_ipg_clk", "bus_wakeup_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_MIPI_CSI_GATE, "mipi_csi", "media_apb_root", 0x9580, },
{ IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, },
{ IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, },
@@ -293,16 +304,15 @@ static int imx93_clocks_probe(struct platform_device *pdev)
root = &root_array[i];
clks[root->clk] = imx93_clk_composite_flags(root->name,
parent_names[root->sel],
- 4, base + root->off,
+ 4, base + root->off, 3,
root->flags);
}
for (i = 0; i < ARRAY_SIZE(ccgr_array); i++) {
ccgr = &ccgr_array[i];
- clks[ccgr->clk] = imx_clk_hw_gate4_flags(ccgr->name,
- ccgr->parent_name,
- base + ccgr->off, 0,
- ccgr->flags);
+ clks[ccgr->clk] = imx93_clk_gate(NULL, ccgr->name, ccgr->parent_name,
+ ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
+ ccgr->shared_count);
}
imx_check_clk_hws(clks, IMX93_CLK_END);
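For a consumer, the shared counts mean the paired SAI entries behave as one LPCG: the register is only written on the first enable and the last disable. The sketch below is illustrative only; the driver name and con_ids are made up, but both clocks are assumed to sit behind the same SAI1 LPCG via share_count_sai1.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int foo_sai_probe(struct platform_device *pdev)
	{
		struct clk *ipg, *mclk;

		ipg = devm_clk_get(&pdev->dev, "ipg");
		if (IS_ERR(ipg))
			return PTR_ERR(ipg);
		mclk = devm_clk_get(&pdev->dev, "mclk");
		if (IS_ERR(mclk))
			return PTR_ERR(mclk);

		clk_prepare_enable(ipg);	/* count 0 -> 1: LPCG switched on */
		clk_prepare_enable(mclk);	/* count 1 -> 2: no register write */

		clk_disable_unprepare(ipg);	/* count 2 -> 1: still on */
		clk_disable_unprepare(mclk);	/* count 1 -> 0: LPCG gated again */

		return 0;
	}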
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index c56e406138db..1e6870f3671f 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -695,7 +695,11 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
pr_warn("%s: failed to attach the power domain %d\n",
name, ret);
- platform_device_add(pdev);
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
/* For API backwards compatibility, simply return NULL for success */
return NULL;
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 5061a06468df..dd49f90110e8 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -445,11 +445,16 @@ struct clk_hw *imx93_clk_composite_flags(const char *name,
const char * const *parent_names,
int num_parents,
void __iomem *reg,
+ u32 domain_id,
unsigned long flags);
-#define imx93_clk_composite(name, parent_names, num_parents, reg) \
- imx93_clk_composite_flags(name, parent_names, num_parents, reg, \
+#define imx93_clk_composite(name, parent_names, num_parents, reg, domain_id) \
+ imx93_clk_composite_flags(name, parent_names, num_parents, reg, domain_id, \
CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+struct clk_hw *imx93_clk_gate(struct device *dev, const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, u32 bit_idx, u32 val,
+ u32 mask, u32 domain_id, unsigned int *share_count);
+
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index d5936cfb3bee..843cea0c7a44 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -259,6 +259,43 @@ config COMMON_CLK_MT6779_AUDSYS
help
This driver supports Mediatek MT6779 audsys clocks.
+config COMMON_CLK_MT6795
+ tristate "Clock driver for MediaTek MT6795"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT6795 basic clocks and clocks
+ required for various peripherals found on MediaTek MT6795 SoCs.
+
+config COMMON_CLK_MT6795_MFGCFG
+ tristate "Clock driver for MediaTek MT6795 mfgcfg"
+ depends on COMMON_CLK_MT6795
+ default COMMON_CLK_MT6795
+ help
+ This driver supports MediaTek MT6795 mfgcfg clocks.
+
+config COMMON_CLK_MT6795_MMSYS
+ tristate "Clock driver for MediaTek MT6795 mmsys"
+ depends on COMMON_CLK_MT6795
+ default COMMON_CLK_MT6795
+ help
+ This driver supports MediaTek MT6795 mmsys clocks.
+
+config COMMON_CLK_MT6795_VDECSYS
+ tristate "Clock driver for MediaTek MT6795 VDECSYS"
+ depends on COMMON_CLK_MT6795
+ default COMMON_CLK_MT6795
+ help
+ This driver supports MediaTek MT6795 vdecsys clocks.
+
+config COMMON_CLK_MT6795_VENCSYS
+ tristate "Clock driver for MediaTek MT6795 VENCSYS"
+ depends on COMMON_CLK_MT6795
+ default COMMON_CLK_MT6795
+ help
+ This driver supports MediaTek MT6795 vencsys clocks.
+
config COMMON_CLK_MT6797
bool "Clock driver for MediaTek MT6797"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -608,6 +645,56 @@ config COMMON_CLK_MT8195
help
This driver supports MediaTek MT8195 clocks.
+config COMMON_CLK_MT8365
+ tristate "Clock driver for MediaTek MT8365"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ help
+ This driver supports MediaTek MT8365 basic clocks.
+
+config COMMON_CLK_MT8365_APU
+ tristate "Clock driver for MediaTek MT8365 apu"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 apu clocks.
+
+config COMMON_CLK_MT8365_CAM
+ tristate "Clock driver for MediaTek MT8365 cam"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 cam clocks.
+
+config COMMON_CLK_MT8365_MFG
+ tristate "Clock driver for MediaTek MT8365 mfg"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 mfg clocks.
+
+config COMMON_CLK_MT8365_MMSYS
+ tristate "Clock driver for MediaTek MT8365 mmsys"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 mmsys clocks.
+
+config COMMON_CLK_MT8365_VDEC
+ tristate "Clock driver for MediaTek MT8365 vdec"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 vdec clocks.
+
+config COMMON_CLK_MT8365_VENC
+ tristate "Clock driver for MediaTek MT8365 venc"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 venc clocks.
+
config COMMON_CLK_MT8516
bool "Clock driver for MediaTek MT8516"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index caf2ce93d666..ea3b73240303 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -17,6 +17,12 @@ obj-$(CONFIG_COMMON_CLK_MT6779_VDECSYS) += clk-mt6779-vdec.o
obj-$(CONFIG_COMMON_CLK_MT6779_VENCSYS) += clk-mt6779-venc.o
obj-$(CONFIG_COMMON_CLK_MT6779_MFGCFG) += clk-mt6779-mfg.o
obj-$(CONFIG_COMMON_CLK_MT6779_AUDSYS) += clk-mt6779-aud.o
+obj-$(CONFIG_COMMON_CLK_MT6795) += clk-mt6795-apmixedsys.o clk-mt6795-infracfg.o \
+ clk-mt6795-pericfg.o clk-mt6795-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT6795_MFGCFG) += clk-mt6795-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT6795_MMSYS) += clk-mt6795-mm.o
+obj-$(CONFIG_COMMON_CLK_MT6795_VDECSYS) += clk-mt6795-vdecsys.o
+obj-$(CONFIG_COMMON_CLK_MT6795_VENCSYS) += clk-mt6795-vencsys.o
obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o
obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o
obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o
@@ -97,5 +103,12 @@ obj-$(CONFIG_COMMON_CLK_MT8195) += clk-mt8195-apmixedsys.o clk-mt8195-topckgen.o
clk-mt8195-venc.o clk-mt8195-vpp0.o clk-mt8195-vpp1.o \
clk-mt8195-wpe.o clk-mt8195-imp_iic_wrap.o \
clk-mt8195-apusys_pll.o
+obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365.o
+obj-$(CONFIG_COMMON_CLK_MT8365_APU) += clk-mt8365-apu.o
+obj-$(CONFIG_COMMON_CLK_MT8365_CAM) += clk-mt8365-cam.o
+obj-$(CONFIG_COMMON_CLK_MT8365_MFG) += clk-mt8365-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8365_MMSYS) += clk-mt8365-mm.o
+obj-$(CONFIG_COMMON_CLK_MT8365_VDEC) += clk-mt8365-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8365_VENC) += clk-mt8365-venc.o
obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o
diff --git a/drivers/clk/mediatek/clk-apmixed.c b/drivers/clk/mediatek/clk-apmixed.c
index fc3d4146f482..60e34f124250 100644
--- a/drivers/clk/mediatek/clk-apmixed.c
+++ b/drivers/clk/mediatek/clk-apmixed.c
@@ -70,7 +70,7 @@ static const struct clk_ops mtk_ref2usb_tx_ops = {
.unprepare = mtk_ref2usb_tx_unprepare,
};
-struct clk_hw * __init mtk_clk_register_ref2usb_tx(const char *name,
+struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg)
{
struct mtk_ref2usb_tx *tx;
@@ -98,5 +98,15 @@ struct clk_hw * __init mtk_clk_register_ref2usb_tx(const char *name,
return &tx->hw;
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_ref2usb_tx);
+
+void mtk_clk_unregister_ref2usb_tx(struct clk_hw *hw)
+{
+ struct mtk_ref2usb_tx *tx = to_mtk_ref2usb_tx(hw);
+
+ clk_hw_unregister(hw);
+ kfree(tx);
+}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_ref2usb_tx);
MODULE_LICENSE("GPL");
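With the __init annotation dropped and an unregister counterpart exported, a modular apmixedsys driver can now pair the two calls across probe and remove. The sketch below is illustrative only; the driver name, parent clock and global handle are made up.

	static struct clk_hw *ref2usb_tx_hw;

	static int foo_apmixed_probe(struct platform_device *pdev)
	{
		void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

		if (IS_ERR(base))
			return PTR_ERR(base);

		ref2usb_tx_hw = mtk_clk_register_ref2usb_tx("ref2usb_tx",
							    "clk26m", base);
		return PTR_ERR_OR_ZERO(ref2usb_tx_hw);
	}

	static int foo_apmixed_remove(struct platform_device *pdev)
	{
		mtk_clk_unregister_ref2usb_tx(ref2usb_tx_hw);
		return 0;
	}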
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index 2b5d48591738..25618eff6f2a 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -150,6 +150,7 @@ err:
return PTR_ERR(hw);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_cpumuxes);
void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num,
struct clk_hw_onecell_data *clk_data)
@@ -166,5 +167,6 @@ void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num,
clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_cpumuxes);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 421806236228..0c867136e49d 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -261,6 +261,7 @@ err:
return PTR_ERR(hw);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_gates_with_dev);
int mtk_clk_register_gates(struct device_node *node,
const struct mtk_gate *clks, int num,
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index 662a8ab3fbb1..435ed4819d56 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -94,33 +94,23 @@ static const struct mtk_gate bdp_clks[] = {
GATE_BDP1(CLK_BDP_HDMI_MON, "hdmi_mon", "hdmi_0_pll340m", 16),
};
-static const struct of_device_id of_match_clk_mt2701_bdp[] = {
- { .compatible = "mediatek,mt2701-bdpsys", },
- {}
+static const struct mtk_clk_desc bdp_desc = {
+ .clks = bdp_clks,
+ .num_clks = ARRAY_SIZE(bdp_clks),
};
-static int clk_mt2701_bdp_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_BDP_NR);
-
- mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt2701_bdp[] = {
+ {
+ .compatible = "mediatek,mt2701-bdpsys",
+ .data = &bdp_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt2701_bdp_drv = {
- .probe = clk_mt2701_bdp_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-bdp",
.of_match_table = of_match_clk_mt2701_bdp,
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index c4f3cd26df60..7e53deb7f990 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -36,33 +36,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_VENC, "img_venc", "mm_sel", 9),
};
-static const struct of_device_id of_match_clk_mt2701_img[] = {
- { .compatible = "mediatek,mt2701-imgsys", },
- {}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
};
-static int clk_mt2701_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt2701_img[] = {
+ {
+ .compatible = "mediatek,mt2701-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt2701_img_drv = {
- .probe = clk_mt2701_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-img",
.of_match_table = of_match_clk_mt2701_img,
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index a2f18117f27a..d3089da0ab62 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -47,33 +47,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
};
-static const struct of_device_id of_match_clk_mt2701_vdec[] = {
- { .compatible = "mediatek,mt2701-vdecsys", },
- {}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
};
-static int clk_mt2701_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt2701_vdec[] = {
+ {
+ .compatible = "mediatek,mt2701-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt2701_vdec_drv = {
- .probe = clk_mt2701_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-vdec",
.of_match_table = of_match_clk_mt2701_vdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
index 9acab4357133..684d03e9f6de 100644
--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -58,33 +58,23 @@ static const struct mtk_gate bdp_clks[] = {
GATE_BDP(CLK_BDP_TVD_CBUS, "bdp_tvd_cbus", "mm_sel", 30),
};
-static int clk_mt2712_bdp_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_BDP_NR_CLK);
-
- mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc bdp_desc = {
+ .clks = bdp_clks,
+ .num_clks = ARRAY_SIZE(bdp_clks),
+};
static const struct of_device_id of_match_clk_mt2712_bdp[] = {
- { .compatible = "mediatek,mt2712-bdpsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-bdpsys",
+ .data = &bdp_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_bdp_drv = {
- .probe = clk_mt2712_bdp_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-bdp",
.of_match_table = of_match_clk_mt2712_bdp,
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
index 5cc143e65e42..335049cdc856 100644
--- a/drivers/clk/mediatek/clk-mt2712-img.c
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -36,33 +36,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_CAM_SV2_EN, "img_cam_sv2_en", "mm_sel", 11),
};
-static int clk_mt2712_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
static const struct of_device_id of_match_clk_mt2712_img[] = {
- { .compatible = "mediatek,mt2712-imgsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_img_drv = {
- .probe = clk_mt2712_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-img",
.of_match_table = of_match_clk_mt2712_img,
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
index 31fc30370d98..07ba7c5e80af 100644
--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -32,33 +32,23 @@ static const struct mtk_gate jpgdec_clks[] = {
GATE_JPGDEC(CLK_JPGDEC_JPGDEC, "jpgdec_jpgdec", "jpgdec_sel", 4),
};
-static int clk_mt2712_jpgdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_JPGDEC_NR_CLK);
-
- mtk_clk_register_gates(node, jpgdec_clks, ARRAY_SIZE(jpgdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc jpgdec_desc = {
+ .clks = jpgdec_clks,
+ .num_clks = ARRAY_SIZE(jpgdec_clks),
+};
static const struct of_device_id of_match_clk_mt2712_jpgdec[] = {
- { .compatible = "mediatek,mt2712-jpgdecsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-jpgdecsys",
+ .data = &jpgdec_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_jpgdec_drv = {
- .probe = clk_mt2712_jpgdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-jpgdec",
.of_match_table = of_match_clk_mt2712_jpgdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
index a4d09675bf18..42f8cf3ecf4c 100644
--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -31,33 +31,23 @@ static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
};
-static int clk_mt2712_mfg_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
-
- mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
static const struct of_device_id of_match_clk_mt2712_mfg[] = {
- { .compatible = "mediatek,mt2712-mfgcfg", },
- {}
+ {
+ .compatible = "mediatek,mt2712-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_mfg_drv = {
- .probe = clk_mt2712_mfg_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-mfg",
.of_match_table = of_match_clk_mt2712_mfg,
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
index af13f43dd831..6296ed5c5b55 100644
--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -50,33 +50,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1(CLK_VDEC_IMGRZ_CKEN, "vdec_imgrz_cken", "vdec_sel", 1),
};
-static int clk_mt2712_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
static const struct of_device_id of_match_clk_mt2712_vdec[] = {
- { .compatible = "mediatek,mt2712-vdecsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_vdec_drv = {
- .probe = clk_mt2712_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-vdec",
.of_match_table = of_match_clk_mt2712_vdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
index abc08a029753..b9bfc35de629 100644
--- a/drivers/clk/mediatek/clk-mt2712-venc.c
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -33,33 +33,23 @@ static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_SMI_LARB6, "venc_smi_larb6", "jpgdec_sel", 12),
};
-static int clk_mt2712_venc_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
static const struct of_device_id of_match_clk_mt2712_venc[] = {
- { .compatible = "mediatek,mt2712-vencsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_venc_drv = {
- .probe = clk_mt2712_venc_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-venc",
.of_match_table = of_match_clk_mt2712_venc,
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
index 9c6e9caad597..0aa6c0d352ca 100644
--- a/drivers/clk/mediatek/clk-mt6765-audio.c
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -64,33 +64,23 @@ static const struct mtk_gate audio_clks[] = {
"audio_ck", 7),
};
-static int clk_mt6765_audio_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
-
- mtk_clk_register_gates(node, audio_clks,
- ARRAY_SIZE(audio_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc audio_desc = {
+ .clks = audio_clks,
+ .num_clks = ARRAY_SIZE(audio_clks),
+};
static const struct of_device_id of_match_clk_mt6765_audio[] = {
- { .compatible = "mediatek,mt6765-audsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-audsys",
+ .data = &audio_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_audio_drv = {
- .probe = clk_mt6765_audio_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-audio",
.of_match_table = of_match_clk_mt6765_audio,
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
index 2586d3ac4cd4..25f2bef38126 100644
--- a/drivers/clk/mediatek/clk-mt6765-cam.c
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -39,32 +39,23 @@ static const struct mtk_gate cam_clks[] = {
GATE_CAM(CLK_CAM_CCU, "cam_ccu", "mm_ck", 12),
};
-static int clk_mt6765_cam_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
-
- mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
static const struct of_device_id of_match_clk_mt6765_cam[] = {
- { .compatible = "mediatek,mt6765-camsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-camsys",
+ .data = &cam_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_cam_drv = {
- .probe = clk_mt6765_cam_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-cam",
.of_match_table = of_match_clk_mt6765_cam,
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
index 8cc95b98921e..a62303ef4f41 100644
--- a/drivers/clk/mediatek/clk-mt6765-img.c
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -35,32 +35,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_RSC, "img_rsc", "mm_ck", 5),
};
-static int clk_mt6765_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
static const struct of_device_id of_match_clk_mt6765_img[] = {
- { .compatible = "mediatek,mt6765-imgsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_img_drv = {
- .probe = clk_mt6765_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-img",
.of_match_table = of_match_clk_mt6765_img,
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
index c816e26a95f9..25c829fc3866 100644
--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -32,33 +32,23 @@ static const struct mtk_gate mipi0a_clks[] = {
"mipi0a_csr_0a", "f_fseninf_ck", 1),
};
-static int clk_mt6765_mipi0a_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_MIPI0A_NR_CLK);
-
- mtk_clk_register_gates(node, mipi0a_clks,
- ARRAY_SIZE(mipi0a_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc mipi0a_desc = {
+ .clks = mipi0a_clks,
+ .num_clks = ARRAY_SIZE(mipi0a_clks),
+};
static const struct of_device_id of_match_clk_mt6765_mipi0a[] = {
- { .compatible = "mediatek,mt6765-mipi0a", },
- {}
+ {
+ .compatible = "mediatek,mt6765-mipi0a",
+ .data = &mipi0a_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_mipi0a_drv = {
- .probe = clk_mt6765_mipi0a_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mipi0a",
.of_match_table = of_match_clk_mt6765_mipi0a,
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
index ee6d3b859a6c..bda774668a36 100644
--- a/drivers/clk/mediatek/clk-mt6765-mm.c
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -61,32 +61,23 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM(CLK_MM_F26M_HRTWT, "mm_hrtwt", "f_f26m_ck", 29),
};
-static int clk_mt6765_mm_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
static const struct of_device_id of_match_clk_mt6765_mm[] = {
- { .compatible = "mediatek,mt6765-mmsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-mmsys",
+ .data = &mm_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_mm_drv = {
- .probe = clk_mt6765_mm_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mm",
.of_match_table = of_match_clk_mt6765_mm,
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
index d8045979d48a..2bc1fbde87da 100644
--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -34,33 +34,23 @@ static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_SET3_VDEC, "venc_set3_vdec", "mm_ck", 12),
};
-static int clk_mt6765_vcodec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks,
- ARRAY_SIZE(venc_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
static const struct of_device_id of_match_clk_mt6765_vcodec[] = {
- { .compatible = "mediatek,mt6765-vcodecsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-vcodecsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_vcodec_drv = {
- .probe = clk_mt6765_vcodec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-vcodec",
.of_match_table = of_match_clk_mt6765_vcodec,
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 97e44abb7e87..6e473ae1fd90 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -89,26 +89,23 @@ static const struct mtk_gate audio_clks[] = {
"audio_h_sel", 31),
};
-static const struct of_device_id of_match_clk_mt6779_aud[] = {
- { .compatible = "mediatek,mt6779-audio", },
- {}
+static const struct mtk_clk_desc audio_desc = {
+ .clks = audio_clks,
+ .num_clks = ARRAY_SIZE(audio_clks),
};
-static int clk_mt6779_aud_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
-
- mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_aud[] = {
+ {
+ .compatible = "mediatek,mt6779-audio",
+ .data = &audio_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_aud_drv = {
- .probe = clk_mt6779_aud_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-aud",
.of_match_table = of_match_clk_mt6779_aud,
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index 9c5117aae146..7be3db90fa4a 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -38,26 +38,23 @@ static const struct mtk_gate cam_clks[] = {
GATE_CAM(CLK_CAM_FAKE_ENG, "camsys_fake_eng", "cam_sel", 14),
};
-static const struct of_device_id of_match_clk_mt6779_cam[] = {
- { .compatible = "mediatek,mt6779-camsys", },
- {}
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
};
-static int clk_mt6779_cam_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
-
- mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_cam[] = {
+ {
+ .compatible = "mediatek,mt6779-camsys",
+ .data = &cam_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_cam_drv = {
- .probe = clk_mt6779_cam_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-cam",
.of_match_table = of_match_clk_mt6779_cam,
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index 801271477d46..9bc51fc82dbd 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -30,26 +30,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_WPE_A, "imgsys_wpe_a", "img_sel", 7),
};
-static const struct of_device_id of_match_clk_mt6779_img[] = {
- { .compatible = "mediatek,mt6779-imgsys", },
- {}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
};
-static int clk_mt6779_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_img[] = {
+ {
+ .compatible = "mediatek,mt6779-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_img_drv = {
- .probe = clk_mt6779_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-img",
.of_match_table = of_match_clk_mt6779_img,
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index f67814ca7dfb..92e9d1ade422 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -32,26 +32,23 @@ static const struct mtk_gate ipe_clks[] = {
GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "ipe_sel", 6),
};
-static const struct of_device_id of_match_clk_mt6779_ipe[] = {
- { .compatible = "mediatek,mt6779-ipesys", },
- {}
+static const struct mtk_clk_desc ipe_desc = {
+ .clks = ipe_clks,
+ .num_clks = ARRAY_SIZE(ipe_clks),
};
-static int clk_mt6779_ipe_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPE_NR_CLK);
-
- mtk_clk_register_gates(node, ipe_clks, ARRAY_SIZE(ipe_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_ipe[] = {
+ {
+ .compatible = "mediatek,mt6779-ipesys",
+ .data = &ipe_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_ipe_drv = {
- .probe = clk_mt6779_ipe_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-ipe",
.of_match_table = of_match_clk_mt6779_ipe,
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index fc7387b59758..efc793a1969a 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -27,26 +27,23 @@ static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFGCFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
};
-static int clk_mt6779_mfg_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_MFGCFG_NR_CLK);
-
- mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
static const struct of_device_id of_match_clk_mt6779_mfg[] = {
- { .compatible = "mediatek,mt6779-mfgcfg", },
- {}
+ {
+ .compatible = "mediatek,mt6779-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6779_mfg_drv = {
- .probe = clk_mt6779_mfg_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-mfg",
.of_match_table = of_match_clk_mt6779_mfg,
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index 7e195b082e86..3209a6518d5b 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -39,26 +39,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1_cken", "vdec_sel", 0),
};
-static const struct of_device_id of_match_clk_mt6779_vdec[] = {
- { .compatible = "mediatek,mt6779-vdecsys", },
- {}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
};
-static int clk_mt6779_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_GCON_NR_CLK);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_vdec[] = {
+ {
+ .compatible = "mediatek,mt6779-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_vdec_drv = {
- .probe = clk_mt6779_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-vdec",
.of_match_table = of_match_clk_mt6779_vdec,
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index 573efa87c9bd..c25035c0f334 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -30,26 +30,23 @@ static const struct mtk_gate venc_clks[] = {
GATE_VENC_I(CLK_VENC_GCON_GALS, "venc_gals", "venc_sel", 28),
};
-static const struct of_device_id of_match_clk_mt6779_venc[] = {
- { .compatible = "mediatek,mt6779-vencsys", },
- {}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
};
-static int clk_mt6779_venc_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_GCON_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_venc[] = {
+ {
+ .compatible = "mediatek,mt6779-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_venc_drv = {
- .probe = clk_mt6779_venc_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-venc",
.of_match_table = of_match_clk_mt6779_venc,
diff --git a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
new file mode 100644
index 000000000000..59761c72d3bc
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define REG_REF2USB 0x8
+#define REG_AP_PLL_CON7 0x1c
+ #define MD1_MTCMOS_OFF BIT(0)
+ #define MD1_MEM_OFF BIT(1)
+ #define MD1_CLK_OFF BIT(4)
+ #define MD1_ISO_OFF BIT(8)
+
+#define MT6795_PLL_FMAX (3000UL * MHZ)
+#define MT6795_CON0_EN BIT(0)
+#define MT6795_CON0_RST_BAR BIT(24)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = MT6795_CON0_EN | _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = MT6795_CON0_RST_BAR, \
+ .fmax = MT6795_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = NULL, \
+ .pll_en_bit = 0, \
+ }
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMCA53PLL, "armca53pll", 0x200, 0x20c, 0, PLL_AO,
+ 21, 0x204, 24, 0x0, 0x204, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR,
+ 21, 0x220, 4, 0x0, 0x224, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000101, HAVE_RST_BAR,
+ 7, 0x230, 4, 0x0, 0x234, 14),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0, 0, 21, 0x244, 24, 0x0, 0x244, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0, 0, 21, 0x250, 4, 0x0, 0x254, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0, 0, 21, 0x260, 4, 0x0, 0x264, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0, 0, 21, 0x270, 4, 0x0, 0x274, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0, 0, 21, 0x280, 4, 0x0, 0x284, 0),
+ PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0, 0, 21, 0x290, 4, 0x0, 0x294, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0, 0, 31, 0x2a0, 4, 0x2a8, 0x2a4, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0, 0, 31, 0x2b4, 4, 0x2bc, 0x2b8, 0),
+};
+
+static void clk_mt6795_apmixed_setup_md1(void __iomem *base)
+{
+ void __iomem *reg = base + REG_AP_PLL_CON7;
+
+ /* Turn on MD1 internal clock */
+ writel(readl(reg) & ~MD1_CLK_OFF, reg);
+
+ /* Unlock MD1's MTCMOS power path */
+ writel(readl(reg) & ~MD1_MTCMOS_OFF, reg);
+
+ /* Turn on ISO */
+ writel(readl(reg) & ~MD1_ISO_OFF, reg);
+
+ /* Turn on memory */
+ writel(readl(reg) & ~MD1_MEM_OFF, reg);
+}
+
+static const struct of_device_id of_match_clk_mt6795_apmixed[] = {
+ { .compatible = "mediatek,mt6795-apmixedsys" },
+ { /* sentinel */ }
+};
+
+static int clk_mt6795_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ void __iomem *base;
+ struct clk_hw *hw;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ hw = mtk_clk_register_ref2usb_tx("ref2usb_tx", "clk26m", base + REG_REF2USB);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ dev_err(dev, "Failed to register ref2usb_tx: %d\n", ret);
+ goto unregister_plls;
+ }
+ clk_data->hws[CLK_APMIXED_REF2USB_TX] = hw;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret) {
+ dev_err(dev, "Cannot register clock provider: %d\n", ret);
+ goto unregister_ref2usb;
+ }
+
+	/* Set up MD1 to avoid random crashes */
+ dev_dbg(dev, "Performing initial setup for MD1\n");
+ clk_mt6795_apmixed_setup_md1(base);
+
+ return 0;
+
+unregister_ref2usb:
+ mtk_clk_unregister_ref2usb_tx(clk_data->hws[CLK_APMIXED_REF2USB_TX]);
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_ref2usb_tx(clk_data->hws[CLK_APMIXED_REF2USB_TX]);
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_apmixed_drv = {
+ .probe = clk_mt6795_apmixed_probe,
+ .remove = clk_mt6795_apmixed_remove,
+ .driver = {
+ .name = "clk-mt6795-apmixed",
+ .of_match_table = of_match_clk_mt6795_apmixed,
+ },
+};
+module_platform_driver(clk_mt6795_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 apmixed clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c
new file mode 100644
index 000000000000..df7eed6e071e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <dt-bindings/reset/mediatek,mt6795-resets.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-cpumux.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define GATE_ICG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, \
+ _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x0040,
+ .clr_ofs = 0x0044,
+ .sta_ofs = 0x0048,
+};
+
+static const char * const ca53_c0_parents[] = {
+ "clk26m",
+ "armca53pll",
+ "mainpll",
+ "univpll"
+};
+
+static const char * const ca53_c1_parents[] = {
+ "clk26m",
+ "armca53pll",
+ "mainpll",
+ "univpll"
+};
+
+static const struct mtk_composite cpu_muxes[] = {
+ MUX(CLK_INFRA_CA53_C0_SEL, "infra_ca53_c0_sel", ca53_c0_parents, 0x00, 0, 2),
+ MUX(CLK_INFRA_CA53_C1_SEL, "infra_ca53_c1_sel", ca53_c1_parents, 0x00, 2, 2),
+};
+
+static const struct mtk_gate infra_gates[] = {
+ GATE_ICG(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+ GATE_ICG(CLK_INFRA_SMI, "infra_smi", "mm_sel", 1),
+ GATE_ICG(CLK_INFRA_AUDIO, "infra_audio", "aud_intbus_sel", 5),
+ GATE_ICG(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
+ GATE_ICG(CLK_INFRA_L2C_SRAM, "infra_l2c_sram", "axi_sel", 7),
+ GATE_ICG(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
+ GATE_ICG(CLK_INFRA_MD1MCU, "infra_md1mcu", "clk26m", 9),
+ GATE_ICG(CLK_INFRA_MD1BUS, "infra_md1bus", "axi_sel", 10),
+ GATE_ICG(CLK_INFRA_MD1DBB, "infra_dbb", "axi_sel", 11),
+ GATE_ICG(CLK_INFRA_DEVICE_APC, "infra_devapc", "clk26m", 12),
+ GATE_ICG(CLK_INFRA_TRNG, "infra_trng", "axi_sel", 13),
+ GATE_ICG(CLK_INFRA_MD1LTE, "infra_md1lte", "axi_sel", 14),
+ GATE_ICG(CLK_INFRA_CPUM, "infra_cpum", "cpum_ck", 15),
+ GATE_ICG(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
+};
+
+static u16 infra_ao_rst_ofs[] = { 0x30, 0x34 };
+
+static u16 infra_ao_idx_map[] = {
+ [MT6795_INFRA_RST0_SCPSYS_RST] = 0 * RST_NR_PER_BANK + 5,
+ [MT6795_INFRA_RST0_PMIC_WRAP_RST] = 0 * RST_NR_PER_BANK + 7,
+ [MT6795_INFRA_RST1_MIPI_DSI_RST] = 1 * RST_NR_PER_BANK + 4,
+ [MT6795_INFRA_RST1_MIPI_CSI_RST] = 1 * RST_NR_PER_BANK + 7,
+ [MT6795_INFRA_RST1_MM_IOMMU_RST] = 1 * RST_NR_PER_BANK + 15,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
+static const struct of_device_id of_match_clk_mt6795_infracfg[] = {
+ { .compatible = "mediatek,mt6795-infracfg" },
+ { /* sentinel */ }
+};
+
+static int clk_mt6795_infracfg_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_gates(node, infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_cpumuxes;
+
+ return 0;
+
+unregister_cpumuxes:
+ mtk_clk_unregister_cpumuxes(cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+unregister_gates:
+ mtk_clk_unregister_gates(infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_infracfg_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_cpumuxes(cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+ mtk_clk_unregister_gates(infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_infracfg_drv = {
+ .driver = {
+ .name = "clk-mt6795-infracfg",
+ .of_match_table = of_match_clk_mt6795_infracfg,
+ },
+ .probe = clk_mt6795_infracfg_probe,
+ .remove = clk_mt6795_infracfg_remove,
+};
+module_platform_driver(clk_mt6795_infracfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 infracfg clocks driver");
+MODULE_LICENSE("GPL");
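
Beyond the clock gates and CPU muxes, the infracfg driver registers a reset controller whose rst_idx_map translates the MT6795_INFRA_RST* binding indices into a flat bank * RST_NR_PER_BANK + bit value; the shared MediaTek reset code later splits that value back into a bank offset (rst_bank_ofs[bank]) and a bit position. A minimal sketch of that decomposition, assuming RST_NR_PER_BANK is the number of bits per 32-bit reset bank:

/*
 * Illustrative only: the real translation lives in the common MediaTek
 * reset code. RST_NR_PER_BANK is assumed to be 32 (one u32 per bank).
 */
#define RST_NR_PER_BANK		32

struct rst_pos {
	unsigned int bank;	/* index into rst_bank_ofs[] */
	unsigned int bit;	/* bit within that bank's register */
};

static struct rst_pos rst_decode(unsigned int mapped_idx)
{
	struct rst_pos pos = {
		.bank = mapped_idx / RST_NR_PER_BANK,
		.bit  = mapped_idx % RST_NR_PER_BANK,
	};

	return pos;
}

With the table above, MT6795_INFRA_RST1_MIPI_DSI_RST maps to 1 * RST_NR_PER_BANK + 4, i.e. bit 4 of the bank at offset 0x34.
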
diff --git a/drivers/clk/mediatek/clk-mt6795-mfg.c b/drivers/clk/mediatek/clk-mt6795-mfg.c
new file mode 100644
index 000000000000..ee7aab24eb24
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-mfg.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BAXI, "mfg_baxi", "axi_mfg_in_sel", 0),
+ GATE_MFG(CLK_MFG_BMEM, "mfg_bmem", "mem_mfg_in_sel", 1),
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 2),
+ GATE_MFG(CLK_MFG_B26M, "mfg_b26m", "clk26m", 3),
+};
+
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt6795_mfg[] = {
+ { .compatible = "mediatek,mt6795-mfgcfg", .data = &mfg_desc },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6795_mfg_drv = {
+ .driver = {
+ .name = "clk-mt6795-mfg",
+ .of_match_table = of_match_clk_mt6795_mfg,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt6795_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 mfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c
new file mode 100644
index 000000000000..fd73f202f292
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-mm.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+static const struct mtk_gate mm_gates[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_CROP, "mm_mdp_crop", "mm_sel", 10),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "clk32k", 15),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
+ GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
+ GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
+ GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+ GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
+ GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+ GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
+ GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
+ GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+};
+
+static int clk_mt6795_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_hw_onecell_data *clk_data;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_gates(node, mm_gates, ARRAY_SIZE(mm_gates), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return 0;
+
+unregister_gates:
+ mtk_clk_unregister_gates(mm_gates, ARRAY_SIZE(mm_gates), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_mm_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_gates(mm_gates, ARRAY_SIZE(mm_gates), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_mm_drv = {
+ .driver = {
+ .name = "clk-mt6795-mm",
+ },
+ .probe = clk_mt6795_mm_probe,
+ .remove = clk_mt6795_mm_remove,
+};
+module_platform_driver(clk_mt6795_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 MultiMedia clocks driver");
+MODULE_LICENSE("GPL");
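
Unlike the other MT6795 clock drivers in this series, clk-mt6795-mm has no of_match_table and registers its gates against the parent's OF node (dev->parent->of_node), so it is expected to be instantiated as a child platform device by the multimedia subsystem (mmsys) driver. A minimal, hypothetical sketch of how such a parent could spawn it; the function name is illustrative and not taken from the actual mtk-mmsys driver:

/* Hedged sketch: an mmsys-style parent spawning the clock child device. */
static int example_mmsys_probe(struct platform_device *pdev)
{
	struct platform_device *clks;

	clks = platform_device_register_data(&pdev->dev, "clk-mt6795-mm",
					     PLATFORM_DEVID_AUTO, NULL, 0);
	if (IS_ERR(clks))
		return PTR_ERR(clks);

	platform_set_drvdata(pdev, clks);
	return 0;
}
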
diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c
new file mode 100644
index 000000000000..cb28d35dad59
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <dt-bindings/reset/mediatek,mt6795-resets.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define GATE_PERI(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr)
+
+static DEFINE_SPINLOCK(mt6795_peri_clk_lock);
+
+static const struct mtk_gate_regs peri_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x0010,
+ .sta_ofs = 0x0018,
+};
+
+static const char * const uart_ck_sel_parents[] = {
+ "clk26m",
+ "uart_sel",
+};
+
+static const struct mtk_composite peri_clks[] = {
+ MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1),
+ MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1),
+ MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1),
+ MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
+};
+
+static const struct mtk_gate peri_gates[] = {
+ GATE_PERI(CLK_PERI_NFI, "peri_nfi", "axi_sel", 0),
+ GATE_PERI(CLK_PERI_THERM, "peri_therm", "axi_sel", 1),
+ GATE_PERI(CLK_PERI_PWM1, "peri_pwm1", "axi_sel", 2),
+ GATE_PERI(CLK_PERI_PWM2, "peri_pwm2", "axi_sel", 3),
+ GATE_PERI(CLK_PERI_PWM3, "peri_pwm3", "axi_sel", 4),
+ GATE_PERI(CLK_PERI_PWM4, "peri_pwm4", "axi_sel", 5),
+ GATE_PERI(CLK_PERI_PWM5, "peri_pwm5", "axi_sel", 6),
+ GATE_PERI(CLK_PERI_PWM6, "peri_pwm6", "axi_sel", 7),
+ GATE_PERI(CLK_PERI_PWM7, "peri_pwm7", "axi_sel", 8),
+ GATE_PERI(CLK_PERI_PWM, "peri_pwm", "axi_sel", 9),
+ GATE_PERI(CLK_PERI_USB0, "peri_usb0", "usb30_sel", 10),
+ GATE_PERI(CLK_PERI_USB1, "peri_usb1", "usb20_sel", 11),
+ GATE_PERI(CLK_PERI_AP_DMA, "peri_ap_dma", "axi_sel", 12),
+ GATE_PERI(CLK_PERI_MSDC30_0, "peri_msdc30_0", "msdc50_0_sel", 13),
+ GATE_PERI(CLK_PERI_MSDC30_1, "peri_msdc30_1", "msdc30_1_sel", 14),
+ GATE_PERI(CLK_PERI_MSDC30_2, "peri_msdc30_2", "msdc30_2_sel", 15),
+ GATE_PERI(CLK_PERI_MSDC30_3, "peri_msdc30_3", "msdc30_3_sel", 16),
+ GATE_PERI(CLK_PERI_NLI_ARB, "peri_nli_arb", "axi_sel", 17),
+ GATE_PERI(CLK_PERI_IRDA, "peri_irda", "irda_sel", 18),
+ GATE_PERI(CLK_PERI_UART0, "peri_uart0", "axi_sel", 19),
+ GATE_PERI(CLK_PERI_UART1, "peri_uart1", "axi_sel", 20),
+ GATE_PERI(CLK_PERI_UART2, "peri_uart2", "axi_sel", 21),
+ GATE_PERI(CLK_PERI_UART3, "peri_uart3", "axi_sel", 22),
+ GATE_PERI(CLK_PERI_I2C0, "peri_i2c0", "axi_sel", 23),
+ GATE_PERI(CLK_PERI_I2C1, "peri_i2c1", "axi_sel", 24),
+ GATE_PERI(CLK_PERI_I2C2, "peri_i2c2", "axi_sel", 25),
+ GATE_PERI(CLK_PERI_I2C3, "peri_i2c3", "axi_sel", 26),
+ GATE_PERI(CLK_PERI_I2C4, "peri_i2c4", "axi_sel", 27),
+ GATE_PERI(CLK_PERI_AUXADC, "peri_auxadc", "clk26m", 28),
+ GATE_PERI(CLK_PERI_SPI0, "peri_spi0", "spi_sel", 29),
+};
+
+static u16 peri_rst_ofs[] = { 0x0 };
+
+static u16 peri_idx_map[] = {
+ [MT6795_PERI_NFI_SW_RST] = 14,
+ [MT6795_PERI_THERM_SW_RST] = 16,
+ [MT6795_PERI_MSDC1_SW_RST] = 20,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = peri_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(peri_rst_ofs),
+ .rst_idx_map = peri_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(peri_idx_map),
+};
+
+static const struct of_device_id of_match_clk_mt6795_pericfg[] = {
+ { .compatible = "mediatek,mt6795-pericfg" },
+ { /* sentinel */ }
+};
+
+static int clk_mt6795_pericfg_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
+ &mt6795_peri_clk_lock, clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_composites;
+
+ return 0;
+
+unregister_composites:
+ mtk_clk_unregister_composites(peri_clks, ARRAY_SIZE(peri_clks), clk_data);
+unregister_gates:
+ mtk_clk_unregister_gates(peri_gates, ARRAY_SIZE(peri_gates), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_pericfg_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_composites(peri_clks, ARRAY_SIZE(peri_clks), clk_data);
+ mtk_clk_unregister_gates(peri_gates, ARRAY_SIZE(peri_gates), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_pericfg_drv = {
+ .driver = {
+ .name = "clk-mt6795-pericfg",
+ .of_match_table = of_match_clk_mt6795_pericfg,
+ },
+ .probe = clk_mt6795_pericfg_probe,
+ .remove = clk_mt6795_pericfg_remove,
+};
+module_platform_driver(clk_mt6795_pericfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 pericfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c
new file mode 100644
index 000000000000..2948dd1aee8f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/*
+ * For some clocks we don't care about the actual rate, and the rate may
+ * change across products or scenarios. Such clocks are modelled with a
+ * rate of 0 to denote that it is not a real rate.
+ */
+#define DUMMY_RATE 0
+
+#define TOP_MUX_GATE_NOSR(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) \
+ MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _reg, \
+ (_reg + 0x4), (_reg + 0x8), _shift, _width, \
+ _gate, 0, -1, _flags)
+
+#define TOP_MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) \
+ TOP_MUX_GATE_NOSR(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, CLK_SET_RATE_PARENT | _flags)
+
+static DEFINE_SPINLOCK(mt6795_top_clk_lock);
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d8",
+ "dmpll_d4",
+ "dmpll_d8"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const axi_mfg_in_parents[] = {
+ "clk26m",
+ "axi_sel",
+ "dmpll_d2"
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d2",
+ "syspll3_d2",
+ "syspll3_d4",
+ "univpll1_d4",
+ "dmpll_d8"
+};
+
+static const char * const cci400_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "clk26m",
+ "clk26m",
+ "univpll_d2",
+ "syspll_d2",
+ "msdcpll_ck",
+ "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "clk26m",
+ "clk26m",
+ "tvdpll_d8",
+ "tvdpll_d16"
+};
+
+static const char * const i2s0_m_ck_parents[] = {
+ "apll1_div1",
+ "apll2_div1"
+};
+
+static const char * const i2s1_m_ck_parents[] = {
+ "apll1_div2",
+ "apll2_div2"
+};
+
+static const char * const i2s2_m_ck_parents[] = {
+ "apll1_div3",
+ "apll2_div3"
+};
+
+static const char * const i2s3_m_ck_parents[] = {
+ "apll1_div4",
+ "apll2_div4"
+};
+
+static const char * const i2s3_b_ck_parents[] = {
+ "apll1_div5",
+ "apll2_div5"
+};
+
+static const char * const irda_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "syspll2_d4",
+ "dmpll_d8",
+};
+
+static const char * const mem_mfg_in_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "dmpll_ck"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "dmpll_ck",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "univpll_d3",
+ "univpll1_d2",
+ "univpll_d5",
+ "univpll2_d2"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "vencpll_d2",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_d2"
+};
+
+static const char * const mjc_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "syspll_d5",
+ "syspll1_d2",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll_ck"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "msdcpll_d4",
+ "vencpll_d4",
+ "tvdpll_ck",
+ "univpll_d2",
+ "univpll1_d2",
+ "mmpll_ck"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d4"
+};
+
+static const char * const msdc30_2_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d2"
+};
+
+static const char * const msdc30_3_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d4"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "dmpll_d8",
+ "dmpll_d16"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const scam_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "univpll2_d4",
+ "dmpll_d4"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll_d5",
+ "syspll_d5",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "syspll1_d4",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d4",
+ "univpll1_d8"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const usb20_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const usb30_parents[] = {
+ "clk26m",
+ "univpll3_d2",
+ "usb_syspll_125m",
+ "univpll2_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "univpll_d3",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "mmpll_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "univpll_d3",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const struct mtk_fixed_clk fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_ADSYS_26M, "adsys_26m", "clk26m", 26 * MHZ),
+ FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_USB_SYSPLL_125M, "usb_syspll_125m", "clk26m", 125 * MHZ),
+ FIXED_CLK(CLK_TOP_DSI0_DIG, "dsi0_dig", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_DSI1_DIG, "dsi1_dig", "clk26m", DUMMY_RATE),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_ARMCA53PLL_754M, "armca53pll_754m", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_ARMCA53PLL_502M, "armca53pll_502m", "clk26m", 1, 3),
+
+ FACTOR(CLK_TOP_MAIN_H546M, "main_h546m", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_MAIN_H364M, "main_h364m", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAIN_H218P4M, "main_h218p4m", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAIN_H156M, "main_h156m", "mainpll", 1, 7),
+
+ FACTOR(CLK_TOP_TVDPLL_445P5M, "tvdpll_445p5m", "tvdpll", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_594M, "tvdpll_594m", "tvdpll", 1, 3),
+
+ FACTOR(CLK_TOP_UNIV_624M, "univ_624m", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIV_416M, "univ_416m", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIV_249P6M, "univ_249p6m", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIV_178P3M, "univ_178p3m", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIV_48M, "univ_48m", "univpll", 1, 26),
+
+ FACTOR(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", "clk32k", 1, 1),
+ FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
+ FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
+
+ FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "clk26m", 1, 3),
+
+ FACTOR(CLK_TOP_ARMCA53PLL_D2, "armca53pll_d2", "clk26m", 1, 1),
+ FACTOR(CLK_TOP_ARMCA53PLL_D3, "armca53pll_d3", "clk26m", 1, 1),
+
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "clkph_mck_o", 1, 1),
+ FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8),
+ FACTOR(CLK_TOP_DMPLL_D16, "dmpll_d16", "clkph_mck_o", 1, 16),
+
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2", 1, 4),
+
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "main_h546m", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "main_h546m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "main_h546m", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "main_h546m", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "main_h546m", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "main_h364m", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "main_h364m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "main_h364m", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "main_h218p4m", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "main_h218p4m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "main_h218p4m", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "main_h156m", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "main_h156m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "main_h156m", 1, 4),
+
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll_594m", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_594m", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_594m", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_594m", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_594m", 1, 16),
+
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univ_624m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univ_624m", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univ_624m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univ_624m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univ_416m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univ_416m", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univ_416m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univ_416m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univ_249p6m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univ_249p6m", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univ_249p6m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univ_249p6m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univ_178p3m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univ_48m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univ_48m", 1, 2),
+
+ FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1, 3),
+ FACTOR(CLK_TOP_VCODECPLL_370P5, "vcodecpll_370p5", "vcodecpll", 1, 4),
+
+ FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1, 1),
+ FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll", 1, 2),
+ FACTOR(CLK_TOP_VENCPLL_D4, "vencpll_d4", "vencpll", 1, 4),
+};
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ TOP_MUX_GATE_NOSR(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x40, 0, 3, 7, CLK_IS_CRITICAL),
+ TOP_MUX_GATE_NOSR(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x40, 8, 1, 15, CLK_IS_CRITICAL),
+ TOP_MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x40, 16, 1, 23, CLK_IS_CRITICAL),
+ TOP_MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x40, 24, 3, 31, 0),
+ /* CLK_CFG_1 */
+ TOP_MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x50, 0, 2, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x50, 8, 4, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x50, 16, 4, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x50, 24, 4, 31, 0),
+ /* CLK_CFG_2 */
+ TOP_MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x60, 0, 3, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x60, 8, 1, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x60, 16, 3, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x60, 24, 2, 31, 0),
+ /* CLK_CFG_3 */
+ TOP_MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel", usb30_parents, 0x70, 0, 2, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_MSDC50_0_H_SEL, "msdc50_0_h_sel", msdc50_0_h_parents,
+ 0x70, 8, 3, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_parents, 0x70, 16, 4, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_parents, 0x70, 24, 3, 31, 0),
+ /* CLK_CFG_4 */
+ TOP_MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_2_parents, 0x80, 0, 3, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_parents, 0x80, 8, 3, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents, 0x80, 16, 2, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x80, 24, 3, 31, 0),
+ /* CLK_CFG_5 */
+ TOP_MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, 0x90, 0, 3, 5, 0),
+ TOP_MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, 0x90, 8, 3, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_MJC_SEL, "mjc_sel", mjc_parents, 0x90, 24, 4, 31, 0),
+ /* CLK_CFG_6 */
+ /*
+ * The dpi0_sel clock should not propagate rate changes to its parent
+ * clock so the dpi driver can have full control over PLL and divider.
+ */
+ TOP_MUX_GATE_NOSR(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0xa0, 0, 3, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0xa0, 8, 2, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents,
+ 0xa0, 16, 3, 23, CLK_IS_CRITICAL),
+ TOP_MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0xa0, 24, 2, 31, 0),
+ /* CLK_CFG_7 */
+ TOP_MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents, 0xb0, 0, 2, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_MEM_MFG_IN_SEL, "mem_mfg_in_sel", mem_mfg_in_parents,
+ 0xb0, 8, 2, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_AXI_MFG_IN_SEL, "axi_mfg_in_sel", axi_mfg_in_parents,
+ 0xb0, 16, 2, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel", scam_parents, 0xb0, 24, 2, 31, 0),
+};
+
+static struct mtk_composite top_aud_divs[] = {
+ MUX(CLK_TOP_I2S0_M_SEL, "i2s0_m_ck_sel", i2s0_m_ck_parents, 0x120, 4, 1),
+ MUX(CLK_TOP_I2S1_M_SEL, "i2s1_m_ck_sel", i2s1_m_ck_parents, 0x120, 5, 1),
+ MUX(CLK_TOP_I2S2_M_SEL, "i2s2_m_ck_sel", i2s2_m_ck_parents, 0x120, 6, 1),
+ MUX(CLK_TOP_I2S3_M_SEL, "i2s3_m_ck_sel", i2s3_m_ck_parents, 0x120, 7, 1),
+ MUX(CLK_TOP_I2S3_B_SEL, "i2s3_b_ck_sel", i2s3_b_ck_parents, 0x120, 8, 1),
+
+ DIV_GATE(CLK_TOP_APLL1_DIV0, "apll1_div0", "aud_1_sel", 0x12c, 8, 0x120, 4, 24),
+ DIV_GATE(CLK_TOP_APLL1_DIV1, "apll1_div1", "aud_1_sel", 0x12c, 9, 0x124, 8, 0),
+ DIV_GATE(CLK_TOP_APLL1_DIV2, "apll1_div2", "aud_1_sel", 0x12c, 10, 0x124, 8, 8),
+ DIV_GATE(CLK_TOP_APLL1_DIV3, "apll1_div3", "aud_1_sel", 0x12c, 11, 0x124, 8, 16),
+ DIV_GATE(CLK_TOP_APLL1_DIV4, "apll1_div4", "aud_1_sel", 0x12c, 12, 0x124, 8, 24),
+ DIV_GATE(CLK_TOP_APLL1_DIV5, "apll1_div5", "apll1_div4", 0x12c, 13, 0x12c, 4, 0),
+
+ DIV_GATE(CLK_TOP_APLL2_DIV0, "apll2_div0", "aud_2_sel", 0x12c, 16, 0x120, 4, 28),
+ DIV_GATE(CLK_TOP_APLL2_DIV1, "apll2_div1", "aud_2_sel", 0x12c, 17, 0x128, 8, 0),
+ DIV_GATE(CLK_TOP_APLL2_DIV2, "apll2_div2", "aud_2_sel", 0x12c, 18, 0x128, 8, 8),
+ DIV_GATE(CLK_TOP_APLL2_DIV3, "apll2_div3", "aud_2_sel", 0x12c, 19, 0x128, 8, 16),
+ DIV_GATE(CLK_TOP_APLL2_DIV4, "apll2_div4", "aud_2_sel", 0x12c, 20, 0x128, 8, 24),
+ DIV_GATE(CLK_TOP_APLL2_DIV5, "apll2_div5", "apll2_div4", 0x12c, 21, 0x12c, 4, 4),
+};
+
+
+static const struct of_device_id of_match_clk_mt6795_topckgen[] = {
+ { .compatible = "mediatek,mt6795-topckgen" },
+ { /* sentinel */ }
+};
+
+static int clk_mt6795_topckgen_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ if (ret)
+ goto unregister_fixed_clks;
+
+ ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ &mt6795_top_clk_lock, clk_data);
+ if (ret)
+ goto unregister_factors;
+
+ ret = mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), base,
+ &mt6795_top_clk_lock, clk_data);
+ if (ret)
+ goto unregister_muxes;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_composites;
+
+ return 0;
+
+unregister_composites:
+ mtk_clk_unregister_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+unregister_fixed_clks:
+ mtk_clk_unregister_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_topckgen_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), clk_data);
+ mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_unregister_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_topckgen_drv = {
+ .driver = {
+ .name = "clk-mt6795-topckgen",
+ .of_match_table = of_match_clk_mt6795_topckgen,
+ },
+ .probe = clk_mt6795_topckgen_probe,
+ .remove = clk_mt6795_topckgen_remove,
+};
+module_platform_driver(clk_mt6795_topckgen_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 topckgen clocks driver");
+MODULE_LICENSE("GPL");
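
Once the topckgen provider is registered through of_clk_add_hw_provider(), consumer drivers look its outputs up by the CLK_TOP_* indices from the mediatek,mt6795-clk.h binding header via their own DT clock references. A minimal, hypothetical consumer sketch using the generic clk API; the "sel" clock-names entry is an assumption, not something defined by this series:

/* Hypothetical consumer of a clock routed from this provider via DT. */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, "sel");	/* assumed clock-names entry */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... program the consumer's hardware ... */

	clk_disable_unprepare(clk);
	return 0;
}
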
diff --git a/drivers/clk/mediatek/clk-mt6795-vdecsys.c b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
new file mode 100644
index 000000000000..d85d04e0d016
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define GATE_VDEC(_id, _name, _parent, _regs) \
+ GATE_MTK(_id, _name, _parent, _regs, 0, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x0000,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x0008,
+};
+
+static const struct mtk_gate vdec_clks[] = {
+ GATE_VDEC(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", &vdec0_cg_regs),
+ GATE_VDEC(CLK_VDEC_LARB_CKEN, "vdec_larb_cken", "mm_sel", &vdec1_cg_regs),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct of_device_id of_match_clk_mt6795_vdecsys[] = {
+ { .compatible = "mediatek,mt6795-vdecsys", .data = &vdec_desc },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6795_vdecsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6795-vdecsys",
+ .of_match_table = of_match_clk_mt6795_vdecsys,
+ },
+};
+module_platform_driver(clk_mt6795_vdecsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 vdecsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6795-vencsys.c b/drivers/clk/mediatek/clk-mt6795-vencsys.c
new file mode 100644
index 000000000000..de40a982ca96
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-vencsys.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_LARB, "venc_larb", "venc_sel", 0),
+ GATE_VENC(CLK_VENC_VENC, "venc_venc", "venc_sel", 4),
+ GATE_VENC(CLK_VENC_JPGENC, "venc_jpgenc", "venc_sel", 8),
+ GATE_VENC(CLK_VENC_JPGDEC, "venc_jpgdec", "venc_sel", 12),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt6795_vencsys[] = {
+ { .compatible = "mediatek,mt6795-vencsys", .data = &venc_desc },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6795_vencsys_drv = {
+ .driver = {
+ .name = "clk-mt6795-vencsys",
+ .of_match_table = of_match_clk_mt6795_vencsys,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt6795_vencsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 vdecsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
index 25d17db13bac..7c6a53fbb8be 100644
--- a/drivers/clk/mediatek/clk-mt6797-img.c
+++ b/drivers/clk/mediatek/clk-mt6797-img.c
@@ -32,33 +32,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_LARB6, "img_larb6", "mm_sel", 0),
};
-static const struct of_device_id of_match_clk_mt6797_img[] = {
- { .compatible = "mediatek,mt6797-imgsys", },
- {}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
};
-static int clk_mt6797_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt6797_img[] = {
+ {
+ .compatible = "mediatek,mt6797-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6797_img_drv = {
- .probe = clk_mt6797_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-img",
.of_match_table = of_match_clk_mt6797_img,
diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
index de857894e033..6120fccc859f 100644
--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
@@ -49,33 +49,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "mm_sel", 0),
};
-static const struct of_device_id of_match_clk_mt6797_vdec[] = {
- { .compatible = "mediatek,mt6797-vdecsys", },
- {}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
};
-static int clk_mt6797_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt6797_vdec[] = {
+ {
+ .compatible = "mediatek,mt6797-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6797_vdec_drv = {
- .probe = clk_mt6797_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-vdec",
.of_match_table = of_match_clk_mt6797_vdec,
diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
index 78b7ed55f979..834d3834d2bb 100644
--- a/drivers/clk/mediatek/clk-mt6797-venc.c
+++ b/drivers/clk/mediatek/clk-mt6797-venc.c
@@ -34,33 +34,23 @@ static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_3, "venc_3", "venc_sel", 12),
};
-static const struct of_device_id of_match_clk_mt6797_venc[] = {
- { .compatible = "mediatek,mt6797-vencsys", },
- {}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
};
-static int clk_mt6797_venc_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt6797_venc[] = {
+ {
+ .compatible = "mediatek,mt6797-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6797_venc_drv = {
- .probe = clk_mt6797_venc_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-venc",
.of_match_table = of_match_clk_mt6797_venc,
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
index fcc598a45165..6907b1a6a824 100644
--- a/drivers/clk/mediatek/clk-mt8183-cam.c
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -34,26 +34,23 @@ static const struct mtk_gate cam_clks[] = {
GATE_CAM(CLK_CAM_CCU, "cam_ccu", "cam_sel", 12),
};
-static int clk_mt8183_cam_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
-
- mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
static const struct of_device_id of_match_clk_mt8183_cam[] = {
- { .compatible = "mediatek,mt8183-camsys", },
- {}
+ {
+ .compatible = "mediatek,mt8183-camsys",
+ .data = &cam_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_cam_drv = {
- .probe = clk_mt8183_cam_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-cam",
.of_match_table = of_match_clk_mt8183_cam,
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
index eb2def2cf0ae..8d884425d79f 100644
--- a/drivers/clk/mediatek/clk-mt8183-img.c
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -34,26 +34,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_OWE, "img_owe", "img_sel", 9),
};
-static int clk_mt8183_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
static const struct of_device_id of_match_clk_mt8183_img[] = {
- { .compatible = "mediatek,mt8183-imgsys", },
- {}
+ {
+ .compatible = "mediatek,mt8183-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_img_drv = {
- .probe = clk_mt8183_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-img",
.of_match_table = of_match_clk_mt8183_img,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
index b30fc9f47518..953a8a33d048 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu0.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -27,26 +27,23 @@ static const struct mtk_gate ipu_core0_clks[] = {
GATE_IPU_CORE0(CLK_IPU_CORE0_IPU, "ipu_core0_ipu", "dsp_sel", 2),
};
-static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPU_CORE0_NR_CLK);
-
- mtk_clk_register_gates(node, ipu_core0_clks, ARRAY_SIZE(ipu_core0_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc ipu_core0_desc = {
+ .clks = ipu_core0_clks,
+ .num_clks = ARRAY_SIZE(ipu_core0_clks),
+};
static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = {
- { .compatible = "mediatek,mt8183-ipu_core0", },
- {}
+ {
+ .compatible = "mediatek,mt8183-ipu_core0",
+ .data = &ipu_core0_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_ipu_core0_drv = {
- .probe = clk_mt8183_ipu_core0_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core0",
.of_match_table = of_match_clk_mt8183_ipu_core0,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
index b378957e11d0..221d12265974 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu1.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -27,26 +27,23 @@ static const struct mtk_gate ipu_core1_clks[] = {
GATE_IPU_CORE1(CLK_IPU_CORE1_IPU, "ipu_core1_ipu", "dsp_sel", 2),
};
-static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPU_CORE1_NR_CLK);
-
- mtk_clk_register_gates(node, ipu_core1_clks, ARRAY_SIZE(ipu_core1_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc ipu_core1_desc = {
+ .clks = ipu_core1_clks,
+ .num_clks = ARRAY_SIZE(ipu_core1_clks),
+};
static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = {
- { .compatible = "mediatek,mt8183-ipu_core1", },
- {}
+ {
+ .compatible = "mediatek,mt8183-ipu_core1",
+ .data = &ipu_core1_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_ipu_core1_drv = {
- .probe = clk_mt8183_ipu_core1_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core1",
.of_match_table = of_match_clk_mt8183_ipu_core1,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
index 941b43ac8bec..8c4fd96df821 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -25,26 +25,23 @@ static const struct mtk_gate ipu_adl_clks[] = {
GATE_IPU_ADL_I(CLK_IPU_ADL_CABGEN, "ipu_adl_cabgen", "dsp_sel", 24),
};
-static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPU_ADL_NR_CLK);
-
- mtk_clk_register_gates(node, ipu_adl_clks, ARRAY_SIZE(ipu_adl_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc ipu_adl_desc = {
+ .clks = ipu_adl_clks,
+ .num_clks = ARRAY_SIZE(ipu_adl_clks),
+};
static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = {
- { .compatible = "mediatek,mt8183-ipu_adl", },
- {}
+ {
+ .compatible = "mediatek,mt8183-ipu_adl",
+ .data = &ipu_adl_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_ipu_adl_drv = {
- .probe = clk_mt8183_ipu_adl_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_adl",
.of_match_table = of_match_clk_mt8183_ipu_adl,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
index ae82c2e17110..14a4c3ff82a1 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -94,26 +94,23 @@ static const struct mtk_gate ipu_conn_clks[] = {
"ipu_conn_cab3to1_slice", "dsp1_sel", 17),
};
-static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPU_CONN_NR_CLK);
-
- mtk_clk_register_gates(node, ipu_conn_clks, ARRAY_SIZE(ipu_conn_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc ipu_conn_desc = {
+ .clks = ipu_conn_clks,
+ .num_clks = ARRAY_SIZE(ipu_conn_clks),
+};
static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = {
- { .compatible = "mediatek,mt8183-ipu_conn", },
- {}
+ {
+ .compatible = "mediatek,mt8183-ipu_conn",
+ .data = &ipu_conn_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_ipu_conn_drv = {
- .probe = clk_mt8183_ipu_conn_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_conn",
.of_match_table = of_match_clk_mt8183_ipu_conn,
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index d774edaf760b..730c9ae5ea12 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -18,36 +18,31 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_MFG(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \
- &mtk_clk_gate_ops_setclr)
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0)
};
-static int clk_mt8183_mfg_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- pm_runtime_enable(&pdev->dev);
-
- clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
-
- mtk_clk_register_gates_with_dev(node, mfg_clks, ARRAY_SIZE(mfg_clks),
- clk_data, &pdev->dev);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
static const struct of_device_id of_match_clk_mt8183_mfg[] = {
- { .compatible = "mediatek,mt8183-mfgcfg", },
- {}
+ {
+ .compatible = "mediatek,mt8183-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_mfg_drv = {
- .probe = clk_mt8183_mfg_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-mfg",
.of_match_table = of_match_clk_mt8183_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
index 0548cde159d0..c294e50b96b7 100644
--- a/drivers/clk/mediatek/clk-mt8183-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -38,26 +38,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1", "mm_sel", 0),
};
-static int clk_mt8183_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
static const struct of_device_id of_match_clk_mt8183_vdec[] = {
- { .compatible = "mediatek,mt8183-vdecsys", },
- {}
+ {
+ .compatible = "mediatek,mt8183-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_vdec_drv = {
- .probe = clk_mt8183_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-vdec",
.of_match_table = of_match_clk_mt8183_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
index f86ec607d87a..0051c5d92fc5 100644
--- a/drivers/clk/mediatek/clk-mt8183-venc.c
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -30,26 +30,23 @@ static const struct mtk_gate venc_clks[] = {
"mm_sel", 8),
};
-static int clk_mt8183_venc_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
static const struct of_device_id of_match_clk_mt8183_venc[] = {
- { .compatible = "mediatek,mt8183-vencsys", },
- {}
+ {
+ .compatible = "mediatek,mt8183-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_venc_drv = {
- .probe = clk_mt8183_venc_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-venc",
.of_match_table = of_match_clk_mt8183_venc,
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 8512101e1189..1860a35a723a 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -1198,10 +1198,33 @@ static void clk_mt8183_top_init_early(struct device_node *node)
CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
clk_mt8183_top_init_early);
+/* Register mux notifier for MFG mux */
+static int clk_mt8183_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
+{
+ struct mtk_mux_nb *mfg_mux_nb;
+ int i;
+
+ mfg_mux_nb = devm_kzalloc(dev, sizeof(*mfg_mux_nb), GFP_KERNEL);
+ if (!mfg_mux_nb)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(top_muxes); i++)
+ if (top_muxes[i].id == CLK_TOP_MUX_MFG)
+ break;
+ if (i == ARRAY_SIZE(top_muxes))
+ return -EINVAL;
+
+ mfg_mux_nb->ops = top_muxes[i].ops;
+ mfg_mux_nb->bypass_index = 0; /* Bypass to 26M crystal */
+
+ return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
+}
+
static int clk_mt8183_top_probe(struct platform_device *pdev)
{
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
+ int ret;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -1227,6 +1250,11 @@ static int clk_mt8183_top_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
top_clk_data);
+ ret = clk_mt8183_reg_mfg_mux_notifier(&pdev->dev,
+ top_clk_data->hws[CLK_TOP_MUX_MFG]->clk);
+ if (ret)
+ return ret;
+
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
top_clk_data);
}
diff --git a/drivers/clk/mediatek/clk-mt8192-cam.c b/drivers/clk/mediatek/clk-mt8192-cam.c
index fc74cd80b4b0..90b57d46eef7 100644
--- a/drivers/clk/mediatek/clk-mt8192-cam.c
+++ b/drivers/clk/mediatek/clk-mt8192-cam.c
@@ -98,6 +98,7 @@ static const struct of_device_id of_match_clk_mt8192_cam[] = {
static struct platform_driver clk_mt8192_cam_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-cam",
.of_match_table = of_match_clk_mt8192_cam,
diff --git a/drivers/clk/mediatek/clk-mt8192-img.c b/drivers/clk/mediatek/clk-mt8192-img.c
index 7ce3abe42577..da82d65a7650 100644
--- a/drivers/clk/mediatek/clk-mt8192-img.c
+++ b/drivers/clk/mediatek/clk-mt8192-img.c
@@ -61,6 +61,7 @@ static const struct of_device_id of_match_clk_mt8192_img[] = {
static struct platform_driver clk_mt8192_img_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-img",
.of_match_table = of_match_clk_mt8192_img,
diff --git a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
index 700356ac6a58..ff8e20bb44bb 100644
--- a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
@@ -110,6 +110,7 @@ static const struct of_device_id of_match_clk_mt8192_imp_iic_wrap[] = {
static struct platform_driver clk_mt8192_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-imp_iic_wrap",
.of_match_table = of_match_clk_mt8192_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8192-ipe.c b/drivers/clk/mediatek/clk-mt8192-ipe.c
index 730d91b64b3f..0225abe4170a 100644
--- a/drivers/clk/mediatek/clk-mt8192-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8192-ipe.c
@@ -48,6 +48,7 @@ static const struct of_device_id of_match_clk_mt8192_ipe[] = {
static struct platform_driver clk_mt8192_ipe_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-ipe",
.of_match_table = of_match_clk_mt8192_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8192-mdp.c b/drivers/clk/mediatek/clk-mt8192-mdp.c
index 93c87ae2f332..4675788d7816 100644
--- a/drivers/clk/mediatek/clk-mt8192-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8192-mdp.c
@@ -73,6 +73,7 @@ static const struct of_device_id of_match_clk_mt8192_mdp[] = {
static struct platform_driver clk_mt8192_mdp_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mdp",
.of_match_table = of_match_clk_mt8192_mdp,
diff --git a/drivers/clk/mediatek/clk-mt8192-mfg.c b/drivers/clk/mediatek/clk-mt8192-mfg.c
index 3bbc7469f0e4..ec5b44ffa458 100644
--- a/drivers/clk/mediatek/clk-mt8192-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8192-mfg.c
@@ -18,8 +18,10 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_MFG(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr, \
+ CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_pll_sel", 0),
@@ -41,6 +43,7 @@ static const struct of_device_id of_match_clk_mt8192_mfg[] = {
static struct platform_driver clk_mt8192_mfg_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mfg",
.of_match_table = of_match_clk_mt8192_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index 635f7a0b629a..a72e1b73fce8 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -55,6 +55,7 @@ static const struct of_device_id of_match_clk_mt8192_msdc[] = {
static struct platform_driver clk_mt8192_msdc_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-msdc",
.of_match_table = of_match_clk_mt8192_msdc,
diff --git a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
index 58725d79dd13..18a8679108b8 100644
--- a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
@@ -41,6 +41,7 @@ static const struct of_device_id of_match_clk_mt8192_scp_adsp[] = {
static struct platform_driver clk_mt8192_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-scp_adsp",
.of_match_table = of_match_clk_mt8192_scp_adsp,
diff --git a/drivers/clk/mediatek/clk-mt8192-vdec.c b/drivers/clk/mediatek/clk-mt8192-vdec.c
index b1d95cfbf22a..e149962dbbf9 100644
--- a/drivers/clk/mediatek/clk-mt8192-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8192-vdec.c
@@ -85,6 +85,7 @@ static const struct of_device_id of_match_clk_mt8192_vdec[] = {
static struct platform_driver clk_mt8192_vdec_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-vdec",
.of_match_table = of_match_clk_mt8192_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8192-venc.c b/drivers/clk/mediatek/clk-mt8192-venc.c
index c0d867bff09e..80b8bb170996 100644
--- a/drivers/clk/mediatek/clk-mt8192-venc.c
+++ b/drivers/clk/mediatek/clk-mt8192-venc.c
@@ -44,6 +44,7 @@ static const struct of_device_id of_match_clk_mt8192_venc[] = {
static struct platform_driver clk_mt8192_venc_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-venc",
.of_match_table = of_match_clk_mt8192_venc,
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index ebbd2798d9a3..d0f226931070 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -167,22 +167,7 @@ static const char * const mdp_parents[] = {
"mmpll_d5_d2"
};
-static const char * const img1_parents[] = {
- "clk26m",
- "univpll_d4",
- "tvdpll_ck",
- "mainpll_d4",
- "univpll_d5",
- "mmpll_d6",
- "univpll_d6",
- "mainpll_d6",
- "mmpll_d4_d2",
- "mainpll_d4_d2",
- "mmpll_d6_d2",
- "mmpll_d5_d2"
-};
-
-static const char * const img2_parents[] = {
+static const char * const img_parents[] = {
"clk26m",
"univpll_d4",
"tvdpll_ck",
@@ -280,61 +265,6 @@ static const char * const camtg_parents[] = {
"univpll_192m_d32"
};
-static const char * const camtg2_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
-static const char * const camtg3_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
-static const char * const camtg4_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
-static const char * const camtg5_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
-static const char * const camtg6_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
static const char * const uart_parents[] = {
"clk26m",
"univpll_d6_d8"
@@ -362,15 +292,7 @@ static const char * const msdc50_0_parents[] = {
"univpll_d4_d2"
};
-static const char * const msdc30_1_parents[] = {
- "clk26m",
- "univpll_d6_d2",
- "mainpll_d6_d2",
- "mainpll_d7_d2",
- "msdcpll_d2"
-};
-
-static const char * const msdc30_2_parents[] = {
+static const char * const msdc30_parents[] = {
"clk26m",
"univpll_d6_d2",
"mainpll_d6_d2",
@@ -457,39 +379,6 @@ static const char * const seninf_parents[] = {
"univpll_d5"
};
-static const char * const seninf1_parents[] = {
- "clk26m",
- "univpll_d4_d4",
- "univpll_d6_d2",
- "univpll_d4_d2",
- "univpll_d7",
- "univpll_d6",
- "mmpll_d6",
- "univpll_d5"
-};
-
-static const char * const seninf2_parents[] = {
- "clk26m",
- "univpll_d4_d4",
- "univpll_d6_d2",
- "univpll_d4_d2",
- "univpll_d7",
- "univpll_d6",
- "mmpll_d6",
- "univpll_d5"
-};
-
-static const char * const seninf3_parents[] = {
- "clk26m",
- "univpll_d4_d4",
- "univpll_d6_d2",
- "univpll_d4_d2",
- "univpll_d7",
- "univpll_d6",
- "mmpll_d6",
- "univpll_d5"
-};
-
static const char * const tl_parents[] = {
"clk26m",
"univpll_192m_d2",
@@ -649,52 +538,7 @@ static const char * const sflash_parents[] = {
"univpll_d5_d8"
};
-static const char * const apll_i2s0_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s1_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s2_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s3_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s4_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s5_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s6_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s7_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s8_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s9_m_parents[] = {
+static const char * const apll_i2s_m_parents[] = {
"aud_1_sel",
"aud_2_sel"
};
@@ -724,9 +568,9 @@ static const struct mtk_mux top_mtk_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_MDP_SEL, "mdp_sel",
mdp_parents, 0x020, 0x024, 0x028, 8, 4, 15, 0x004, 5),
MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG1_SEL, "img1_sel",
- img1_parents, 0x020, 0x024, 0x028, 16, 4, 23, 0x004, 6),
+ img_parents, 0x020, 0x024, 0x028, 16, 4, 23, 0x004, 6),
MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG2_SEL, "img2_sel",
- img2_parents, 0x020, 0x024, 0x028, 24, 4, 31, 0x004, 7),
+ img_parents, 0x020, 0x024, 0x028, 24, 4, 31, 0x004, 7),
/* CLK_CFG_2 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE_SEL, "ipe_sel",
ipe_parents, 0x030, 0x034, 0x038, 0, 4, 7, 0x004, 8),
@@ -747,16 +591,16 @@ static const struct mtk_mux top_mtk_muxes[] = {
camtg_parents, 0x050, 0x054, 0x058, 24, 3, 31, 0x004, 19),
/* CLK_CFG_5 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2_SEL, "camtg2_sel",
- camtg2_parents, 0x060, 0x064, 0x068, 0, 3, 7, 0x004, 20),
+ camtg_parents, 0x060, 0x064, 0x068, 0, 3, 7, 0x004, 20),
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3_SEL, "camtg3_sel",
- camtg3_parents, 0x060, 0x064, 0x068, 8, 3, 15, 0x004, 21),
+ camtg_parents, 0x060, 0x064, 0x068, 8, 3, 15, 0x004, 21),
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG4_SEL, "camtg4_sel",
- camtg4_parents, 0x060, 0x064, 0x068, 16, 3, 23, 0x004, 22),
+ camtg_parents, 0x060, 0x064, 0x068, 16, 3, 23, 0x004, 22),
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG5_SEL, "camtg5_sel",
- camtg5_parents, 0x060, 0x064, 0x068, 24, 3, 31, 0x004, 23),
+ camtg_parents, 0x060, 0x064, 0x068, 24, 3, 31, 0x004, 23),
/* CLK_CFG_6 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG6_SEL, "camtg6_sel",
- camtg6_parents, 0x070, 0x074, 0x078, 0, 3, 7, 0x004, 24),
+ camtg_parents, 0x070, 0x074, 0x078, 0, 3, 7, 0x004, 24),
MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel",
uart_parents, 0x070, 0x074, 0x078, 8, 1, 15, 0x004, 25),
MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel",
@@ -767,9 +611,9 @@ static const struct mtk_mux top_mtk_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
msdc50_0_parents, 0x080, 0x084, 0x088, 0, 3, 7, 0x004, 28),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
- msdc30_1_parents, 0x080, 0x084, 0x088, 8, 3, 15, 0x004, 29),
+ msdc30_parents, 0x080, 0x084, 0x088, 8, 3, 15, 0x004, 29),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel",
- msdc30_2_parents, 0x080, 0x084, 0x088, 16, 3, 23, 0x004, 30),
+ msdc30_parents, 0x080, 0x084, 0x088, 16, 3, 23, 0x004, 30),
MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel",
audio_parents, 0x080, 0x084, 0x088, 24, 2, 31, 0x008, 0),
/* CLK_CFG_8 */
@@ -796,12 +640,12 @@ static const struct mtk_mux top_mtk_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF_SEL, "seninf_sel",
seninf_parents, 0x0b0, 0x0b4, 0x0b8, 16, 3, 23, 0x008, 11),
MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1_SEL, "seninf1_sel",
- seninf1_parents, 0x0b0, 0x0b4, 0x0b8, 24, 3, 31, 0x008, 12),
+ seninf_parents, 0x0b0, 0x0b4, 0x0b8, 24, 3, 31, 0x008, 12),
/* CLK_CFG_11 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF2_SEL, "seninf2_sel",
- seninf2_parents, 0x0c0, 0x0c4, 0x0c8, 0, 3, 7, 0x008, 13),
+ seninf_parents, 0x0c0, 0x0c4, 0x0c8, 0, 3, 7, 0x008, 13),
MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF3_SEL, "seninf3_sel",
- seninf3_parents, 0x0c0, 0x0c4, 0x0c8, 8, 3, 15, 0x008, 14),
+ seninf_parents, 0x0c0, 0x0c4, 0x0c8, 8, 3, 15, 0x008, 14),
MUX_GATE_CLR_SET_UPD(CLK_TOP_TL_SEL, "tl_sel",
tl_parents, 0x0c0, 0x0c4, 0x0c8, 16, 2, 23, 0x008, 15),
MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC_SEL, "dxcc_sel",
@@ -847,16 +691,16 @@ static const struct mtk_mux top_mtk_muxes[] = {
static struct mtk_composite top_muxes[] = {
/* CLK_AUDDIV_0 */
- MUX(CLK_TOP_APLL_I2S0_M_SEL, "apll_i2s0_m_sel", apll_i2s0_m_parents, 0x320, 16, 1),
- MUX(CLK_TOP_APLL_I2S1_M_SEL, "apll_i2s1_m_sel", apll_i2s1_m_parents, 0x320, 17, 1),
- MUX(CLK_TOP_APLL_I2S2_M_SEL, "apll_i2s2_m_sel", apll_i2s2_m_parents, 0x320, 18, 1),
- MUX(CLK_TOP_APLL_I2S3_M_SEL, "apll_i2s3_m_sel", apll_i2s3_m_parents, 0x320, 19, 1),
- MUX(CLK_TOP_APLL_I2S4_M_SEL, "apll_i2s4_m_sel", apll_i2s4_m_parents, 0x320, 20, 1),
- MUX(CLK_TOP_APLL_I2S5_M_SEL, "apll_i2s5_m_sel", apll_i2s5_m_parents, 0x320, 21, 1),
- MUX(CLK_TOP_APLL_I2S6_M_SEL, "apll_i2s6_m_sel", apll_i2s6_m_parents, 0x320, 22, 1),
- MUX(CLK_TOP_APLL_I2S7_M_SEL, "apll_i2s7_m_sel", apll_i2s7_m_parents, 0x320, 23, 1),
- MUX(CLK_TOP_APLL_I2S8_M_SEL, "apll_i2s8_m_sel", apll_i2s8_m_parents, 0x320, 24, 1),
- MUX(CLK_TOP_APLL_I2S9_M_SEL, "apll_i2s9_m_sel", apll_i2s9_m_parents, 0x320, 25, 1),
+ MUX(CLK_TOP_APLL_I2S0_M_SEL, "apll_i2s0_m_sel", apll_i2s_m_parents, 0x320, 16, 1),
+ MUX(CLK_TOP_APLL_I2S1_M_SEL, "apll_i2s1_m_sel", apll_i2s_m_parents, 0x320, 17, 1),
+ MUX(CLK_TOP_APLL_I2S2_M_SEL, "apll_i2s2_m_sel", apll_i2s_m_parents, 0x320, 18, 1),
+ MUX(CLK_TOP_APLL_I2S3_M_SEL, "apll_i2s3_m_sel", apll_i2s_m_parents, 0x320, 19, 1),
+ MUX(CLK_TOP_APLL_I2S4_M_SEL, "apll_i2s4_m_sel", apll_i2s_m_parents, 0x320, 20, 1),
+ MUX(CLK_TOP_APLL_I2S5_M_SEL, "apll_i2s5_m_sel", apll_i2s_m_parents, 0x320, 21, 1),
+ MUX(CLK_TOP_APLL_I2S6_M_SEL, "apll_i2s6_m_sel", apll_i2s_m_parents, 0x320, 22, 1),
+ MUX(CLK_TOP_APLL_I2S7_M_SEL, "apll_i2s7_m_sel", apll_i2s_m_parents, 0x320, 23, 1),
+ MUX(CLK_TOP_APLL_I2S8_M_SEL, "apll_i2s8_m_sel", apll_i2s_m_parents, 0x320, 24, 1),
+ MUX(CLK_TOP_APLL_I2S9_M_SEL, "apll_i2s9_m_sel", apll_i2s_m_parents, 0x320, 25, 1),
};
static const struct mtk_composite top_adj_divs[] = {
@@ -1224,6 +1068,28 @@ static void clk_mt8192_top_init_early(struct device_node *node)
CLK_OF_DECLARE_DRIVER(mt8192_topckgen, "mediatek,mt8192-topckgen",
clk_mt8192_top_init_early);
+/* Register mux notifier for MFG mux */
+static int clk_mt8192_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
+{
+ struct mtk_mux_nb *mfg_mux_nb;
+ int i;
+
+ mfg_mux_nb = devm_kzalloc(dev, sizeof(*mfg_mux_nb), GFP_KERNEL);
+ if (!mfg_mux_nb)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(top_mtk_muxes); i++)
+ if (top_mtk_muxes[i].id == CLK_TOP_MFG_PLL_SEL)
+ break;
+ if (i == ARRAY_SIZE(top_mtk_muxes))
+ return -EINVAL;
+
+ mfg_mux_nb->ops = top_mtk_muxes[i].ops;
+ mfg_mux_nb->bypass_index = 0; /* Bypass to 26M crystal */
+
+ return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
+}
+
static int clk_mt8192_top_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -1247,6 +1113,12 @@ static int clk_mt8192_top_probe(struct platform_device *pdev)
if (r)
return r;
+ r = clk_mt8192_reg_mfg_mux_notifier(&pdev->dev,
+ top_clk_data->hws[CLK_TOP_MFG_PLL_SEL]->clk);
+ if (r)
+ return r;
+
+
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
top_clk_data);
}
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index 97657f255618..fcd410461d3b 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -55,8 +55,12 @@ static const struct mtk_gate_regs infra_ao4_cg_regs = {
#define GATE_INFRA_AO1(_id, _name, _parent, _shift) \
GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, 0)
+#define GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
#define GATE_INFRA_AO2(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &infra_ao2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, 0)
#define GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, _flag) \
GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao3_cg_regs, _shift, \
@@ -136,8 +140,11 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO2(CLK_INFRA_AO_UNIPRO_SYS, "infra_ao_unipro_sys", "top_ufs", 11),
GATE_INFRA_AO2(CLK_INFRA_AO_UNIPRO_TICK, "infra_ao_unipro_tick", "top_ufs_tick1us", 12),
GATE_INFRA_AO2(CLK_INFRA_AO_UFS_MP_SAP_B, "infra_ao_ufs_mp_sap_b", "top_ufs_mp_sap_cfg", 13),
- GATE_INFRA_AO2(CLK_INFRA_AO_PWRMCU, "infra_ao_pwrmcu", "top_pwrmcu", 15),
- GATE_INFRA_AO2(CLK_INFRA_AO_PWRMCU_BUS_H, "infra_ao_pwrmcu_bus_h", "top_axi", 17),
+ /* pwrmcu is used by ATF for platform PM: clocks must never be disabled by the kernel */
+ GATE_INFRA_AO2_FLAGS(CLK_INFRA_AO_PWRMCU, "infra_ao_pwrmcu", "top_pwrmcu", 15,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO2_FLAGS(CLK_INFRA_AO_PWRMCU_BUS_H, "infra_ao_pwrmcu_bus_h", "top_axi", 17,
+ CLK_IS_CRITICAL),
GATE_INFRA_AO2(CLK_INFRA_AO_APDMA_B, "infra_ao_apdma_b", "top_axi", 18),
GATE_INFRA_AO2(CLK_INFRA_AO_SPI4, "infra_ao_spi4", "top_spi", 25),
GATE_INFRA_AO2(CLK_INFRA_AO_SPI5, "infra_ao_spi5", "top_spi", 26),
@@ -193,6 +200,9 @@ static u16 infra_ao_rst_ofs[] = {
static u16 infra_ao_idx_map[] = {
[MT8195_INFRA_RST0_THERM_CTRL_SWRST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8195_INFRA_RST2_USBSIF_P1_SWRST] = 2 * RST_NR_PER_BANK + 18,
+ [MT8195_INFRA_RST2_PCIE_P0_SWRST] = 2 * RST_NR_PER_BANK + 26,
+ [MT8195_INFRA_RST2_PCIE_P1_SWRST] = 2 * RST_NR_PER_BANK + 27,
[MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST] = 3 * RST_NR_PER_BANK + 5,
[MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST] = 4 * RST_NR_PER_BANK + 10,
};
diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c
index 9411c556a5a9..c94cb71bd9b9 100644
--- a/drivers/clk/mediatek/clk-mt8195-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8195-mfg.c
@@ -17,10 +17,12 @@ static const struct mtk_gate_regs mfg_cg_regs = {
};
#define GATE_MFG(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr, \
+ CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
- GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "top_mfg_core_tmp", 0),
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_ck_fast_ref", 0),
};
static const struct mtk_clk_desc mfg_desc = {
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index ec70e1f65eaf..8cbab5ca2e58 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -298,11 +298,14 @@ static const char * const ipu_if_parents[] = {
"mmpll_d4"
};
+/*
+ * MFG can also be parented to "univpll_d6" and "univpll_d7":
+ * these have been removed from the parents list to let us
+ * achieve GPU DVFS without any special clock handlers.
+ */
static const char * const mfg_parents[] = {
"clk26m",
- "mainpll_d5_d2",
- "univpll_d6",
- "univpll_d7"
+ "mainpll_d5_d2"
};
static const char * const camtg_parents[] = {
@@ -1149,11 +1152,6 @@ static const struct mtk_mux top_mtk_muxes[] = {
*/
};
-static struct mtk_composite top_muxes[] = {
- /* CLK_MISC_CFG_3 */
- MUX(CLK_TOP_MFG_CK_FAST_REF, "mfg_ck_fast_ref", mfg_fast_parents, 0x0250, 8, 1),
-};
-
static const struct mtk_composite top_adj_divs[] = {
DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "top_i2si1_mck", 0x0320, 0, 0x0328, 8, 0),
DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "top_i2si2_mck", 0x0320, 1, 0x0328, 8, 8),
@@ -1222,10 +1220,26 @@ static const struct of_device_id of_match_clk_mt8195_topck[] = {
{}
};
+/* Register mux notifier for MFG mux */
+static int clk_mt8195_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
+{
+ struct mtk_mux_nb *mfg_mux_nb;
+
+ mfg_mux_nb = devm_kzalloc(dev, sizeof(*mfg_mux_nb), GFP_KERNEL);
+ if (!mfg_mux_nb)
+ return -ENOMEM;
+
+ mfg_mux_nb->ops = &clk_mux_ops;
+ mfg_mux_nb->bypass_index = 0; /* Bypass to TOP_MFG_CORE_TMP */
+
+ return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
+}
+
static int clk_mt8195_topck_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *top_clk_data;
struct device_node *node = pdev->dev.of_node;
+ struct clk_hw *hw;
int r;
void __iomem *base;
@@ -1253,15 +1267,22 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
if (r)
goto unregister_factors;
- r = mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8195_clk_lock, top_clk_data);
+ hw = devm_clk_hw_register_mux(&pdev->dev, "mfg_ck_fast_ref", mfg_fast_parents,
+ ARRAY_SIZE(mfg_fast_parents), CLK_SET_RATE_PARENT,
+ (base + 0x250), 8, 1, 0, &mt8195_clk_lock);
+ if (IS_ERR(hw))
+ goto unregister_muxes;
+ top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF] = hw;
+
+ r = clk_mt8195_reg_mfg_mux_notifier(&pdev->dev,
+ top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF]->clk);
if (r)
goto unregister_muxes;
r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
&mt8195_clk_lock, top_clk_data);
if (r)
- goto unregister_composite_muxes;
+ goto unregister_muxes;
r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
if (r)
@@ -1279,8 +1300,6 @@ unregister_gates:
mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
unregister_composite_divs:
mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
-unregister_composite_muxes:
- mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), top_clk_data);
unregister_muxes:
mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
unregister_factors:
@@ -1300,7 +1319,6 @@ static int clk_mt8195_topck_remove(struct platform_device *pdev)
of_clk_del_provider(node);
mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
- mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), top_clk_data);
mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
index 261a7f76dd3c..07b46bfd5040 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -37,6 +37,10 @@ static const struct mtk_gate_regs vdo0_2_cg_regs = {
#define GATE_VDO0_2(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo0_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO0_2_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &vdo0_2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)
+
static const struct mtk_gate vdo0_clks[] = {
/* VDO0_0 */
GATE_VDO0_0(CLK_VDO0_DISP_OVL0, "vdo0_disp_ovl0", "top_vpp", 0),
@@ -85,7 +89,8 @@ static const struct mtk_gate vdo0_clks[] = {
/* VDO0_2 */
GATE_VDO0_2(CLK_VDO0_DSI0_DSI, "vdo0_dsi0_dsi", "top_dsi_occ", 0),
GATE_VDO0_2(CLK_VDO0_DSI1_DSI, "vdo0_dsi1_dsi", "top_dsi_occ", 8),
- GATE_VDO0_2(CLK_VDO0_DP_INTF0_DP_INTF, "vdo0_dp_intf0_dp_intf", "top_edp", 16),
+ GATE_VDO0_2_FLAGS(CLK_VDO0_DP_INTF0_DP_INTF, "vdo0_dp_intf0_dp_intf",
+ "top_edp", 16, CLK_SET_RATE_PARENT),
};
static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
index 3378487d2c90..835335b9d87b 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -34,6 +34,12 @@ static const struct mtk_gate_regs vdo1_3_cg_regs = {
.sta_ofs = 0x140,
};
+static const struct mtk_gate_regs vdo1_4_cg_regs = {
+ .set_ofs = 0x400,
+ .clr_ofs = 0x400,
+ .sta_ofs = 0x400,
+};
+
#define GATE_VDO1_0(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
@@ -43,9 +49,16 @@ static const struct mtk_gate_regs vdo1_3_cg_regs = {
#define GATE_VDO1_2(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO1_2_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &vdo1_2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)
+
#define GATE_VDO1_3(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO1_4(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_4_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
static const struct mtk_gate vdo1_clks[] = {
/* VDO1_0 */
GATE_VDO1_0(CLK_VDO1_SMI_LARB2, "vdo1_smi_larb2", "top_vpp", 0),
@@ -99,10 +112,12 @@ static const struct mtk_gate vdo1_clks[] = {
GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPI0, "vdo1_disp_monitor_dpi0", "top_vpp", 1),
GATE_VDO1_2(CLK_VDO1_DPI1, "vdo1_dpi1", "top_vpp", 8),
GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPI1, "vdo1_disp_monitor_dpi1", "top_vpp", 9),
- GATE_VDO1_2(CLK_VDO1_DPINTF, "vdo1_dpintf", "top_vpp", 16),
+ GATE_VDO1_2_FLAGS(CLK_VDO1_DPINTF, "vdo1_dpintf", "top_dp", 16, CLK_SET_RATE_PARENT),
GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPINTF, "vdo1_disp_monitor_dpintf", "top_vpp", 17),
/* VDO1_3 */
GATE_VDO1_3(CLK_VDO1_26M_SLOW, "vdo1_26m_slow", "clk26m", 8),
+ /* VDO1_4 */
+ GATE_VDO1_4(CLK_VDO1_DPI1_HDMI, "vdo1_dpi1_hdmi", "hdmi_txpll", 0),
};
static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
diff --git a/drivers/clk/mediatek/clk-mt8365-apu.c b/drivers/clk/mediatek/clk-mt8365-apu.c
new file mode 100644
index 000000000000..91ffe89d9721
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-apu.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs apu_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_APU(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apu_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate apu_clks[] = {
+ GATE_APU(CLK_APU_AHB, "apu_ahb", "ifr_apu_axi", 5),
+ GATE_APU(CLK_APU_EDMA, "apu_edma", "apu_sel", 4),
+ GATE_APU(CLK_APU_IF_CK, "apu_if_ck", "apu_if_sel", 3),
+ GATE_APU(CLK_APU_JTAG, "apu_jtag", "clk26m", 2),
+ GATE_APU(CLK_APU_AXI, "apu_axi", "apu_sel", 1),
+ GATE_APU(CLK_APU_IPU_CK, "apu_ck", "apu_sel", 0),
+};
+
+static const struct mtk_clk_desc apu_desc = {
+ .clks = apu_clks,
+ .num_clks = ARRAY_SIZE(apu_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_apu[] = {
+ {
+ .compatible = "mediatek,mt8365-apu",
+ .data = &apu_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_apu_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-apu",
+ .of_match_table = of_match_clk_mt8365_apu,
+ },
+};
+builtin_platform_driver(clk_mt8365_apu_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-cam.c b/drivers/clk/mediatek/clk-mt8365-cam.c
new file mode 100644
index 000000000000..31d5b5cd6de1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-cam.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB2, "cam_larb2", "mm_sel", 0),
+ GATE_CAM(CLK_CAM, "cam", "mm_sel", 6),
+ GATE_CAM(CLK_CAMTG, "camtg", "mm_sel", 7),
+ GATE_CAM(CLK_CAM_SENIF, "cam_senif", "mm_sel", 8),
+ GATE_CAM(CLK_CAMSV0, "camsv0", "mm_sel", 9),
+ GATE_CAM(CLK_CAMSV1, "camsv1", "mm_sel", 10),
+ GATE_CAM(CLK_CAM_FDVT, "cam_fdvt", "mm_sel", 11),
+ GATE_CAM(CLK_CAM_WPE, "cam_wpe", "mm_sel", 12),
+};
+
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_cam[] = {
+ {
+ .compatible = "mediatek,mt8365-imgsys",
+ .data = &cam_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_cam_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-cam",
+ .of_match_table = of_match_clk_mt8365_cam,
+ },
+};
+builtin_platform_driver(clk_mt8365_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-mfg.c b/drivers/clk/mediatek/clk-mt8365-mfg.c
new file mode 100644
index 000000000000..587b49128b03
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-mfg.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mfg0_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs mfg1_cg_regs = {
+ .set_ofs = 0x280,
+ .clr_ofs = 0x280,
+ .sta_ofs = 0x280,
+};
+
+#define GATE_MFG0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_MFG1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ /* MFG0 */
+ GATE_MFG0(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
+ /* MFG1 */
+ GATE_MFG1(CLK_MFG_MBIST_DIAG, "mfg_mbist_diag", "mbist_diag_sel", 24),
+};
+
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_mfg[] = {
+ {
+ .compatible = "mediatek,mt8365-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_mfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-mfg",
+ .of_match_table = of_match_clk_mt8365_mfg,
+ },
+};
+builtin_platform_driver(clk_mt8365_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
new file mode 100644
index 000000000000..5c8bf18ab1f1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Copyright (c) 2022 BayLibre, SAS
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 0),
+ GATE_MM0(CLK_MM_MM_MDP_CCORR0, "mm_mdp_ccorr0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MM_DISP_RSZ0, "mm_disp_rsz0", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 10),
+ GATE_MM0(CLK_MM_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_sel", 13),
+ GATE_MM0(CLK_MM_MM_DISP_AAL0, "mm_disp_aal0", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_sel", 15),
+ GATE_MM0(CLK_MM_MM_DISP_DITHER0, "mm_disp_dither0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_MM_DSI0, "mm_dsi0", "mm_sel", 17),
+ GATE_MM0(CLK_MM_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 18),
+ GATE_MM0(CLK_MM_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "vpll_dpix", 20),
+ GATE_MM0(CLK_MM_MM_FAKE, "mm_fake", "mm_sel", 21),
+ GATE_MM0(CLK_MM_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 22),
+ GATE_MM0(CLK_MM_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_MM_SMI_COMM0, "mm_smi_comm0", "mm_sel", 24),
+ GATE_MM0(CLK_MM_MM_SMI_COMM1, "mm_smi_comm1", "mm_sel", 25),
+ GATE_MM0(CLK_MM_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 26),
+ GATE_MM0(CLK_MM_MM_SMI_IMG, "mm_smi_img", "mm_sel", 27),
+ GATE_MM0(CLK_MM_MM_SMI_CAM, "mm_smi_cam", "mm_sel", 28),
+ GATE_MM0(CLK_MM_IMG_IMG_DL_RELAY, "mm_dl_relay", "mm_sel", 29),
+ GATE_MM0(CLK_MM_IMG_IMG_DL_ASYNC_TOP, "mm_dl_async_top", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DSI0_DIG_DSI, "mm_dsi0_dig_dsi", "dsi0_lntc_dsick", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_26M_HRTWT, "mm_f26m_hrtwt", "clk26m", 0),
+ GATE_MM1(CLK_MM_MM_DPI0, "mm_dpi0", "mm_sel", 1),
+ GATE_MM1(CLK_MM_LVDSTX_PXL, "mm_flvdstx_pxl", "vpll_dpix", 2),
+ GATE_MM1(CLK_MM_LVDSTX_CTS, "mm_flvdstx_cts", "lvdstx_dig_cts", 3),
+};
+
+static int clk_mt8365_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_hw_onecell_data *clk_data;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ ret = mtk_clk_register_gates_with_dev(node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data,
+ dev);
+ if (ret)
+ goto err_free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto err_unregister_gates;
+
+ return 0;
+
+err_unregister_gates:
+ mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+
+err_free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return ret;
+}
+
+static struct platform_driver clk_mt8365_mm_drv = {
+ .probe = clk_mt8365_mm_probe,
+ .driver = {
+ .name = "clk-mt8365-mm",
+ },
+};
+builtin_platform_driver(clk_mt8365_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-vdec.c b/drivers/clk/mediatek/clk-mt8365-vdec.c
new file mode 100644
index 000000000000..cdc678e8941c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-vdec.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_VDEC, "vdec_fvdec_ck", "mm_sel", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_LARB1, "vdec_flarb1_ck", "mm_sel", 0),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_vdec[] = {
+ {
+ .compatible = "mediatek,mt8365-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-vdec",
+ .of_match_table = of_match_clk_mt8365_vdec,
+ },
+};
+builtin_platform_driver(clk_mt8365_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-venc.c b/drivers/clk/mediatek/clk-mt8365-venc.c
new file mode 100644
index 000000000000..0e080c22119d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-venc.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ /* VENC */
+ GATE_VENC(CLK_VENC, "venc_fvenc_ck", "mm_sel", 4),
+ GATE_VENC(CLK_VENC_JPGENC, "venc_jpgenc_ck", "mm_sel", 8),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_venc[] = {
+ {
+ .compatible = "mediatek,mt8365-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-venc",
+ .of_match_table = of_match_clk_mt8365_venc,
+ },
+};
+builtin_platform_driver(clk_mt8365_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
new file mode 100644
index 000000000000..adfecb618f10
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+
+static DEFINE_SPINLOCK(mt8365_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_I2S0_BCK, "i2s0_bck", NULL, 26000000),
+ FIXED_CLK(CLK_TOP_DSI0_LNTC_DSICK, "dsi0_lntc_dsick", "clk26m",
+ 75000000),
+ FIXED_CLK(CLK_TOP_VPLL_DPIX, "vpll_dpix", "clk26m", 75000000),
+ FIXED_CLK(CLK_TOP_LVDSTX_CLKDIG_CTS, "lvdstx_dig_cts", "clk26m",
+ 52500000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_SYS_26M_D2, "sys_26m_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "mainpll", 1, 32),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "mainpll", 1, 24),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ_en", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL2_D32, "univpll2_d32", "univpll", 1, 96),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_MFGPLL, "mfgpll_ck", "mfgpll", 1, 1),
+ FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2),
+ FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4),
+ FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8),
+ FACTOR(CLK_TOP_LVDSPLL_D16, "lvdspll_d16", "lvdspll", 1, 16),
+ FACTOR(CLK_TOP_USB20_192M, "usb20_192m_ck", "usb20_en", 1, 13),
+ FACTOR(CLK_TOP_USB20_192M_D4, "usb20_192m_d4", "usb20_192m_ck", 1, 4),
+ FACTOR(CLK_TOP_USB20_192M_D8, "usb20_192m_d8", "usb20_192m_ck", 1, 8),
+ FACTOR(CLK_TOP_USB20_192M_D16, "usb20_192m_d16", "usb20_192m_ck",
+ 1, 16),
+ FACTOR(CLK_TOP_USB20_192M_D32, "usb20_192m_d32", "usb20_192m_ck",
+ 1, 32),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1, 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1, 8),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2_ck", 1, 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2_ck", 1, 8),
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_DSPPLL, "dsppll_ck", "dsppll", 1, 1),
+ FACTOR(CLK_TOP_DSPPLL_D2, "dsppll_d2", "dsppll", 1, 2),
+ FACTOR(CLK_TOP_DSPPLL_D4, "dsppll_d4", "dsppll", 1, 4),
+ FACTOR(CLK_TOP_DSPPLL_D8, "dsppll_d8", "dsppll", 1, 8),
+ FACTOR(CLK_TOP_APUPLL, "apupll_ck", "apupll", 1, 1),
+ FACTOR(CLK_TOP_CLK26M_D52, "clk26m_d52", "clk26m", 1, 52),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll_d7",
+ "syspll1_d4",
+ "syspll3_d2"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "syspll_d3",
+ "syspll1_d2"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "mmpll_d2"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "univpll2_d2",
+ "syspll1_d2",
+ "univpll1_d2",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mfgpll_ck",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll1_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "usb20_192m_d8",
+ "univpll2_d8",
+ "usb20_192m_d4",
+ "univpll2_d32",
+ "usb20_192m_d16",
+ "usb20_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const msdc50_0_hc_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll1_d4",
+ "syspll2_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll_d5",
+ "syspll2_d2",
+ "univpll1_d4",
+ "syspll4_d2"
+};
+
+static const char * const msdc50_2_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll_d3",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll2_d2",
+ "syspll2_d2",
+ "univpll1_d4"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "msdcpll_d2",
+ "univpll2_d2",
+ "syspll2_d2",
+ "univpll1_d4",
+ "syspll1_d4",
+ "syspll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck"
+};
+
+static const char * const aud_engen1_parents[] = {
+ "clk26m",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const aud_engen2_parents[] = {
+ "clk26m",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8"
+};
+
+static const char * const aud_spdif_parents[] = {
+ "clk26m",
+ "univpll_d2"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll1_d4",
+ "syspll1_d8"
+};
+
+static const char * const ssusb_sys_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "univpll2_d4",
+ "univpll3_d2"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "univpll3_d2",
+ "syspll1_d8",
+ "syspll2_d8"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "syspll1_d8"
+};
+
+static const char * const senif_parents[] = {
+ "clk26m",
+ "univpll1_d4",
+ "univpll1_d2",
+ "univpll2_d2"
+};
+
+static const char * const aes_fde_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll_d3",
+ "univpll2_d2",
+ "univpll1_d2",
+ "syspll1_d2"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "lvdspll_d2",
+ "lvdspll_d4",
+ "lvdspll_d8",
+ "lvdspll_d16"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "sys_26m_d2",
+ "dsppll_ck",
+ "dsppll_d2",
+ "dsppll_d4",
+ "dsppll_d8"
+};
+
+static const char * const nfi2x_parents[] = {
+ "clk26m",
+ "syspll2_d2",
+ "syspll_d7",
+ "syspll_d3",
+ "syspll2_d4",
+ "msdcpll_d2",
+ "univpll1_d2",
+ "univpll_d5"
+};
+
+static const char * const nfiecc_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "univpll2_d4",
+ "syspll_d7",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll2_d2",
+ "syspll_d5"
+};
+
+static const char * const ecc_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "univpll1_d2",
+ "univpll_d3",
+ "syspll_d2"
+};
+
+static const char * const eth_parents[] = {
+ "clk26m",
+ "univpll2_d8",
+ "syspll4_d4",
+ "syspll1_d8",
+ "syspll4_d2"
+};
+
+static const char * const gcpu_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "univpll2_d2",
+ "syspll_d3",
+ "syspll2_d2"
+};
+
+static const char * const gcpu_cpm_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "syspll2_d2"
+};
+
+static const char * const apu_parents[] = {
+ "clk26m",
+ "univpll_d2",
+ "apupll_ck",
+ "mmpll_ck",
+ "syspll_d3",
+ "univpll1_d2",
+ "syspll1_d2",
+ "syspll1_d4"
+};
+
+static const char * const mbist_diag_parents[] = {
+ "clk26m",
+ "syspll4_d4",
+ "univpll2_d8"
+};
+
+static const char * const apll_i2s0_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static struct mtk_composite top_misc_mux_gates[] = {
+ /* CLK_CFG_11 */
+ MUX_GATE(CLK_TOP_MBIST_DIAG_SEL, "mbist_diag_sel", mbist_diag_parents,
+ 0x0ec, 0, 2, 7),
+};
+
+struct mt8365_clk_audio_mux {
+ int id;
+ const char *name;
+ u8 shift;
+};
+
+static struct mt8365_clk_audio_mux top_misc_muxes[] = {
+ { CLK_TOP_APLL_I2S0_SEL, "apll_i2s0_sel", 11},
+ { CLK_TOP_APLL_I2S1_SEL, "apll_i2s1_sel", 12},
+ { CLK_TOP_APLL_I2S2_SEL, "apll_i2s2_sel", 13},
+ { CLK_TOP_APLL_I2S3_SEL, "apll_i2s3_sel", 14},
+ { CLK_TOP_APLL_TDMOUT_SEL, "apll_tdmout_sel", 15},
+ { CLK_TOP_APLL_TDMIN_SEL, "apll_tdmin_sel", 16},
+ { CLK_TOP_APLL_SPDIF_SEL, "apll_spdif_sel", 17},
+};
+
+#define CLK_CFG_UPDATE 0x004
+#define CLK_CFG_UPDATE1 0x008
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x040, 0x044, 0x048, 0, 2, 7, CLK_CFG_UPDATE,
+ 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x040,
+ 0x044, 0x048, 8, 2, 15, CLK_CFG_UPDATE, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x040, 0x044,
+ 0x048, 16, 3, 23, CLK_CFG_UPDATE, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, 0x040,
+ 0x044, 0x048, 24, 3, 31, CLK_CFG_UPDATE, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x050,
+ 0x054, 0x058, 0, 2, 7, CLK_CFG_UPDATE, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x050,
+ 0x054, 0x058, 8, 2, 15, CLK_CFG_UPDATE, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents,
+ 0x050, 0x054, 0x058, 16, 3, 23, CLK_CFG_UPDATE, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG1_SEL, "camtg1_sel", camtg_parents,
+ 0x050, 0x054, 0x058, 24, 3, 31, CLK_CFG_UPDATE, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x060,
+ 0x064, 0x068, 0, 1, 7, CLK_CFG_UPDATE, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x060,
+ 0x064, 0x068, 8, 2, 15, CLK_CFG_UPDATE, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HC_SEL, "msdc50_0_hc_sel",
+ msdc50_0_hc_parents, 0x060, 0x064, 0x068, 16, 2,
+ 23, CLK_CFG_UPDATE, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC2_2_HC_SEL, "msdc2_2_hc_sel",
+ msdc50_0_hc_parents, 0x060, 0x064, 0x068, 24, 2,
+ 31, CLK_CFG_UPDATE, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
+ msdc50_0_parents, 0x070, 0x074, 0x078, 0, 3, 7,
+ CLK_CFG_UPDATE, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_2_SEL, "msdc50_2_sel",
+ msdc50_2_parents, 0x070, 0x074, 0x078, 8, 3, 15,
+ CLK_CFG_UPDATE, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
+ msdc30_1_parents, 0x070, 0x074, 0x078, 16, 3, 23,
+ CLK_CFG_UPDATE, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ 0x070, 0x074, 0x078, 24, 2, 31, CLK_CFG_UPDATE,
+ 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
+ aud_intbus_parents, 0x080, 0x084, 0x088, 0, 2, 7,
+ CLK_CFG_UPDATE, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents,
+ 0x080, 0x084, 0x088, 8, 1, 15, CLK_CFG_UPDATE, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents,
+ 0x080, 0x084, 0x088, 16, 1, 23, CLK_CFG_UPDATE,
+ 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel",
+ aud_engen1_parents, 0x080, 0x084, 0x088, 24, 2, 31,
+ CLK_CFG_UPDATE, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel",
+ aud_engen2_parents, 0x090, 0x094, 0x098, 0, 2, 7,
+ CLK_CFG_UPDATE, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_SPDIF_SEL, "aud_spdif_sel",
+ aud_spdif_parents, 0x090, 0x094, 0x098, 8, 1, 15,
+ CLK_CFG_UPDATE, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM_SEL, "disp_pwm_sel",
+ disp_pwm_parents, 0x090, 0x094, 0x098, 16, 2, 23,
+ CLK_CFG_UPDATE, 22),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DXCC_SEL, "dxcc_sel", dxcc_parents,
+ 0x0a0, 0x0a4, 0x0a8, 0, 2, 7, CLK_CFG_UPDATE,
+ 24, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_SYS_SEL, "ssusb_sys_sel",
+ ssusb_sys_parents, 0x0a0, 0x0a4, 0x0a8, 8, 2, 15,
+ CLK_CFG_UPDATE, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_SEL, "ssusb_xhci_sel",
+ ssusb_sys_parents, 0x0a0, 0x0a4, 0x0a8, 16, 2, 23,
+ CLK_CFG_UPDATE, 26),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM_SEL, "spm_sel", spm_parents,
+ 0x0a0, 0x0a4, 0x0a8, 24, 1, 31,
+ CLK_CFG_UPDATE, 27, CLK_IS_CRITICAL),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, 0x0b0,
+ 0x0b4, 0x0b8, 0, 3, 7, CLK_CFG_UPDATE, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x0b0,
+ 0x0b4, 0x0b8, 8, 2, 15, CLK_CFG_UPDATE, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENIF_SEL, "senif_sel", senif_parents,
+ 0x0b0, 0x0b4, 0x0b8, 16, 2, 23, CLK_CFG_UPDATE,
+ 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_FDE_SEL, "aes_fde_sel",
+ aes_fde_parents, 0x0b0, 0x0b4, 0x0b8, 24, 3, 31,
+ CLK_CFG_UPDATE, 31),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM_SEL, "camtm_sel", senif_parents,
+ 0x0c0, 0x0c4, 0x0c8, 0, 2, 7, CLK_CFG_UPDATE1, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x0c0,
+ 0x0c4, 0x0c8, 8, 3, 15, CLK_CFG_UPDATE1, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi0_parents, 0x0c0,
+ 0x0c4, 0x0c8, 16, 3, 23, CLK_CFG_UPDATE1, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP_SEL, "dsp_sel", dsp_parents, 0x0c0,
+ 0x0c4, 0x0c8, 24, 3, 31, CLK_CFG_UPDATE1, 3),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFI2X_SEL, "nfi2x_sel", nfi2x_parents,
+ 0x0d0, 0x0d4, 0x0d8, 0, 3, 7, CLK_CFG_UPDATE1, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents,
+ 0x0d0, 0x0d4, 0x0d8, 8, 3, 15, CLK_CFG_UPDATE1, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ECC_SEL, "ecc_sel", ecc_parents, 0x0d0,
+ 0x0d4, 0x0d8, 16, 3, 23, CLK_CFG_UPDATE1, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETH_SEL, "eth_sel", eth_parents, 0x0d0,
+ 0x0d4, 0x0d8, 24, 3, 31, CLK_CFG_UPDATE1, 7),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x0e0,
+ 0x0e4, 0x0e8, 0, 3, 7, CLK_CFG_UPDATE1, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_GCPU_CPM_SEL, "gcpu_cpm_sel",
+ gcpu_cpm_parents, 0x0e0, 0x0e4, 0x0e8, 8, 2, 15,
+ CLK_CFG_UPDATE1, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APU_SEL, "apu_sel", apu_parents, 0x0e0,
+ 0x0e4, 0x0e8, 16, 3, 23, CLK_CFG_UPDATE1, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APU_IF_SEL, "apu_if_sel", apu_parents,
+ 0x0e0, 0x0e4, 0x0e8, 24, 3, 31, CLK_CFG_UPDATE1,
+ 11),
+};
+
+static const char * const mcu_bus_parents[] = {
+ "clk26m",
+ "armpll",
+ "mainpll",
+ "univpll_d2"
+};
+
+static struct mtk_composite mcu_muxes[] = {
+ /* bus_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0,
+ 9, 2, -1, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+};
+
+#define DIV_ADJ_F(_id, _name, _parent, _reg, _shift, _width, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .clk_divider_flags = _flags, \
+}
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV0, "apll12_ck_div0", "apll_i2s0_sel",
+ 0x324, 0, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV1, "apll12_ck_div1", "apll_i2s1_sel",
+ 0x324, 8, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV2, "apll12_ck_div2", "apll_i2s2_sel",
+ 0x324, 16, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "apll_i2s3_sel",
+ 0x324, 24, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "apll_spdif_sel",
+ 0x32c, 0, 8, CLK_DIVIDER_ROUND_CLOSEST),
+};
+
+struct mtk_simple_gate {
+ int id;
+ const char *name;
+ const char *parent;
+ u32 reg;
+ u8 shift;
+ unsigned long gate_flags;
+};
+
+static const struct mtk_simple_gate top_clk_gates[] = {
+ { CLK_TOP_CONN_32K, "conn_32k", "clk32k", 0x0, 10, CLK_GATE_SET_TO_DISABLE },
+ { CLK_TOP_CONN_26M, "conn_26m", "clk26m", 0x0, 11, CLK_GATE_SET_TO_DISABLE },
+ { CLK_TOP_DSP_32K, "dsp_32k", "clk32k", 0x0, 16, CLK_GATE_SET_TO_DISABLE },
+ { CLK_TOP_DSP_26M, "dsp_26m", "clk26m", 0x0, 17, CLK_GATE_SET_TO_DISABLE },
+ { CLK_TOP_USB20_48M_EN, "usb20_48m_en", "usb20_192m_d4", 0x104, 8, 0 },
+ { CLK_TOP_UNIVPLL_48M_EN, "univpll_48m_en", "usb20_192m_d4", 0x104, 9, 0 },
+ { CLK_TOP_LVDSTX_CLKDIG_EN, "lvdstx_dig_en", "lvdstx_dig_cts", 0x104, 20, 0 },
+ { CLK_TOP_VPLL_DPIX_EN, "vpll_dpix_en", "vpll_dpix", 0x104, 21, 0 },
+ { CLK_TOP_SSUSB_TOP_CK_EN, "ssusb_top_ck_en", NULL, 0x104, 22, 0 },
+ { CLK_TOP_SSUSB_PHY_CK_EN, "ssusb_phy_ck_en", NULL, 0x104, 23, 0 },
+ { CLK_TOP_AUD_I2S0_M, "aud_i2s0_m_ck", "apll12_ck_div0", 0x320, 0, 0 },
+ { CLK_TOP_AUD_I2S1_M, "aud_i2s1_m_ck", "apll12_ck_div1", 0x320, 1, 0 },
+ { CLK_TOP_AUD_I2S2_M, "aud_i2s2_m_ck", "apll12_ck_div2", 0x320, 2, 0 },
+ { CLK_TOP_AUD_I2S3_M, "aud_i2s3_m_ck", "apll12_ck_div3", 0x320, 3, 0 },
+ { CLK_TOP_AUD_TDMOUT_M, "aud_tdmout_m_ck", "apll12_ck_div4", 0x320, 4, 0 },
+ { CLK_TOP_AUD_TDMOUT_B, "aud_tdmout_b_ck", "apll12_ck_div4b", 0x320, 5, 0 },
+ { CLK_TOP_AUD_TDMIN_M, "aud_tdmin_m_ck", "apll12_ck_div5", 0x320, 6, 0 },
+ { CLK_TOP_AUD_TDMIN_B, "aud_tdmin_b_ck", "apll12_ck_div5b", 0x320, 7, 0 },
+ { CLK_TOP_AUD_SPDIF_M, "aud_spdif_m_ck", "apll12_ck_div6", 0x320, 8, 0 },
+};
+
+static const struct mtk_gate_regs ifr2_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs ifr3_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs ifr4_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs ifr5_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+static const struct mtk_gate_regs ifr6_cg_regs = {
+ .set_ofs = 0xd0,
+ .clr_ofs = 0xd4,
+ .sta_ofs = 0xd8,
+};
+
+#define GATE_IFR2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR4(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr4_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR5(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr5_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR6(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr6_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ifr_clks[] = {
+ /* IFR2 */
+ GATE_IFR2(CLK_IFR_PMIC_TMR, "ifr_pmic_tmr", "clk26m", 0),
+ GATE_IFR2(CLK_IFR_PMIC_AP, "ifr_pmic_ap", "clk26m", 1),
+ GATE_IFR2(CLK_IFR_PMIC_MD, "ifr_pmic_md", "clk26m", 2),
+ GATE_IFR2(CLK_IFR_PMIC_CONN, "ifr_pmic_conn", "clk26m", 3),
+ GATE_IFR2(CLK_IFR_ICUSB, "ifr_icusb", "axi_sel", 8),
+ GATE_IFR2(CLK_IFR_GCE, "ifr_gce", "axi_sel", 9),
+ GATE_IFR2(CLK_IFR_THERM, "ifr_therm", "axi_sel", 10),
+ GATE_IFR2(CLK_IFR_PWM_HCLK, "ifr_pwm_hclk", "axi_sel", 15),
+ GATE_IFR2(CLK_IFR_PWM1, "ifr_pwm1", "pwm_sel", 16),
+ GATE_IFR2(CLK_IFR_PWM2, "ifr_pwm2", "pwm_sel", 17),
+ GATE_IFR2(CLK_IFR_PWM3, "ifr_pwm3", "pwm_sel", 18),
+ GATE_IFR2(CLK_IFR_PWM4, "ifr_pwm4", "pwm_sel", 19),
+ GATE_IFR2(CLK_IFR_PWM5, "ifr_pwm5", "pwm_sel", 20),
+ GATE_IFR2(CLK_IFR_PWM, "ifr_pwm", "pwm_sel", 21),
+ GATE_IFR2(CLK_IFR_UART0, "ifr_uart0", "uart_sel", 22),
+ GATE_IFR2(CLK_IFR_UART1, "ifr_uart1", "uart_sel", 23),
+ GATE_IFR2(CLK_IFR_UART2, "ifr_uart2", "uart_sel", 24),
+ GATE_IFR2(CLK_IFR_DSP_UART, "ifr_dsp_uart", "uart_sel", 26),
+ GATE_IFR2(CLK_IFR_GCE_26M, "ifr_gce_26m", "clk26m", 27),
+ GATE_IFR2(CLK_IFR_CQ_DMA_FPC, "ifr_cq_dma_fpc", "axi_sel", 28),
+ GATE_IFR2(CLK_IFR_BTIF, "ifr_btif", "axi_sel", 31),
+ /* IFR3 */
+ GATE_IFR3(CLK_IFR_SPI0, "ifr_spi0", "spi_sel", 1),
+ GATE_IFR3(CLK_IFR_MSDC0_HCLK, "ifr_msdc0", "msdc50_0_hc_sel", 2),
+ GATE_IFR3(CLK_IFR_MSDC2_HCLK, "ifr_msdc2", "msdc2_2_hc_sel", 3),
+ GATE_IFR3(CLK_IFR_MSDC1_HCLK, "ifr_msdc1", "axi_sel", 4),
+ GATE_IFR3(CLK_IFR_DVFSRC, "ifr_dvfsrc", "clk26m", 7),
+ GATE_IFR3(CLK_IFR_GCPU, "ifr_gcpu", "axi_sel", 8),
+ GATE_IFR3(CLK_IFR_TRNG, "ifr_trng", "axi_sel", 9),
+ GATE_IFR3(CLK_IFR_AUXADC, "ifr_auxadc", "clk26m", 10),
+ GATE_IFR3(CLK_IFR_AUXADC_MD, "ifr_auxadc_md", "clk26m", 14),
+ GATE_IFR3(CLK_IFR_AP_DMA, "ifr_ap_dma", "axi_sel", 18),
+ GATE_IFR3(CLK_IFR_DEBUGSYS, "ifr_debugsys", "axi_sel", 24),
+ GATE_IFR3(CLK_IFR_AUDIO, "ifr_audio", "axi_sel", 25),
+ /* IFR4 */
+ GATE_IFR4(CLK_IFR_PWM_FBCLK6, "ifr_pwm_fbclk6", "pwm_sel", 0),
+ GATE_IFR4(CLK_IFR_DISP_PWM, "ifr_disp_pwm", "disp_pwm_sel", 2),
+ GATE_IFR4(CLK_IFR_AUD_26M_BK, "ifr_aud_26m_bk", "clk26m", 4),
+ GATE_IFR4(CLK_IFR_CQ_DMA, "ifr_cq_dma", "axi_sel", 27),
+ /* IFR5 */
+ GATE_IFR5(CLK_IFR_MSDC0_SF, "ifr_msdc0_sf", "msdc50_0_sel", 0),
+ GATE_IFR5(CLK_IFR_MSDC1_SF, "ifr_msdc1_sf", "msdc50_0_sel", 1),
+ GATE_IFR5(CLK_IFR_MSDC2_SF, "ifr_msdc2_sf", "msdc50_0_sel", 2),
+ GATE_IFR5(CLK_IFR_AP_MSDC0, "ifr_ap_msdc0", "msdc50_0_sel", 7),
+ GATE_IFR5(CLK_IFR_MD_MSDC0, "ifr_md_msdc0", "msdc50_0_sel", 8),
+ GATE_IFR5(CLK_IFR_MSDC0_SRC, "ifr_msdc0_src", "msdc50_0_sel", 9),
+ GATE_IFR5(CLK_IFR_MSDC1_SRC, "ifr_msdc1_src", "msdc30_1_sel", 10),
+ GATE_IFR5(CLK_IFR_MSDC2_SRC, "ifr_msdc2_src", "msdc50_2_sel", 11),
+ GATE_IFR5(CLK_IFR_PWRAP_TMR, "ifr_pwrap_tmr", "clk26m", 12),
+ GATE_IFR5(CLK_IFR_PWRAP_SPI, "ifr_pwrap_spi", "clk26m", 13),
+ GATE_IFR5(CLK_IFR_PWRAP_SYS, "ifr_pwrap_sys", "clk26m", 14),
+ GATE_IFR5(CLK_IFR_IRRX_26M, "ifr_irrx_26m", "clk26m", 22),
+ GATE_IFR5(CLK_IFR_IRRX_32K, "ifr_irrx_32k", "clk32k", 23),
+ GATE_IFR5(CLK_IFR_I2C0_AXI, "ifr_i2c0_axi", "i2c_sel", 24),
+ GATE_IFR5(CLK_IFR_I2C1_AXI, "ifr_i2c1_axi", "i2c_sel", 25),
+ GATE_IFR5(CLK_IFR_I2C2_AXI, "ifr_i2c2_axi", "i2c_sel", 26),
+ GATE_IFR5(CLK_IFR_I2C3_AXI, "ifr_i2c3_axi", "i2c_sel", 27),
+ GATE_IFR5(CLK_IFR_NIC_AXI, "ifr_nic_axi", "axi_sel", 28),
+ GATE_IFR5(CLK_IFR_NIC_SLV_AXI, "ifr_nic_slv_axi", "axi_sel", 29),
+ GATE_IFR5(CLK_IFR_APU_AXI, "ifr_apu_axi", "axi_sel", 30),
+ /* IFR6 */
+ GATE_IFR6(CLK_IFR_NFIECC, "ifr_nfiecc", "nfiecc_sel", 0),
+ GATE_IFR6(CLK_IFR_NFI1X_BK, "ifr_nfi1x_bk", "nfi2x_sel", 1),
+ GATE_IFR6(CLK_IFR_NFIECC_BK, "ifr_nfiecc_bk", "nfi2x_sel", 2),
+ GATE_IFR6(CLK_IFR_NFI_BK, "ifr_nfi_bk", "axi_sel", 3),
+ GATE_IFR6(CLK_IFR_MSDC2_AP_BK, "ifr_msdc2_ap_bk", "axi_sel", 4),
+ GATE_IFR6(CLK_IFR_MSDC2_MD_BK, "ifr_msdc2_md_bk", "axi_sel", 5),
+ GATE_IFR6(CLK_IFR_MSDC2_BK, "ifr_msdc2_bk", "axi_sel", 6),
+ GATE_IFR6(CLK_IFR_SUSB_133_BK, "ifr_susb_133_bk", "axi_sel", 7),
+ GATE_IFR6(CLK_IFR_SUSB_66_BK, "ifr_susb_66_bk", "axi_sel", 8),
+ GATE_IFR6(CLK_IFR_SSUSB_SYS, "ifr_ssusb_sys", "ssusb_sys_sel", 9),
+ GATE_IFR6(CLK_IFR_SSUSB_REF, "ifr_ssusb_ref", "ssusb_sys_sel", 10),
+ GATE_IFR6(CLK_IFR_SSUSB_XHCI, "ifr_ssusb_xhci", "ssusb_xhci_sel", 11),
+};
+
+static const struct mtk_simple_gate peri_clks[] = {
+ { CLK_PERIAXI, "periaxi", "axi_sel", 0x20c, 31, 0 },
+};
+
+#define MT8365_PLL_FMAX (3800UL * MHZ)
+#define MT8365_PLL_FMIN (1500UL * MHZ)
+#define CON0_MT8365_RST_BAR BIT(23)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, _div_table, \
+ _rst_bar_mask, _pcw_chg_reg) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8365_PLL_FMAX, \
+ .fmin = MT8365_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = 8, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
+ _pcw_shift, _rst_bar_mask, _pcw_chg_reg) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, NULL, _rst_bar_mask, \
+ _pcw_chg_reg)
+
+static const struct mtk_pll_div_table armpll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1500 * MHZ },
+ { .div = 2, .freq = 750 * MHZ },
+ { .div = 3, .freq = 375 * MHZ },
+ { .div = 4, .freq = 182500000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mfgpll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 800 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table dsppll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 600 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL_B(CLK_APMIXED_ARMPLL, "armpll", 0x030C, 0x0318, 0x00000001, PLL_AO,
+ 22, 0x0310, 24, 0, 0, 0, 0x0310, 0, armpll_div_table, 0, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0228, 0x0234, 0xFF000001,
+ HAVE_RST_BAR, 22, 0x022C, 24, 0, 0, 0, 0x022C, 0,
+ CON0_MT8365_RST_BAR, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll2", 0x0208, 0x0214, 0xFF000001,
+ HAVE_RST_BAR, 22, 0x020C, 24, 0, 0, 0, 0x020C, 0,
+ CON0_MT8365_RST_BAR, 0),
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0218, 0x0224, 0x00000001, 0, 22,
+ 0x021C, 24, 0, 0, 0, 0x021C, 0, mfgpll_div_table, 0, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0350, 0x035C, 0x00000001, 0, 22,
+ 0x0354, 24, 0, 0, 0, 0x0354, 0, 0, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0330, 0x033C, 0x00000001, 0, 22,
+ 0x0334, 24, 0, 0, 0, 0x0334, 0, 0, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x031C, 0x032C, 0x00000001, 0, 32,
+ 0x0320, 24, 0x0040, 0x000C, 0, 0x0324, 0, 0, 0x0320),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0360, 0x0370, 0x00000001, 0, 32,
+ 0x0364, 24, 0x004C, 0x000C, 5, 0x0368, 0, 0, 0x0364),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0374, 0x0380, 0x00000001, 0, 22,
+ 0x0378, 24, 0, 0, 0, 0x0378, 0, 0, 0),
+ PLL_B(CLK_APMIXED_DSPPLL, "dsppll", 0x0390, 0x039C, 0x00000001, 0, 22,
+ 0x0394, 24, 0, 0, 0, 0x0394, 0, dsppll_div_table, 0, 0),
+ PLL(CLK_APMIXED_APUPLL, "apupll", 0x03A0, 0x03AC, 0x00000001, 0, 22,
+ 0x03A4, 24, 0, 0, 0, 0x03A4, 0, 0, 0),
+};
+
+static int clk_mt8365_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *hw;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ hw = devm_clk_hw_register_gate(dev, "univ_en", "univpll2", 0,
+ base + 0x204, 0, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[CLK_APMIXED_UNIV_EN] = hw;
+
+ hw = devm_clk_hw_register_gate(dev, "usb20_en", "univ_en", 0,
+ base + 0x204, 1, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[CLK_APMIXED_USB20_EN] = hw;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_plls;
+
+ return 0;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+
+ return ret;
+}
+
+static int
+clk_mt8365_register_mtk_simple_gates(struct device *dev, void __iomem *base,
+ struct clk_hw_onecell_data *clk_data,
+ const struct mtk_simple_gate *gates,
+ unsigned int num_gates)
+{
+ unsigned int i;
+
+ for (i = 0; i != num_gates; ++i) {
+ const struct mtk_simple_gate *gate = &gates[i];
+ struct clk_hw *hw;
+
+ hw = devm_clk_hw_register_gate(dev, gate->name, gate->parent, 0,
+ base + gate->reg, gate->shift,
+ gate->gate_flags, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ clk_data->hws[gate->id] = hw;
+ }
+
+ return 0;
+}
+
+static int clk_mt8365_top_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+ int i;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_fixed_clks(top_fixed_clks,
+ ARRAY_SIZE(top_fixed_clks), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ clk_data);
+ if (ret)
+ goto unregister_fixed_clks;
+
+ ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ &mt8365_clk_lock, clk_data);
+ if (ret)
+ goto unregister_factors;
+
+ ret = mtk_clk_register_composites(top_misc_mux_gates,
+ ARRAY_SIZE(top_misc_mux_gates), base,
+ &mt8365_clk_lock, clk_data);
+ if (ret)
+ goto unregister_muxes;
+
+ for (i = 0; i != ARRAY_SIZE(top_misc_muxes); ++i) {
+ struct mt8365_clk_audio_mux *mux = &top_misc_muxes[i];
+ struct clk_hw *hw;
+
+ hw = devm_clk_hw_register_mux(dev, mux->name, apll_i2s0_parents,
+ ARRAY_SIZE(apll_i2s0_parents),
+ CLK_SET_RATE_PARENT, base + 0x320,
+ mux->shift, 1, 0, NULL);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto unregister_composites;
+ }
+
+ clk_data->hws[mux->id] = hw;
+ }
+
+ ret = mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt8365_clk_lock, clk_data);
+ if (ret)
+ goto unregister_composites;
+
+ ret = clk_mt8365_register_mtk_simple_gates(dev, base, clk_data,
+ top_clk_gates,
+ ARRAY_SIZE(top_clk_gates));
+ if (ret)
+ goto unregister_dividers;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_dividers;
+
+ return 0;
+
+unregister_dividers:
+ mtk_clk_unregister_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ clk_data);
+unregister_composites:
+ mtk_clk_unregister_composites(top_misc_mux_gates,
+ ARRAY_SIZE(top_misc_mux_gates), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+unregister_fixed_clks:
+ mtk_clk_unregister_fixed_clks(top_fixed_clks,
+ ARRAY_SIZE(top_fixed_clks), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return ret;
+}
+
+static int clk_mt8365_infra_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
+ clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ return 0;
+
+unregister_gates:
+ mtk_clk_unregister_gates(ifr_clks, ARRAY_SIZE(ifr_clks), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return ret;
+}
+
+static int clk_mt8365_peri_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, CLK_PERI_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = clk_mt8365_register_mtk_simple_gates(dev, base, clk_data,
+ peri_clks,
+ ARRAY_SIZE(peri_clks));
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+
+ return ret;
+}
+
+static int clk_mt8365_mcu_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes),
+ base, &mt8365_clk_lock, clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_composites;
+
+ return 0;
+
+unregister_composites:
+ mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes),
+ clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return ret;
+}
+
+static const struct of_device_id of_match_clk_mt8365[] = {
+ {
+ .compatible = "mediatek,mt8365-apmixedsys",
+ .data = clk_mt8365_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt8365-topckgen",
+ .data = clk_mt8365_top_probe,
+ }, {
+ .compatible = "mediatek,mt8365-infracfg",
+ .data = clk_mt8365_infra_probe,
+ }, {
+ .compatible = "mediatek,mt8365-pericfg",
+ .data = clk_mt8365_peri_probe,
+ }, {
+ .compatible = "mediatek,mt8365-mcucfg",
+ .data = clk_mt8365_mcu_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt8365_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *pdev);
+ int ret;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ ret = clk_probe(pdev);
+ if (ret)
+ dev_err(&pdev->dev,
+ "%s: could not register clock provider: %d\n",
+ pdev->name, ret);
+
+ return ret;
+}
+
+static struct platform_driver clk_mt8365_drv = {
+ .probe = clk_mt8365_probe,
+ .driver = {
+ .name = "clk-mt8365",
+ .of_match_table = of_match_clk_mt8365,
+ },
+};
+
+static int __init clk_mt8365_init(void)
+{
+ return platform_driver_register(&clk_mt8365_drv);
+}
+arch_initcall(clk_mt8365_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 05a188c62119..d31f01d0ba1c 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -18,19 +18,42 @@
#include "clk-mtk.h"
#include "clk-gate.h"
-struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
+static void mtk_init_clk_data(struct clk_hw_onecell_data *clk_data,
+ unsigned int clk_num)
{
int i;
+
+ clk_data->num = clk_num;
+
+ for (i = 0; i < clk_num; i++)
+ clk_data->hws[i] = ERR_PTR(-ENOENT);
+}
+
+struct clk_hw_onecell_data *mtk_devm_alloc_clk_data(struct device *dev,
+ unsigned int clk_num)
+{
struct clk_hw_onecell_data *clk_data;
- clk_data = kzalloc(struct_size(clk_data, hws, clk_num), GFP_KERNEL);
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, clk_num),
+ GFP_KERNEL);
if (!clk_data)
return NULL;
- clk_data->num = clk_num;
+ mtk_init_clk_data(clk_data, clk_num);
- for (i = 0; i < clk_num; i++)
- clk_data->hws[i] = ERR_PTR(-ENOENT);
+ return clk_data;
+}
+EXPORT_SYMBOL_GPL(mtk_devm_alloc_clk_data);
+
+struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
+{
+ struct clk_hw_onecell_data *clk_data;
+
+ clk_data = kzalloc(struct_size(clk_data, hws, clk_num), GFP_KERNEL);
+ if (!clk_data)
+ return NULL;
+
+ mtk_init_clk_data(clk_data, clk_num);
return clk_data;
}
@@ -80,7 +103,7 @@ err:
if (IS_ERR_OR_NULL(clk_data->hws[rc->id]))
continue;
- clk_unregister_fixed_rate(clk_data->hws[rc->id]->clk);
+ clk_hw_unregister_fixed_rate(clk_data->hws[rc->id]);
clk_data->hws[rc->id] = ERR_PTR(-ENOENT);
}
@@ -102,7 +125,7 @@ void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num,
if (IS_ERR_OR_NULL(clk_data->hws[rc->id]))
continue;
- clk_unregister_fixed_rate(clk_data->hws[rc->id]->clk);
+ clk_hw_unregister_fixed_rate(clk_data->hws[rc->id]);
clk_data->hws[rc->id] = ERR_PTR(-ENOENT);
}
}
@@ -146,7 +169,7 @@ err:
if (IS_ERR_OR_NULL(clk_data->hws[ff->id]))
continue;
- clk_unregister_fixed_factor(clk_data->hws[ff->id]->clk);
+ clk_hw_unregister_fixed_factor(clk_data->hws[ff->id]);
clk_data->hws[ff->id] = ERR_PTR(-ENOENT);
}
@@ -168,7 +191,7 @@ void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
if (IS_ERR_OR_NULL(clk_data->hws[ff->id]))
continue;
- clk_unregister_fixed_factor(clk_data->hws[ff->id]->clk);
+ clk_hw_unregister_fixed_factor(clk_data->hws[ff->id]);
clk_data->hws[ff->id] = ERR_PTR(-ENOENT);
}
}
@@ -393,12 +416,13 @@ err:
if (IS_ERR_OR_NULL(clk_data->hws[mcd->id]))
continue;
- mtk_clk_unregister_composite(clk_data->hws[mcd->id]);
+ clk_hw_unregister_divider(clk_data->hws[mcd->id]);
clk_data->hws[mcd->id] = ERR_PTR(-ENOENT);
}
return PTR_ERR(hw);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_dividers);
void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
struct clk_hw_onecell_data *clk_data)
@@ -414,10 +438,11 @@ void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
if (IS_ERR_OR_NULL(clk_data->hws[mcd->id]))
continue;
- clk_unregister_divider(clk_data->hws[mcd->id]->clk);
+ clk_hw_unregister_divider(clk_data->hws[mcd->id]);
clk_data->hws[mcd->id] = ERR_PTR(-ENOENT);
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_dividers);
int mtk_clk_simple_probe(struct platform_device *pdev)
{
@@ -434,7 +459,8 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, mcd->clks, mcd->num_clks, clk_data);
+ r = mtk_clk_register_gates_with_dev(node, mcd->clks, mcd->num_clks,
+ clk_data, &pdev->dev);
if (r)
goto free_data;
@@ -459,6 +485,7 @@ free_data:
mtk_free_clk_data(clk_data);
return r;
}
+EXPORT_SYMBOL_GPL(mtk_clk_simple_probe);
int mtk_clk_simple_remove(struct platform_device *pdev)
{
@@ -472,5 +499,6 @@ int mtk_clk_simple_remove(struct platform_device *pdev)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_clk_simple_remove);
MODULE_LICENSE("GPL");
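
With the devm_ variant introduced above, a provider probe whose allocations are all device-managed can simply return on error, while the plain mtk_alloc_clk_data() path still needs an explicit mtk_free_clk_data() in its unwind labels (as in clk_mt8365_top_probe() earlier in this series). A minimal sketch of the devm style; the clock-count macro and names are illustrative only, not part of this patch:

	static int example_clk_probe(struct platform_device *pdev)
	{
		struct clk_hw_onecell_data *clk_data;

		clk_data = mtk_devm_alloc_clk_data(&pdev->dev, EXAMPLE_NR_CLK);
		if (!clk_data)
			return -ENOMEM;

		/* register gates/muxes into clk_data here; errors can just return */

		return of_clk_add_hw_provider(pdev->dev.of_node,
					      of_clk_hw_onecell_get, clk_data);
	}
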
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 1b95c484d5aa..63ae7941aa92 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -184,10 +184,13 @@ void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
struct clk_hw_onecell_data *clk_data);
struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
+struct clk_hw_onecell_data *mtk_devm_alloc_clk_data(struct device *dev,
+ unsigned int clk_num);
void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data);
struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg);
+void mtk_clk_unregister_ref2usb_tx(struct clk_hw *hw);
struct mtk_clk_desc {
const struct mtk_gate *clks;
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index cd5f9fd8cb98..ba1720b9e231 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -4,6 +4,7 @@
* Author: Owen Chen <owen.chen@mediatek.com>
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
@@ -128,9 +129,18 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
return 0;
}
+static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+
+ return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
+}
+
const struct clk_ops mtk_mux_clr_set_upd_ops = {
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);
@@ -140,6 +150,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.is_enabled = mtk_clk_mux_is_enabled,
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
@@ -259,4 +270,41 @@ void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_muxes);
+/*
+ * This clock notifier is called when the frequency of the parent
+ * PLL clock is to be changed. The idea is to switch the parent to a
+ * stable clock, such as the main oscillator, while the PLL frequency
+ * stabilizes.
+ */
+static int mtk_clk_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *_data)
+{
+ struct clk_notifier_data *data = _data;
+ struct clk_hw *hw = __clk_get_hw(data->clk);
+ struct mtk_mux_nb *mux_nb = to_mtk_mux_nb(nb);
+ int ret = 0;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ mux_nb->original_index = mux_nb->ops->get_parent(hw);
+ ret = mux_nb->ops->set_parent(hw, mux_nb->bypass_index);
+ break;
+ case POST_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ ret = mux_nb->ops->set_parent(hw, mux_nb->original_index);
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+int devm_mtk_clk_mux_notifier_register(struct device *dev, struct clk *clk,
+ struct mtk_mux_nb *mux_nb)
+{
+ mux_nb->nb.notifier_call = mtk_clk_mux_notifier_cb;
+
+ return devm_clk_notifier_register(dev, clk, &mux_nb->nb);
+}
+EXPORT_SYMBOL_GPL(devm_mtk_clk_mux_notifier_register);
+
MODULE_LICENSE("GPL");
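
The notifier above is meant to be registered on the mux clock itself: when the rate of its PLL parent changes, the rate-change notification propagates down to the mux, the callback parks the mux on bypass_index, and the original parent is restored once the PLL has settled. A minimal usage sketch, assuming a topckgen probe wants to park mfg_sel on its 26 MHz parent (index 0) while mfgpll retrains; the structure and field names below are illustrative, not part of this patch:

	static struct mtk_mux_nb example_mfg_mux_nb = {
		.ops = &mtk_mux_gate_clr_set_upd_ops,
		.bypass_index = 0,	/* parent 0 is assumed to be clk26m */
	};

	/* in the topckgen probe, after mtk_clk_register_muxes() */
	ret = devm_mtk_clk_mux_notifier_register(&pdev->dev,
				clk_data->hws[CLK_TOP_MFG_SEL]->clk,
				&example_mfg_mux_nb);
	if (ret)
		goto unregister_muxes;
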
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 6539c58f5d7d..83ff420f4ebe 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -7,12 +7,14 @@
#ifndef __DRV_CLK_MTK_MUX_H
#define __DRV_CLK_MTK_MUX_H
+#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>
struct clk;
struct clk_hw_onecell_data;
struct clk_ops;
+struct device;
struct device_node;
struct mtk_mux {
@@ -89,4 +91,17 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
struct clk_hw_onecell_data *clk_data);
+struct mtk_mux_nb {
+ struct notifier_block nb;
+ const struct clk_ops *ops;
+
+ u8 bypass_index; /* Which parent to temporarily use */
+ u8 original_index; /* Set by notifier callback */
+};
+
+#define to_mtk_mux_nb(_nb) container_of(_nb, struct mtk_mux_nb, nb)
+
+int devm_mtk_clk_mux_notifier_register(struct device *dev, struct clk *clk,
+ struct mtk_mux_nb *mux_nb);
+
#endif /* __DRV_CLK_MTK_MUX_H */
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 179505549a7c..290ceda84ce4 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -228,5 +228,6 @@ int mtk_register_reset_controller_with_dev(struct device *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_register_reset_controller_with_dev);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 27cd2c1f3f61..434cd8f9de82 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -38,6 +38,7 @@ int meson_aoclkc_probe(struct platform_device *pdev)
struct meson_aoclk_reset_controller *rstc;
struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *np;
struct regmap *regmap;
int ret, clkid;
@@ -49,7 +50,9 @@ int meson_aoclkc_probe(struct platform_device *pdev)
if (!rstc)
return -ENOMEM;
- regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ np = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index 8d5a5dab955a..0e5e6b57eb20 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -18,6 +18,7 @@ int meson_eeclkc_probe(struct platform_device *pdev)
{
const struct meson_eeclkc_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *np;
struct regmap *map;
int ret, i;
@@ -26,7 +27,9 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return -EINVAL;
/* Get the hhi system controller node */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(map)) {
dev_err(dev,
"failed to get HHI regmap\n");
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 8f3b7a94a667..827e78fb16a8 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -3792,12 +3792,15 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
+ struct device_node *parent_np;
const char *notifier_clk_name;
struct clk *notifier_clk;
struct regmap *map;
int i, ret;
- map = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(map)) {
pr_err("failed to get HHI regmap - Trying obsolete regs\n");
return;
diff --git a/drivers/clk/microchip/Kconfig b/drivers/clk/microchip/Kconfig
index a5a99873c4f5..b46e864b3bd8 100644
--- a/drivers/clk/microchip/Kconfig
+++ b/drivers/clk/microchip/Kconfig
@@ -6,5 +6,6 @@ config COMMON_CLK_PIC32
config MCHP_CLK_MPFS
bool "Clk driver for PolarFire SoC"
depends on (RISCV && SOC_MICROCHIP_POLARFIRE) || COMPILE_TEST
+ select AUXILIARY_BUS
help
Supports Clock Configuration for PolarFire SoC
diff --git a/drivers/clk/microchip/Makefile b/drivers/clk/microchip/Makefile
index 5fa6dcf30a9a..13250e04e46c 100644
--- a/drivers/clk/microchip/Makefile
+++ b/drivers/clk/microchip/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_COMMON_CLK_PIC32) += clk-core.o
obj-$(CONFIG_PIC32MZDA) += clk-pic32mzda.o
obj-$(CONFIG_MCHP_CLK_MPFS) += clk-mpfs.o
+obj-$(CONFIG_MCHP_CLK_MPFS) += clk-mpfs-ccc.o
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
new file mode 100644
index 000000000000..7be028dced63
--- /dev/null
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ * Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries
+ */
+#include "asm-generic/errno-base.h"
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/microchip,mpfs-clock.h>
+
+/* address offset of control registers */
+#define MPFS_CCC_PLL_CR 0x04u
+#define MPFS_CCC_REF_CR 0x08u
+#define MPFS_CCC_SSCG_2_CR 0x2Cu
+#define MPFS_CCC_POSTDIV01_CR 0x10u
+#define MPFS_CCC_POSTDIV23_CR 0x14u
+
+#define MPFS_CCC_FBDIV_SHIFT 0x00u
+#define MPFS_CCC_FBDIV_WIDTH 0x0Cu
+#define MPFS_CCC_POSTDIV0_SHIFT 0x08u
+#define MPFS_CCC_POSTDIV1_SHIFT 0x18u
+#define MPFS_CCC_POSTDIV2_SHIFT MPFS_CCC_POSTDIV0_SHIFT
+#define MPFS_CCC_POSTDIV3_SHIFT MPFS_CCC_POSTDIV1_SHIFT
+#define MPFS_CCC_POSTDIV_WIDTH 0x06u
+#define MPFS_CCC_REFCLK_SEL BIT(6)
+#define MPFS_CCC_REFDIV_SHIFT 0x08u
+#define MPFS_CCC_REFDIV_WIDTH 0x06u
+
+#define MPFS_CCC_FIXED_DIV 4
+#define MPFS_CCC_OUTPUTS_PER_PLL 4
+#define MPFS_CCC_REFS_PER_PLL 2
+
+struct mpfs_ccc_data {
+ void __iomem **pll_base;
+ struct device *dev;
+ struct clk_hw_onecell_data hw_data;
+};
+
+struct mpfs_ccc_pll_hw_clock {
+ void __iomem *base;
+ const char *name;
+ const struct clk_parent_data *parents;
+ unsigned int id;
+ u32 reg_offset;
+ u32 shift;
+ u32 width;
+ u32 flags;
+ struct clk_hw hw;
+ struct clk_init_data init;
+};
+
+#define to_mpfs_ccc_clk(_hw) container_of(_hw, struct mpfs_ccc_pll_hw_clock, hw)
+
+/*
+ * mpfs_ccc_lock prevents anything else from writing to a fabric ccc
+ * while a software locked register is being written.
+ */
+static DEFINE_SPINLOCK(mpfs_ccc_lock);
+
+static const struct clk_parent_data mpfs_ccc_pll0_refs[] = {
+ { .fw_name = "pll0_ref0" },
+ { .fw_name = "pll0_ref1" },
+};
+
+static const struct clk_parent_data mpfs_ccc_pll1_refs[] = {
+ { .fw_name = "pll1_ref0" },
+ { .fw_name = "pll1_ref1" },
+};
+
+static unsigned long mpfs_ccc_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct mpfs_ccc_pll_hw_clock *ccc_hw = to_mpfs_ccc_clk(hw);
+ void __iomem *mult_addr = ccc_hw->base + ccc_hw->reg_offset;
+ void __iomem *ref_div_addr = ccc_hw->base + MPFS_CCC_REF_CR;
+ u32 mult, ref_div;
+
+ mult = readl_relaxed(mult_addr) >> MPFS_CCC_FBDIV_SHIFT;
+ mult &= clk_div_mask(MPFS_CCC_FBDIV_WIDTH);
+ ref_div = readl_relaxed(ref_div_addr) >> MPFS_CCC_REFDIV_SHIFT;
+ ref_div &= clk_div_mask(MPFS_CCC_REFDIV_WIDTH);
+
+ return prate * mult / (ref_div * MPFS_CCC_FIXED_DIV);
+}
+
+static u8 mpfs_ccc_pll_get_parent(struct clk_hw *hw)
+{
+ struct mpfs_ccc_pll_hw_clock *ccc_hw = to_mpfs_ccc_clk(hw);
+ void __iomem *pll_cr_addr = ccc_hw->base + MPFS_CCC_PLL_CR;
+
+ return !!(readl_relaxed(pll_cr_addr) & MPFS_CCC_REFCLK_SEL);
+}
+
+static const struct clk_ops mpfs_ccc_pll_ops = {
+ .recalc_rate = mpfs_ccc_pll_recalc_rate,
+ .get_parent = mpfs_ccc_pll_get_parent,
+};
+
+#define CLK_CCC_PLL(_id, _parents, _shift, _width, _flags, _offset) { \
+ .id = _id, \
+ .shift = _shift, \
+ .width = _width, \
+ .reg_offset = _offset, \
+ .flags = _flags, \
+ .parents = _parents, \
+}
+
+static struct mpfs_ccc_pll_hw_clock mpfs_ccc_pll_clks[] = {
+ CLK_CCC_PLL(CLK_CCC_PLL0, mpfs_ccc_pll0_refs, MPFS_CCC_FBDIV_SHIFT,
+ MPFS_CCC_FBDIV_WIDTH, 0, MPFS_CCC_SSCG_2_CR),
+ CLK_CCC_PLL(CLK_CCC_PLL1, mpfs_ccc_pll1_refs, MPFS_CCC_FBDIV_SHIFT,
+ MPFS_CCC_FBDIV_WIDTH, 0, MPFS_CCC_SSCG_2_CR),
+};
+
+struct mpfs_ccc_out_hw_clock {
+ struct clk_divider divider;
+ struct clk_init_data init;
+ unsigned int id;
+ u32 reg_offset;
+};
+
+#define CLK_CCC_OUT(_id, _shift, _width, _flags, _offset) { \
+ .id = _id, \
+ .divider.shift = _shift, \
+ .divider.width = _width, \
+ .reg_offset = _offset, \
+ .divider.flags = _flags, \
+ .divider.lock = &mpfs_ccc_lock, \
+}
+
+static struct mpfs_ccc_out_hw_clock mpfs_ccc_pll0out_clks[] = {
+ CLK_CCC_OUT(CLK_CCC_PLL0_OUT0, MPFS_CCC_POSTDIV0_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV01_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL0_OUT1, MPFS_CCC_POSTDIV1_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV01_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL0_OUT2, MPFS_CCC_POSTDIV2_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV23_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL0_OUT3, MPFS_CCC_POSTDIV3_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV23_CR),
+};
+
+static struct mpfs_ccc_out_hw_clock mpfs_ccc_pll1out_clks[] = {
+ CLK_CCC_OUT(CLK_CCC_PLL1_OUT0, MPFS_CCC_POSTDIV0_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV01_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL1_OUT1, MPFS_CCC_POSTDIV1_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV01_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL1_OUT2, MPFS_CCC_POSTDIV2_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV23_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL1_OUT3, MPFS_CCC_POSTDIV3_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV23_CR),
+};
+
+static struct mpfs_ccc_out_hw_clock *mpfs_ccc_pllout_clks[] = {
+ mpfs_ccc_pll0out_clks, mpfs_ccc_pll1out_clks
+};
+
+static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_clock *out_hws,
+ unsigned int num_clks, struct mpfs_ccc_data *data,
+ struct mpfs_ccc_pll_hw_clock *parent)
+{
+ int ret;
+
+ for (unsigned int i = 0; i < num_clks; i++) {
+ struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
+ char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+
+ snprintf(name, 23, "%s_out%u", parent->name, i);
+ out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
+ out_hw->divider.reg = parent->base + out_hw->reg_offset;
+
+ ret = devm_clk_hw_register(dev, &out_hw->divider.hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
+ out_hw->id);
+
+ data->hw_data.hws[out_hw->id] = &out_hw->divider.hw;
+ }
+
+ return 0;
+}
+
+#define CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = _parents, \
+ .num_parents = MPFS_CCC_REFS_PER_PLL, \
+ .ops = _ops, \
+ })
+
+static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clock *pll_hws,
+ unsigned int num_clks, struct mpfs_ccc_data *data)
+{
+ int ret;
+
+ for (unsigned int i = 0; i < num_clks; i++) {
+ struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
+ char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+
+ pll_hw->base = data->pll_base[i];
+ snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
+ pll_hw->name = (const char *)name;
+ pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
+ pll_hw->parents,
+ &mpfs_ccc_pll_ops, 0);
+
+ ret = devm_clk_hw_register(dev, &pll_hw->hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register ccc id: %d\n",
+ pll_hw->id);
+
+ data->hw_data.hws[pll_hw->id] = &pll_hw->hw;
+
+ ret = mpfs_ccc_register_outputs(dev, mpfs_ccc_pllout_clks[i],
+ MPFS_CCC_OUTPUTS_PER_PLL, data, pll_hw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mpfs_ccc_probe(struct platform_device *pdev)
+{
+ struct mpfs_ccc_data *clk_data;
+ void __iomem *pll_base[ARRAY_SIZE(mpfs_ccc_pll_clks)];
+ unsigned int num_clks;
+ int ret;
+
+ num_clks = ARRAY_SIZE(mpfs_ccc_pll_clks) + ARRAY_SIZE(mpfs_ccc_pll0out_clks) +
+ ARRAY_SIZE(mpfs_ccc_pll1out_clks);
+
+ clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hw_data.hws, num_clks),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ pll_base[0] = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pll_base[0]))
+ return PTR_ERR(pll_base[0]);
+
+ pll_base[1] = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pll_base[1]))
+ return PTR_ERR(pll_base[1]);
+
+ clk_data->pll_base = pll_base;
+ clk_data->hw_data.num = num_clks;
+ clk_data->dev = &pdev->dev;
+
+ ret = mpfs_ccc_register_plls(clk_data->dev, mpfs_ccc_pll_clks,
+ ARRAY_SIZE(mpfs_ccc_pll_clks), clk_data);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(clk_data->dev, of_clk_hw_onecell_get,
+ &clk_data->hw_data);
+}
+
+static const struct of_device_id mpfs_ccc_of_match_table[] = {
+ { .compatible = "microchip,mpfs-ccc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpfs_ccc_of_match_table);
+
+static struct platform_driver mpfs_ccc_driver = {
+ .probe = mpfs_ccc_probe,
+ .driver = {
+ .name = "microchip-mpfs-ccc",
+ .of_match_table = mpfs_ccc_of_match_table,
+ },
+};
+
+static int __init clk_ccc_init(void)
+{
+ return platform_driver_register(&mpfs_ccc_driver);
+}
+core_initcall(clk_ccc_init);
+
+static void __exit clk_ccc_exit(void)
+{
+ platform_driver_unregister(&mpfs_ccc_driver);
+}
+module_exit(clk_ccc_exit);
+
+MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
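
For reference, mpfs_ccc_pll_recalc_rate() above computes rate = prate * FBDIV / (REFDIV * MPFS_CCC_FIXED_DIV). As a purely illustrative example (register values assumed, not read from hardware): with a 100 MHz reference, FBDIV = 48 and REFDIV = 2, the PLL runs at 100 MHz * 48 / (2 * 4) = 600 MHz, and each output clock then applies its own one-based post-divider from POSTDIV01_CR/POSTDIV23_CR, so a post-divider of 6 would give 100 MHz on that output.
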
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index b6b89413e090..4f0a19db7ed7 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -1,14 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Daire McNamara,<daire.mcnamara@microchip.com>
- * Copyright (C) 2020 Microchip Technology Inc. All rights reserved.
+ * PolarFire SoC MSS/core complex clock control
+ *
+ * Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
*/
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <dt-bindings/clock/microchip,mpfs-clock.h>
+#include <soc/microchip/mpfs.h>
/* address offset of control registers */
#define REG_MSSPLL_REF_CR 0x08u
@@ -28,6 +31,7 @@
#define MSSPLL_FIXED_DIV 4u
struct mpfs_clock_data {
+ struct device *dev;
void __iomem *base;
void __iomem *msspll_base;
struct clk_hw_onecell_data hw_data;
@@ -46,37 +50,18 @@ struct mpfs_msspll_hw_clock {
#define to_mpfs_msspll_clk(_hw) container_of(_hw, struct mpfs_msspll_hw_clock, hw)
-struct mpfs_cfg_clock {
- const struct clk_div_table *table;
- unsigned int id;
- u32 reg_offset;
- u8 shift;
- u8 width;
- u8 flags;
-};
-
struct mpfs_cfg_hw_clock {
- struct mpfs_cfg_clock cfg;
- void __iomem *sys_base;
- struct clk_hw hw;
+ struct clk_divider cfg;
struct clk_init_data init;
-};
-
-#define to_mpfs_cfg_clk(_hw) container_of(_hw, struct mpfs_cfg_hw_clock, hw)
-
-struct mpfs_periph_clock {
unsigned int id;
- u8 shift;
+ u32 reg_offset;
};
struct mpfs_periph_hw_clock {
- struct mpfs_periph_clock periph;
- void __iomem *sys_base;
- struct clk_hw hw;
+ struct clk_gate periph;
+ unsigned int id;
};
-#define to_mpfs_periph_clk(_hw) container_of(_hw, struct mpfs_periph_hw_clock, hw)
-
/*
* mpfs_clk_lock prevents anything else from writing to the
* mpfs clk block while a software locked register is being written.
@@ -126,8 +111,62 @@ static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned lon
return prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv);
}
+static long mpfs_clk_msspll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+{
+ struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
+ void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
+ void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
+ u32 mult, ref_div;
+ unsigned long rate_before_ctrl;
+
+ mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
+ mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
+ ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
+ ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
+
+ rate_before_ctrl = rate * (ref_div * MSSPLL_FIXED_DIV) / mult;
+
+ return divider_round_rate(hw, rate_before_ctrl, prate, NULL, MSSPLL_POSTDIV_WIDTH,
+ msspll_hw->flags);
+}
+
+static int mpfs_clk_msspll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
+{
+ struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
+ void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
+ void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
+ void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR;
+ u32 mult, ref_div, postdiv;
+ int divider_setting;
+ unsigned long rate_before_ctrl, flags;
+
+ mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
+ mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
+ ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
+ ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
+
+ rate_before_ctrl = rate * (ref_div * MSSPLL_FIXED_DIV) / mult;
+ divider_setting = divider_get_val(rate_before_ctrl, prate, NULL, MSSPLL_POSTDIV_WIDTH,
+ msspll_hw->flags);
+
+ if (divider_setting < 0)
+ return divider_setting;
+
+ spin_lock_irqsave(&mpfs_clk_lock, flags);
+
+ postdiv = readl_relaxed(postdiv_addr);
+ postdiv &= ~(clk_div_mask(MSSPLL_POSTDIV_WIDTH) << MSSPLL_POSTDIV_SHIFT);
+ writel_relaxed(postdiv, postdiv_addr);
+
+ spin_unlock_irqrestore(&mpfs_clk_lock, flags);
+
+ return 0;
+}
+
static const struct clk_ops mpfs_clk_msspll_ops = {
.recalc_rate = mpfs_clk_msspll_recalc_rate,
+ .round_rate = mpfs_clk_msspll_round_rate,
+ .set_rate = mpfs_clk_msspll_set_rate,
};
#define CLK_PLL(_id, _name, _parent, _shift, _width, _flags, _offset) { \
@@ -144,25 +183,17 @@ static struct mpfs_msspll_hw_clock mpfs_msspll_clks[] = {
MSSPLL_FBDIV_WIDTH, 0, REG_MSSPLL_SSCG_2_CR),
};
-static int mpfs_clk_register_msspll(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hw,
- void __iomem *base)
-{
- msspll_hw->base = base;
-
- return devm_clk_hw_register(dev, &msspll_hw->hw);
-}
-
static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hws,
unsigned int num_clks, struct mpfs_clock_data *data)
{
- void __iomem *base = data->msspll_base;
unsigned int i;
int ret;
for (i = 0; i < num_clks; i++) {
struct mpfs_msspll_hw_clock *msspll_hw = &msspll_hws[i];
- ret = mpfs_clk_register_msspll(dev, msspll_hw, base);
+ msspll_hw->base = data->msspll_base;
+ ret = devm_clk_hw_register(dev, &msspll_hw->hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register msspll id: %d\n",
CLK_MSSPLL);
@@ -177,66 +208,15 @@ static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_c
* "CFG" clocks
*/
-static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
-{
- struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
- struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
- void __iomem *base_addr = cfg_hw->sys_base;
- u32 val;
-
- val = readl_relaxed(base_addr + cfg->reg_offset) >> cfg->shift;
- val &= clk_div_mask(cfg->width);
-
- return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width);
-}
-
-static long mpfs_cfg_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
-{
- struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
- struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
-
- return divider_round_rate(hw, rate, prate, cfg->table, cfg->width, 0);
-}
-
-static int mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
-{
- struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
- struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
- void __iomem *base_addr = cfg_hw->sys_base;
- unsigned long flags;
- u32 val;
- int divider_setting;
-
- divider_setting = divider_get_val(rate, prate, cfg->table, cfg->width, 0);
-
- if (divider_setting < 0)
- return divider_setting;
-
- spin_lock_irqsave(&mpfs_clk_lock, flags);
- val = readl_relaxed(base_addr + cfg->reg_offset);
- val &= ~(clk_div_mask(cfg->width) << cfg_hw->cfg.shift);
- val |= divider_setting << cfg->shift;
- writel_relaxed(val, base_addr + cfg->reg_offset);
-
- spin_unlock_irqrestore(&mpfs_clk_lock, flags);
-
- return 0;
-}
-
-static const struct clk_ops mpfs_clk_cfg_ops = {
- .recalc_rate = mpfs_cfg_clk_recalc_rate,
- .round_rate = mpfs_cfg_clk_round_rate,
- .set_rate = mpfs_cfg_clk_set_rate,
-};
-
#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \
- .cfg.id = _id, \
+ .id = _id, \
.cfg.shift = _shift, \
.cfg.width = _width, \
.cfg.table = _table, \
- .cfg.reg_offset = _offset, \
+ .reg_offset = _offset, \
.cfg.flags = _flags, \
- .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
+ .cfg.hw.init = CLK_HW_INIT(_name, _parent, &clk_divider_ops, 0), \
+ .cfg.lock = &mpfs_clk_lock, \
}
#define CLK_CPU_OFFSET 0u
@@ -252,42 +232,34 @@ static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
CLK_CFG(CLK_AHB, "clk_ahb", "clk_msspll", 4, 2, mpfs_div_ahb_table, 0,
REG_CLOCK_CONFIG_CR),
{
- .cfg.id = CLK_RTCREF,
+ .id = CLK_RTCREF,
.cfg.shift = 0,
.cfg.width = 12,
.cfg.table = mpfs_div_rtcref_table,
- .cfg.reg_offset = REG_RTC_CLOCK_CR,
+ .reg_offset = REG_RTC_CLOCK_CR,
.cfg.flags = CLK_DIVIDER_ONE_BASED,
- .hw.init =
- CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0),
+ .cfg.hw.init =
+ CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &clk_divider_ops, 0),
}
};
-static int mpfs_clk_register_cfg(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hw,
- void __iomem *sys_base)
-{
- cfg_hw->sys_base = sys_base;
-
- return devm_clk_hw_register(dev, &cfg_hw->hw);
-}
-
static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hws,
unsigned int num_clks, struct mpfs_clock_data *data)
{
- void __iomem *sys_base = data->base;
unsigned int i, id;
int ret;
for (i = 0; i < num_clks; i++) {
struct mpfs_cfg_hw_clock *cfg_hw = &cfg_hws[i];
- ret = mpfs_clk_register_cfg(dev, cfg_hw, sys_base);
+ cfg_hw->cfg.reg = data->base + cfg_hw->reg_offset;
+ ret = devm_clk_hw_register(dev, &cfg_hw->cfg.hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
- cfg_hw->cfg.id);
+ cfg_hw->id);
- id = cfg_hw->cfg.id;
- data->hw_data.hws[id] = &cfg_hw->hw;
+ id = cfg_hw->id;
+ data->hw_data.hws[id] = &cfg_hw->cfg.hw;
}
return 0;
@@ -297,77 +269,15 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *
* peripheral clocks - devices connected to axi or ahb buses.
*/
-static int mpfs_periph_clk_enable(struct clk_hw *hw)
-{
- struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
- struct mpfs_periph_clock *periph = &periph_hw->periph;
- void __iomem *base_addr = periph_hw->sys_base;
- u32 reg, val;
- unsigned long flags;
-
- spin_lock_irqsave(&mpfs_clk_lock, flags);
-
- reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
- val = reg & ~(1u << periph->shift);
- writel_relaxed(val, base_addr + REG_SUBBLK_RESET_CR);
-
- reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
- val = reg | (1u << periph->shift);
- writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR);
-
- spin_unlock_irqrestore(&mpfs_clk_lock, flags);
-
- return 0;
-}
-
-static void mpfs_periph_clk_disable(struct clk_hw *hw)
-{
- struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
- struct mpfs_periph_clock *periph = &periph_hw->periph;
- void __iomem *base_addr = periph_hw->sys_base;
- u32 reg, val;
- unsigned long flags;
-
- spin_lock_irqsave(&mpfs_clk_lock, flags);
-
- reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
- val = reg & ~(1u << periph->shift);
- writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR);
-
- spin_unlock_irqrestore(&mpfs_clk_lock, flags);
-}
-
-static int mpfs_periph_clk_is_enabled(struct clk_hw *hw)
-{
- struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
- struct mpfs_periph_clock *periph = &periph_hw->periph;
- void __iomem *base_addr = periph_hw->sys_base;
- u32 reg;
-
- reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
- if ((reg & (1u << periph->shift)) == 0u) {
- reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
- if (reg & (1u << periph->shift))
- return 1;
- }
-
- return 0;
-}
-
-static const struct clk_ops mpfs_periph_clk_ops = {
- .enable = mpfs_periph_clk_enable,
- .disable = mpfs_periph_clk_disable,
- .is_enabled = mpfs_periph_clk_is_enabled,
-};
-
#define CLK_PERIPH(_id, _name, _parent, _shift, _flags) { \
- .periph.id = _id, \
- .periph.shift = _shift, \
- .hw.init = CLK_HW_INIT_HW(_name, _parent, &mpfs_periph_clk_ops, \
+ .id = _id, \
+ .periph.bit_idx = _shift, \
+ .periph.hw.init = CLK_HW_INIT_HW(_name, _parent, &clk_gate_ops, \
_flags), \
+ .periph.lock = &mpfs_clk_lock, \
}
-#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw)
+#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].cfg.hw)
/*
* Critical clocks:
@@ -415,36 +325,116 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_CFM, "clk_periph_cfm", PARENT_CLK(AHB), 29, 0),
};
-static int mpfs_clk_register_periph(struct device *dev, struct mpfs_periph_hw_clock *periph_hw,
- void __iomem *sys_base)
-{
- periph_hw->sys_base = sys_base;
-
- return devm_clk_hw_register(dev, &periph_hw->hw);
-}
-
static int mpfs_clk_register_periphs(struct device *dev, struct mpfs_periph_hw_clock *periph_hws,
int num_clks, struct mpfs_clock_data *data)
{
- void __iomem *sys_base = data->base;
unsigned int i, id;
int ret;
for (i = 0; i < num_clks; i++) {
struct mpfs_periph_hw_clock *periph_hw = &periph_hws[i];
- ret = mpfs_clk_register_periph(dev, periph_hw, sys_base);
+ periph_hw->periph.reg = data->base + REG_SUBBLK_CLOCK_CR;
+ ret = devm_clk_hw_register(dev, &periph_hw->periph.hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
- periph_hw->periph.id);
+ periph_hw->id);
+
+ id = periph_hws[i].id;
+ data->hw_data.hws[id] = &periph_hw->periph.hw;
+ }
+
+ return 0;
+}
- id = periph_hws[i].periph.id;
- data->hw_data.hws[id] = &periph_hw->hw;
+/*
+ * Peripheral clock resets
+ */
+
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
+
+u32 mpfs_reset_read(struct device *dev)
+{
+ struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent);
+
+ return readl_relaxed(clock_data->base + REG_SUBBLK_RESET_CR);
+}
+EXPORT_SYMBOL_NS_GPL(mpfs_reset_read, MCHP_CLK_MPFS);
+
+void mpfs_reset_write(struct device *dev, u32 val)
+{
+ struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent);
+
+ writel_relaxed(val, clock_data->base + REG_SUBBLK_RESET_CR);
+}
+EXPORT_SYMBOL_NS_GPL(mpfs_reset_write, MCHP_CLK_MPFS);
+
+static void mpfs_reset_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+}
+
+static void mpfs_reset_adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ auxiliary_device_uninit(adev);
+
+ kfree(adev);
+}
+
+static struct auxiliary_device *mpfs_reset_adev_alloc(struct mpfs_clock_data *clk_data)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ adev->name = "reset-mpfs";
+ adev->dev.parent = clk_data->dev;
+ adev->dev.release = mpfs_reset_adev_release;
+ adev->id = 666u;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ return adev;
+}
+
+static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = mpfs_reset_adev_alloc(clk_data);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
}
+ return devm_add_action_or_reset(clk_data->dev, mpfs_reset_unregister_adev, adev);
+}
+
+#else /* !CONFIG_RESET_CONTROLLER */
+
+static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data)
+{
return 0;
}
+#endif /* !CONFIG_RESET_CONTROLLER */
+
static int mpfs_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -469,6 +459,8 @@ static int mpfs_clk_probe(struct platform_device *pdev)
return PTR_ERR(clk_data->msspll_base);
clk_data->hw_data.num = num_clks;
+ clk_data->dev = dev;
+ dev_set_drvdata(dev, clk_data);
ret = mpfs_clk_register_mssplls(dev, mpfs_msspll_clks, ARRAY_SIZE(mpfs_msspll_clks),
clk_data);
@@ -488,14 +480,14 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- return ret;
+ return mpfs_reset_controller_register(clk_data);
}
static const struct of_device_id mpfs_clk_of_match_table[] = {
{ .compatible = "microchip,mpfs-clkcfg", },
{}
};
-MODULE_DEVICE_TABLE(of, mpfs_clk_match_table);
+MODULE_DEVICE_TABLE(of, mpfs_clk_of_match_table);
static struct platform_driver mpfs_clk_driver = {
.probe = mpfs_clk_probe,
@@ -518,4 +510,7 @@ static void __exit clk_mpfs_exit(void)
module_exit(clk_mpfs_exit);
MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
-MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Padmarao Begari <padmarao.begari@microchip.com>");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 48dfb18b490e..130d1a723879 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -19,9 +19,6 @@
#include "clk.h"
#include "reset.h"
-#define APBC_RTC 0x28
-#define APBC_TWSI0 0x2c
-#define APBC_KPC 0x30
#define APBC_UART0 0x0
#define APBC_UART1 0x4
#define APBC_GPIO 0x8
@@ -29,20 +26,40 @@
#define APBC_PWM1 0x10
#define APBC_PWM2 0x14
#define APBC_PWM3 0x18
+#define APBC_RTC 0x28
+#define APBC_TWSI0 0x2c
+#define APBC_KPC 0x30
#define APBC_TIMER 0x34
+#define APBC_AIB 0x3c
+#define APBC_SW_JTAG 0x40
+#define APBC_ONEWIRE 0x48
+#define APBC_TWSI1 0x6c
+#define APBC_UART2 0x70
+#define APBC_AC97 0x84
#define APBC_SSP0 0x81c
#define APBC_SSP1 0x820
#define APBC_SSP2 0x84c
#define APBC_SSP3 0x858
#define APBC_SSP4 0x85c
-#define APBC_TWSI1 0x6c
-#define APBC_UART2 0x70
+#define APMU_DISP0 0x4c
+#define APMU_CCIC0 0x50
#define APMU_SDH0 0x54
#define APMU_SDH1 0x58
#define APMU_USB 0x5c
-#define APMU_DISP0 0x4c
-#define APMU_CCIC0 0x50
#define APMU_DFC 0x60
+#define APMU_DMA 0x64
+#define APMU_BUS 0x6c
+#define APMU_GC 0xcc
+#define APMU_SMC 0xd4
+#define APMU_XD 0xdc
+#define APMU_SDH2 0xe0
+#define APMU_SDH3 0xe4
+#define APMU_CF 0xf0
+#define APMU_MSP 0xf4
+#define APMU_CMU 0xf8
+#define APMU_FE 0xfc
+#define APMU_PCIE 0x100
+#define APMU_EPD 0x104
#define MPMU_UART_PLL 0x14
struct pxa168_clk_unit {
@@ -71,9 +88,12 @@ static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
{PXA168_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
{PXA168_CLK_PLL1_192, "pll1_192", "pll1_96", 1, 2, 0},
{PXA168_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
- {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
- {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
+ {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 1, 5, 0},
+ {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 1, 5, 0},
{PXA168_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
+ {PXA168_CLK_PLL1_2_1_10, "pll1_2_1_10", "pll1_2", 1, 10, 0},
+ {PXA168_CLK_PLL1_2_3_16, "pll1_2_3_16", "pll1_2", 3, 16, 0},
+ {PXA168_CLK_CLK32_2, "clk32_2", "clk32", 1, 2, 0},
};
static struct mmp_clk_factor_masks uart_factor_masks = {
@@ -107,24 +127,44 @@ static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit)
mmp_clk_add(unit, PXA168_CLK_UART_PLL, clk);
}
+static DEFINE_SPINLOCK(twsi0_lock);
+static DEFINE_SPINLOCK(twsi1_lock);
+static const char * const twsi_parent_names[] = {"pll1_2_1_10", "pll1_2_1_5"};
+
+static DEFINE_SPINLOCK(kpc_lock);
+static const char * const kpc_parent_names[] = {"clk32", "clk32_2", "pll1_24"};
+
+static DEFINE_SPINLOCK(pwm0_lock);
+static DEFINE_SPINLOCK(pwm1_lock);
+static DEFINE_SPINLOCK(pwm2_lock);
+static DEFINE_SPINLOCK(pwm3_lock);
+static const char * const pwm_parent_names[] = {"pll1_48", "clk32"};
+
static DEFINE_SPINLOCK(uart0_lock);
static DEFINE_SPINLOCK(uart1_lock);
static DEFINE_SPINLOCK(uart2_lock);
-static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
+static const char * const uart_parent_names[] = {"pll1_2_3_16", "uart_pll"};
static DEFINE_SPINLOCK(ssp0_lock);
static DEFINE_SPINLOCK(ssp1_lock);
static DEFINE_SPINLOCK(ssp2_lock);
static DEFINE_SPINLOCK(ssp3_lock);
static DEFINE_SPINLOCK(ssp4_lock);
-static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
+static const char * const ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"pll1_48", "clk32", "pll1_96", "pll1_192"};
+static const char * const timer_parent_names[] = {"pll1_48", "clk32", "pll1_96", "pll1_192"};
static DEFINE_SPINLOCK(reset_lock);
static struct mmp_param_mux_clk apbc_mux_clks[] = {
+ {0, "twsi0_mux", twsi_parent_names, ARRAY_SIZE(twsi_parent_names), CLK_SET_RATE_PARENT, APBC_TWSI0, 4, 3, 0, &twsi0_lock},
+ {0, "twsi1_mux", twsi_parent_names, ARRAY_SIZE(twsi_parent_names), CLK_SET_RATE_PARENT, APBC_TWSI1, 4, 3, 0, &twsi1_lock},
+ {0, "kpc_mux", kpc_parent_names, ARRAY_SIZE(kpc_parent_names), CLK_SET_RATE_PARENT, APBC_KPC, 4, 3, 0, &kpc_lock},
+ {0, "pwm0_mux", pwm_parent_names, ARRAY_SIZE(pwm_parent_names), CLK_SET_RATE_PARENT, APBC_PWM0, 4, 3, 0, &pwm0_lock},
+ {0, "pwm1_mux", pwm_parent_names, ARRAY_SIZE(pwm_parent_names), CLK_SET_RATE_PARENT, APBC_PWM1, 4, 3, 0, &pwm1_lock},
+ {0, "pwm2_mux", pwm_parent_names, ARRAY_SIZE(pwm_parent_names), CLK_SET_RATE_PARENT, APBC_PWM2, 4, 3, 0, &pwm2_lock},
+ {0, "pwm3_mux", pwm_parent_names, ARRAY_SIZE(pwm_parent_names), CLK_SET_RATE_PARENT, APBC_PWM3, 4, 3, 0, &pwm3_lock},
{0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
{0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
{0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
@@ -137,16 +177,15 @@ static struct mmp_param_mux_clk apbc_mux_clks[] = {
};
static struct mmp_param_gate_clk apbc_gate_clks[] = {
- {PXA168_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA168_CLK_TWSI0, "twsi0_clk", "twsi0_mux", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &twsi0_lock},
+ {PXA168_CLK_TWSI1, "twsi1_clk", "twsi1_mux", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x3, 0x3, 0x0, 0, &twsi1_lock},
+ {PXA168_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x1, 0x1, 0x0, 0, &reset_lock},
+ {PXA168_CLK_KPC, "kpc_clk", "kpc_mux", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &kpc_lock},
{PXA168_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
- {PXA168_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
- /* The gate clocks has mux parent. */
+ {PXA168_CLK_PWM0, "pwm0_clk", "pwm0_mux", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &pwm0_lock},
+ {PXA168_CLK_PWM1, "pwm1_clk", "pwm1_mux", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &pwm1_lock},
+ {PXA168_CLK_PWM2, "pwm2_clk", "pwm2_mux", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &pwm2_lock},
+ {PXA168_CLK_PWM3, "pwm3_clk", "pwm3_mux", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &pwm3_lock},
{PXA168_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
{PXA168_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
{PXA168_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
@@ -170,22 +209,30 @@ static void pxa168_apb_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
}
+static DEFINE_SPINLOCK(dfc_lock);
+static const char * const dfc_parent_names[] = {"pll1_4", "pll1_8"};
+
static DEFINE_SPINLOCK(sdh0_lock);
static DEFINE_SPINLOCK(sdh1_lock);
-static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
+static DEFINE_SPINLOCK(sdh2_lock);
+static DEFINE_SPINLOCK(sdh3_lock);
+static const char * const sdh_parent_names[] = {"pll1_13", "pll1_12", "pll1_8"};
static DEFINE_SPINLOCK(usb_lock);
static DEFINE_SPINLOCK(disp0_lock);
-static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
+static const char * const disp_parent_names[] = {"pll1", "pll1_2"};
static DEFINE_SPINLOCK(ccic0_lock);
-static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
-static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
+static const char * const ccic_parent_names[] = {"pll1_4", "pll1_8"};
+static const char * const ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
static struct mmp_param_mux_clk apmu_mux_clks[] = {
- {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
- {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
+ {0, "dfc_mux", dfc_parent_names, ARRAY_SIZE(dfc_parent_names), CLK_SET_RATE_PARENT, APMU_DFC, 6, 1, 0, &dfc_lock},
+ {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 2, 0, &sdh0_lock},
+ {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 2, 0, &sdh1_lock},
+ {0, "sdh2_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH2, 6, 2, 0, &sdh2_lock},
+ {0, "sdh3_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH3, 6, 2, 0, &sdh3_lock},
{0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
{0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
{0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
@@ -196,12 +243,16 @@ static struct mmp_param_div_clk apmu_div_clks[] = {
};
static struct mmp_param_gate_clk apmu_gate_clks[] = {
- {PXA168_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
+ {PXA168_CLK_DFC, "dfc_clk", "dfc_mux", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, &dfc_lock},
{PXA168_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
{PXA168_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
- /* The gate clocks has mux parent. */
- {PXA168_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
- {PXA168_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
+ {PXA168_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x12, 0x12, 0x0, 0, &sdh0_lock},
+ {PXA168_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x12, 0x12, 0x0, 0, &sdh1_lock},
+ {PXA168_CLK_SDH2, "sdh2_clk", "sdh2_mux", CLK_SET_RATE_PARENT, APMU_SDH2, 0x12, 0x12, 0x0, 0, &sdh2_lock},
+ {PXA168_CLK_SDH3, "sdh3_clk", "sdh3_mux", CLK_SET_RATE_PARENT, APMU_SDH3, 0x12, 0x12, 0x0, 0, &sdh3_lock},
+ /* SDH0/1 and 2/3 AXI clocks are also gated by common bits in SDH0 and SDH2 registers */
+ {PXA168_CLK_SDH01_AXI, "sdh01_axi_clk", NULL, CLK_SET_RATE_PARENT, APMU_SDH0, 0x9, 0x9, 0x0, 0, &sdh0_lock},
+ {PXA168_CLK_SDH23_AXI, "sdh23_axi_clk", NULL, CLK_SET_RATE_PARENT, APMU_SDH2, 0x9, 0x9, 0x0, 0, &sdh2_lock},
{PXA168_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
{PXA168_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
{PXA168_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index 585a02e0b330..fc403ad735ad 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -87,7 +87,7 @@ static int armada_3700_tbg_clock_probe(struct platform_device *pdev)
struct resource *res;
struct clk *parent;
void __iomem *reg;
- int i, ret;
+ int i;
hw_tbg_data = devm_kzalloc(&pdev->dev,
struct_size(hw_tbg_data, hws, NUM_TBG),
@@ -123,9 +123,7 @@ static int armada_3700_tbg_clock_probe(struct platform_device *pdev)
dev_err(dev, "Can't register TBG clock %s\n", name);
}
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_tbg_data);
-
- return ret;
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_tbg_data);
}
static int armada_3700_tbg_clock_remove(struct platform_device *pdev)
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 7e35c891e168..0a90452ee808 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -170,7 +170,7 @@ static struct clk *clk_register_dove_divider(struct device *dev,
.num_parents = num_parents,
};
- strlcpy(name, dc->name, sizeof(name));
+ strscpy(name, dc->name, sizeof(name));
dc->hw.init = &init;
dc->base = base;
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index f253ef1996b1..69ebf65081b8 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -606,7 +606,7 @@ static void __init lpc18xx_cgu_register_source_clks(struct device_node *np,
if (IS_ERR(clk))
pr_warn("%s: failed to register irc clk\n", __func__);
- /* Register crystal oscillator controlller */
+ /* Register crystal oscillator controller */
parents[0] = of_clk_get_parent_name(np, 0);
clk = clk_register_gate(NULL, clk_src_names[CLK_SRC_OSC], parents[0],
0, base + LPC18XX_CGU_XTAL_OSC_CTRL,
diff --git a/drivers/clk/pistachio/clk.h b/drivers/clk/pistachio/clk.h
index f9c31e3a0e47..2f4ffbd98282 100644
--- a/drivers/clk/pistachio/clk.h
+++ b/drivers/clk/pistachio/clk.h
@@ -31,10 +31,10 @@ struct pistachio_mux {
unsigned int shift;
unsigned int num_parents;
const char *name;
- const char **parents;
+ const char *const *parents;
};
-#define PNAME(x) static const char *x[] __initconst
+#define PNAME(x) static const char *const x[] __initconst
#define MUX(_id, _name, _pnames, _reg, _shift) \
{ \
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 03de634efc52..374098ebbf2b 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -104,6 +104,8 @@ int __init clk_pxa_cken_init(const struct desc_clk_cken *clks,
for (i = 0; i < nb_clks; i++) {
pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
+ if (!pxa_clk)
+ return -ENOMEM;
pxa_clk->is_in_low_power = clks[i].is_in_low_power;
pxa_clk->lp = clks[i].lp;
pxa_clk->hp = clks[i].hp;
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1cf1ef70e347..76e6dee450d5 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -180,6 +180,14 @@ config MSM_GCC_8660
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, SD/eMMC, etc.
+config MSM_GCC_8909
+ tristate "MSM8909 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on msm8909 devices.
+ Say Y if you want to use devices such as UART, SPI, I2C, USB,
+ SD/eMMC, display, graphics, camera etc.
+
config MSM_GCC_8916
tristate "MSM8916 Global Clock Controller"
select QCOM_GDSC
@@ -445,6 +453,14 @@ config SC_GPUCC_7280
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SC_GPUCC_8280XP
+ tristate "SC8280XP Graphics Clock Controller"
+ select SC_GCC_8280XP
+ help
+ Support for the graphics clock controller on SC8280XP devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SC_LPASSCC_7280
tristate "SC7280 Low Power Audio Subsystem (LPASS) Clock Controller"
select SC_GCC_7280
@@ -545,10 +561,10 @@ config QCS_Q6SSTOP_404
controller to reset the Q6SSTOP subsystem.
config SDM_GCC_845
- tristate "SDM845 Global Clock Controller"
+ tristate "SDM845/SDM670 Global Clock Controller"
select QCOM_GDSC
help
- Support for the global clock controller on SDM845 devices.
+ Support for the global clock controller on SDM845 and SDM670 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
i2C, USB, UFS, SDDC, PCIe, etc.
@@ -616,6 +632,15 @@ config SM_CAMCC_8450
Support for the camera clock controller on SM8450 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_DISPCC_6115
+ tristate "SM6115 Display Clock Controller"
+ depends on SM_GCC_6115
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM6115/SM4250 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_DISPCC_6125
tristate "SM6125 Display Clock Controller"
depends on SM_GCC_6125
@@ -643,8 +668,18 @@ config SM_DISPCC_6350
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_8450
+ tristate "SM8450 Display Clock Controller"
+ depends on SM_GCC_8450
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM8450 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_GCC_6115
tristate "SM6115 and SM4250 Global Clock Controller"
+ select QCOM_GDSC
help
Support for the global clock controller on SM6115 and SM4250 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -665,6 +700,14 @@ config SM_GCC_6350
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_6375
+ tristate "SM6375 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM6375 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS etc.
+
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
help
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index fbcf04073f07..e6cecf9e0436 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8909) += gcc-msm8909.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
obj-$(CONFIG_MSM_GCC_8953) += gcc-msm8953.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
obj-$(CONFIG_SC_GCC_8280XP) += gcc-sc8280xp.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7280) += gpucc-sc7280.o
+obj-$(CONFIG_SC_GPUCC_8280XP) += gpucc-sc8280xp.o
obj-$(CONFIG_SC_LPASSCC_7280) += lpasscc-sc7280.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_LPASS_CORECC_7280) += lpasscorecc-sc7280.o lpassaudiocc-sc7280.o
@@ -90,12 +92,15 @@ obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
+obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
+obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
obj-$(CONFIG_SM_GCC_6350) += gcc-sm6350.o
+obj-$(CONFIG_SM_GCC_6375) += gcc-sm6375.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index 329d2c5356d8..f9c5e296dba2 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -127,7 +127,9 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
if (!init.name)
return -ENOMEM;
- init.parent_names = (const char *[]){ "xo" };
+ init.parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ };
init.num_parents = 1;
init.ops = &clk_pll_sr2_ops;
pll->clkr.hw.init = &init;
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index bef7899ad0d6..a5aea27eb867 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -2,6 +2,7 @@
// Copyright (c) 2018, The Linux Foundation. All rights reserved.
#include <linux/clk-provider.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -36,12 +37,28 @@ static struct clk_alpha_pll ipq_pll = {
},
};
-static const struct alpha_pll_config ipq_pll_config = {
+static const struct alpha_pll_config ipq6018_pll_config = {
.l = 0x37,
- .config_ctl_val = 0x04141200,
- .config_ctl_hi_val = 0x0,
+ .config_ctl_val = 0x240d4828,
+ .config_ctl_hi_val = 0x6,
.early_output_mask = BIT(3),
+ .aux2_output_mask = BIT(2),
+ .aux_output_mask = BIT(1),
.main_output_mask = BIT(0),
+ .test_ctl_val = 0x1c0000C0,
+ .test_ctl_hi_val = 0x4000,
+};
+
+static const struct alpha_pll_config ipq8074_pll_config = {
+ .l = 0x48,
+ .config_ctl_val = 0x200d4828,
+ .config_ctl_hi_val = 0x6,
+ .early_output_mask = BIT(3),
+ .aux2_output_mask = BIT(2),
+ .aux_output_mask = BIT(1),
+ .main_output_mask = BIT(0),
+ .test_ctl_val = 0x1c000000,
+ .test_ctl_hi_val = 0x4000,
};
static const struct regmap_config ipq_pll_regmap_config = {
@@ -54,6 +71,7 @@ static const struct regmap_config ipq_pll_regmap_config = {
static int apss_ipq_pll_probe(struct platform_device *pdev)
{
+ const struct alpha_pll_config *ipq_pll_config;
struct device *dev = &pdev->dev;
struct regmap *regmap;
void __iomem *base;
@@ -67,7 +85,11 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- clk_alpha_pll_configure(&ipq_pll, regmap, &ipq_pll_config);
+ ipq_pll_config = of_device_get_match_data(&pdev->dev);
+ if (!ipq_pll_config)
+ return -ENODEV;
+
+ clk_alpha_pll_configure(&ipq_pll, regmap, ipq_pll_config);
ret = devm_clk_register_regmap(dev, &ipq_pll.clkr);
if (ret)
@@ -78,7 +100,8 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
}
static const struct of_device_id apss_ipq_pll_match_table[] = {
- { .compatible = "qcom,ipq6018-a53pll" },
+ { .compatible = "qcom,ipq6018-a53pll", .data = &ipq6018_pll_config },
+ { .compatible = "qcom,ipq8074-a53pll", .data = &ipq8074_pll_config },
{ }
};
MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
diff --git a/drivers/clk/qcom/apss-ipq6018.c b/drivers/clk/qcom/apss-ipq6018.c
index d78ff2f310bf..f2f502e2d5a4 100644
--- a/drivers/clk/qcom/apss-ipq6018.c
+++ b/drivers/clk/qcom/apss-ipq6018.c
@@ -16,7 +16,7 @@
#include "clk-regmap.h"
#include "clk-branch.h"
#include "clk-alpha-pll.h"
-#include "clk-regmap-mux.h"
+#include "clk-rcg.h"
enum {
P_XO,
@@ -33,16 +33,15 @@ static const struct parent_map parents_apcs_alias0_clk_src_map[] = {
{ P_APSS_PLL_EARLY, 5 },
};
-static struct clk_regmap_mux apcs_alias0_clk_src = {
- .reg = 0x0050,
- .width = 3,
- .shift = 7,
+static struct clk_rcg2 apcs_alias0_clk_src = {
+ .cmd_rcgr = 0x0050,
+ .hid_width = 5,
.parent_map = parents_apcs_alias0_clk_src_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "apcs_alias0_clk_src",
.parent_data = parents_apcs_alias0_clk_src,
- .num_parents = 2,
- .ops = &clk_regmap_mux_closest_ops,
+ .num_parents = ARRAY_SIZE(parents_apcs_alias0_clk_src),
+ .ops = &clk_rcg2_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -57,7 +56,7 @@ static struct clk_branch apcs_alias0_core_clk = {
.parent_hws = (const struct clk_hw *[]){
&apcs_alias0_clk_src.clkr.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index b42684703fbb..1973d79c9465 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -27,6 +27,7 @@
# define PLL_VOTE_FSM_RESET BIT(21)
# define PLL_UPDATE BIT(22)
# define PLL_UPDATE_BYPASS BIT(23)
+# define PLL_FSM_LEGACY_MODE BIT(24)
# define PLL_OFFLINE_ACK BIT(28)
# define ALPHA_PLL_ACK_LATCH BIT(29)
# define PLL_ACTIVE_FLAG BIT(30)
@@ -166,6 +167,27 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x28,
[PLL_OFF_TEST_CTL_U] = 0x2c,
},
+ [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1C,
+ [PLL_OFF_STATUS] = 0x20,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -1102,6 +1124,10 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
}
+ if (pll->flags & SUPPORTS_FSM_LEGACY_MODE)
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_FSM_LEGACY_MODE,
+ PLL_FSM_LEGACY_MODE);
+
regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
PLL_UPDATE_BYPASS);
@@ -2088,7 +2114,7 @@ static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
return ret;
}
-static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
+static void _alpha_pll_lucid_evo_disable(struct clk_hw *hw, bool reset)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
struct regmap *regmap = pll->clkr.regmap;
@@ -2117,9 +2143,12 @@ static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
/* Place the PLL mode in STANDBY */
regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ if (reset)
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, 0);
}
-static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
+static int _alpha_pll_lucid_evo_prepare(struct clk_hw *hw, bool reset)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
struct clk_hw *p;
@@ -2139,11 +2168,31 @@ static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
if (ret)
return ret;
- alpha_pll_lucid_evo_disable(hw);
+ _alpha_pll_lucid_evo_disable(hw, reset);
return 0;
}
+static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
+{
+ _alpha_pll_lucid_evo_disable(hw, false);
+}
+
+static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
+{
+ return _alpha_pll_lucid_evo_prepare(hw, false);
+}
+
+static void alpha_pll_reset_lucid_evo_disable(struct clk_hw *hw)
+{
+ _alpha_pll_lucid_evo_disable(hw, true);
+}
+
+static int alpha_pll_reset_lucid_evo_prepare(struct clk_hw *hw)
+{
+ return _alpha_pll_lucid_evo_prepare(hw, true);
+}
+
static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -2191,6 +2240,17 @@ const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
+const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
+ .prepare = alpha_pll_reset_lucid_evo_prepare,
+ .enable = alpha_pll_lucid_evo_enable,
+ .disable = alpha_pll_reset_lucid_evo_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = alpha_pll_lucid_evo_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_5lpe_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 447efb82fe59..f9524b3fce6b 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -19,6 +19,8 @@ enum {
CLK_ALPHA_PLL_TYPE_ZONDA,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
+ CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
+ CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -70,9 +72,10 @@ struct clk_alpha_pll {
const struct pll_vco *vco_table;
size_t num_vco;
-#define SUPPORTS_OFFLINE_REQ BIT(0)
-#define SUPPORTS_FSM_MODE BIT(2)
+#define SUPPORTS_OFFLINE_REQ BIT(0)
+#define SUPPORTS_FSM_MODE BIT(2)
#define SUPPORTS_DYNAMIC_UPDATE BIT(3)
+#define SUPPORTS_FSM_LEGACY_MODE BIT(4)
u8 flags;
struct clk_regmap clkr;
@@ -155,6 +158,7 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
+extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
index 4a4fde8dd12d..ee76ef958d31 100644
--- a/drivers/clk/qcom/clk-cpu-8996.c
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -49,6 +49,7 @@
* detect voltage droops.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
@@ -59,9 +60,10 @@
#include "clk-alpha-pll.h"
#include "clk-regmap.h"
+#include "clk-regmap-mux.h"
enum _pmux_input {
- DIV_2_INDEX = 0,
+ SMUX_INDEX = 0,
PLL_INDEX,
ACD_INDEX,
ALT_INDEX,
@@ -75,6 +77,8 @@ enum _pmux_input {
#define ALT_PLL_OFFSET 0x100
#define SSSCTL_OFFSET 0x160
+#define PMUX_MASK 0x3
+
static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
@@ -111,30 +115,90 @@ static const struct alpha_pll_config hfpll_config = {
.early_output_mask = BIT(3),
};
-static struct clk_alpha_pll perfcl_pll = {
- .offset = PERFCL_REG_OFFSET,
+static const struct clk_parent_data pll_parent[] = {
+ { .fw_name = "xo" },
+};
+
+static struct clk_alpha_pll pwrcl_pll = {
+ .offset = PWRCL_REG_OFFSET,
.regs = prim_pll_regs,
.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data){
- .name = "perfcl_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
+ .name = "pwrcl_pll",
+ .parent_data = pll_parent,
+ .num_parents = ARRAY_SIZE(pll_parent),
.ops = &clk_alpha_pll_huayra_ops,
},
};
-static struct clk_alpha_pll pwrcl_pll = {
- .offset = PWRCL_REG_OFFSET,
+static struct clk_alpha_pll perfcl_pll = {
+ .offset = PERFCL_REG_OFFSET,
.regs = prim_pll_regs,
.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data){
- .name = "pwrcl_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
+ .name = "perfcl_pll",
+ .parent_data = pll_parent,
+ .num_parents = ARRAY_SIZE(pll_parent),
.ops = &clk_alpha_pll_huayra_ops,
},
};
+static struct clk_fixed_factor pwrcl_pll_postdiv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pwrcl_pll_postdiv",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pwrcl_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor perfcl_pll_postdiv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "perfcl_pll_postdiv",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &perfcl_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor perfcl_pll_acd = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "perfcl_pll_acd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &perfcl_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor pwrcl_pll_acd = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "pwrcl_pll_acd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pwrcl_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct pll_vco alt_pll_vco_modes[] = {
VCO(3, 250000000, 500000000),
VCO(2, 500000000, 750000000),
@@ -153,93 +217,87 @@ static const struct alpha_pll_config altpll_config = {
.early_output_mask = BIT(3),
};
-static struct clk_alpha_pll perfcl_alt_pll = {
- .offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET,
+static struct clk_alpha_pll pwrcl_alt_pll = {
+ .offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET,
.regs = alt_pll_regs,
.vco_table = alt_pll_vco_modes,
.num_vco = ARRAY_SIZE(alt_pll_vco_modes),
.flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data) {
- .name = "perfcl_alt_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
+ .name = "pwrcl_alt_pll",
+ .parent_data = pll_parent,
+ .num_parents = ARRAY_SIZE(pll_parent),
.ops = &clk_alpha_pll_hwfsm_ops,
},
};
-static struct clk_alpha_pll pwrcl_alt_pll = {
- .offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET,
+static struct clk_alpha_pll perfcl_alt_pll = {
+ .offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET,
.regs = alt_pll_regs,
.vco_table = alt_pll_vco_modes,
.num_vco = ARRAY_SIZE(alt_pll_vco_modes),
.flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data) {
- .name = "pwrcl_alt_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
+ .name = "perfcl_alt_pll",
+ .parent_data = pll_parent,
+ .num_parents = ARRAY_SIZE(pll_parent),
.ops = &clk_alpha_pll_hwfsm_ops,
},
};
-struct clk_cpu_8996_mux {
+struct clk_cpu_8996_pmux {
u32 reg;
- u8 shift;
- u8 width;
struct notifier_block nb;
- struct clk_hw *pll;
- struct clk_hw *pll_div_2;
struct clk_regmap clkr;
};
static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
void *data);
-#define to_clk_cpu_8996_mux_nb(_nb) \
- container_of(_nb, struct clk_cpu_8996_mux, nb)
+#define to_clk_cpu_8996_pmux_nb(_nb) \
+ container_of(_nb, struct clk_cpu_8996_pmux, nb)
-static inline struct clk_cpu_8996_mux *to_clk_cpu_8996_mux_hw(struct clk_hw *hw)
+static inline struct clk_cpu_8996_pmux *to_clk_cpu_8996_pmux_hw(struct clk_hw *hw)
{
- return container_of(to_clk_regmap(hw), struct clk_cpu_8996_mux, clkr);
+ return container_of(to_clk_regmap(hw), struct clk_cpu_8996_pmux, clkr);
}
-static u8 clk_cpu_8996_mux_get_parent(struct clk_hw *hw)
+static u8 clk_cpu_8996_pmux_get_parent(struct clk_hw *hw)
{
struct clk_regmap *clkr = to_clk_regmap(hw);
- struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw);
- u32 mask = GENMASK(cpuclk->width - 1, 0);
+ struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
u32 val;
regmap_read(clkr->regmap, cpuclk->reg, &val);
- val >>= cpuclk->shift;
- return val & mask;
+ return FIELD_GET(PMUX_MASK, val);
}
-static int clk_cpu_8996_mux_set_parent(struct clk_hw *hw, u8 index)
+static int clk_cpu_8996_pmux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_regmap *clkr = to_clk_regmap(hw);
- struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw);
- u32 mask = GENMASK(cpuclk->width + cpuclk->shift - 1, cpuclk->shift);
+ struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
u32 val;
- val = index;
- val <<= cpuclk->shift;
+ val = FIELD_PREP(PMUX_MASK, index);
- return regmap_update_bits(clkr->regmap, cpuclk->reg, mask, val);
+ return regmap_update_bits(clkr->regmap, cpuclk->reg, PMUX_MASK, val);
}
-static int clk_cpu_8996_mux_determine_rate(struct clk_hw *hw,
+static int clk_cpu_8996_pmux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw);
- struct clk_hw *parent = cpuclk->pll;
+ struct clk_hw *parent;
- if (cpuclk->pll_div_2 && req->rate < DIV_2_THRESHOLD) {
- if (req->rate < (DIV_2_THRESHOLD / 2))
- return -EINVAL;
+ if (req->rate < (DIV_2_THRESHOLD / 2))
+ return -EINVAL;
- parent = cpuclk->pll_div_2;
- }
+ if (req->rate < DIV_2_THRESHOLD)
+ parent = clk_hw_get_parent_by_index(hw, SMUX_INDEX);
+ else
+ parent = clk_hw_get_parent_by_index(hw, ACD_INDEX);
+ if (!parent)
+ return -EINVAL;
req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
req->best_parent_hw = parent;
@@ -247,83 +305,83 @@ static int clk_cpu_8996_mux_determine_rate(struct clk_hw *hw,
return 0;
}
-static const struct clk_ops clk_cpu_8996_mux_ops = {
- .set_parent = clk_cpu_8996_mux_set_parent,
- .get_parent = clk_cpu_8996_mux_get_parent,
- .determine_rate = clk_cpu_8996_mux_determine_rate,
+static const struct clk_ops clk_cpu_8996_pmux_ops = {
+ .set_parent = clk_cpu_8996_pmux_set_parent,
+ .get_parent = clk_cpu_8996_pmux_get_parent,
+ .determine_rate = clk_cpu_8996_pmux_determine_rate,
+};
+
+static const struct clk_parent_data pwrcl_smux_parents[] = {
+ { .fw_name = "xo" },
+ { .hw = &pwrcl_pll_postdiv.hw },
};
-static struct clk_cpu_8996_mux pwrcl_smux = {
+static const struct clk_parent_data perfcl_smux_parents[] = {
+ { .fw_name = "xo" },
+ { .hw = &perfcl_pll_postdiv.hw },
+};
+
+static struct clk_regmap_mux pwrcl_smux = {
.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
.shift = 2,
.width = 2,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pwrcl_smux",
- .parent_names = (const char *[]){
- "xo",
- "pwrcl_pll_main",
- },
- .num_parents = 2,
- .ops = &clk_cpu_8996_mux_ops,
+ .parent_data = pwrcl_smux_parents,
+ .num_parents = ARRAY_SIZE(pwrcl_smux_parents),
+ .ops = &clk_regmap_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_cpu_8996_mux perfcl_smux = {
+static struct clk_regmap_mux perfcl_smux = {
.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
.shift = 2,
.width = 2,
.clkr.hw.init = &(struct clk_init_data) {
.name = "perfcl_smux",
- .parent_names = (const char *[]){
- "xo",
- "perfcl_pll_main",
- },
- .num_parents = 2,
- .ops = &clk_cpu_8996_mux_ops,
+ .parent_data = perfcl_smux_parents,
+ .num_parents = ARRAY_SIZE(perfcl_smux_parents),
+ .ops = &clk_regmap_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_cpu_8996_mux pwrcl_pmux = {
+static const struct clk_hw *pwrcl_pmux_parents[] = {
+ [SMUX_INDEX] = &pwrcl_smux.clkr.hw,
+ [PLL_INDEX] = &pwrcl_pll.clkr.hw,
+ [ACD_INDEX] = &pwrcl_pll_acd.hw,
+ [ALT_INDEX] = &pwrcl_alt_pll.clkr.hw,
+};
+
+static const struct clk_hw *perfcl_pmux_parents[] = {
+ [SMUX_INDEX] = &perfcl_smux.clkr.hw,
+ [PLL_INDEX] = &perfcl_pll.clkr.hw,
+ [ACD_INDEX] = &perfcl_pll_acd.hw,
+ [ALT_INDEX] = &perfcl_alt_pll.clkr.hw,
+};
+
+static struct clk_cpu_8996_pmux pwrcl_pmux = {
.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
- .shift = 0,
- .width = 2,
- .pll = &pwrcl_pll.clkr.hw,
- .pll_div_2 = &pwrcl_smux.clkr.hw,
.nb.notifier_call = cpu_clk_notifier_cb,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pwrcl_pmux",
- .parent_names = (const char *[]){
- "pwrcl_smux",
- "pwrcl_pll",
- "pwrcl_pll_acd",
- "pwrcl_alt_pll",
- },
- .num_parents = 4,
- .ops = &clk_cpu_8996_mux_ops,
+ .parent_hws = pwrcl_pmux_parents,
+ .num_parents = ARRAY_SIZE(pwrcl_pmux_parents),
+ .ops = &clk_cpu_8996_pmux_ops,
/* CPU clock is critical and should never be gated */
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
},
};
-static struct clk_cpu_8996_mux perfcl_pmux = {
+static struct clk_cpu_8996_pmux perfcl_pmux = {
.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
- .shift = 0,
- .width = 2,
- .pll = &perfcl_pll.clkr.hw,
- .pll_div_2 = &perfcl_smux.clkr.hw,
.nb.notifier_call = cpu_clk_notifier_cb,
.clkr.hw.init = &(struct clk_init_data) {
.name = "perfcl_pmux",
- .parent_names = (const char *[]){
- "perfcl_smux",
- "perfcl_pll",
- "perfcl_pll_acd",
- "perfcl_alt_pll",
- },
- .num_parents = 4,
- .ops = &clk_cpu_8996_mux_ops,
+ .parent_hws = perfcl_pmux_parents,
+ .num_parents = ARRAY_SIZE(perfcl_pmux_parents),
+ .ops = &clk_cpu_8996_pmux_ops,
/* CPU clock is critical and should never be gated */
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
},
@@ -338,15 +396,22 @@ static const struct regmap_config cpu_msm8996_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+static struct clk_hw *cpu_msm8996_hw_clks[] = {
+ &pwrcl_pll_postdiv.hw,
+ &perfcl_pll_postdiv.hw,
+ &pwrcl_pll_acd.hw,
+ &perfcl_pll_acd.hw,
+};
+
static struct clk_regmap *cpu_msm8996_clks[] = {
- &perfcl_pll.clkr,
&pwrcl_pll.clkr,
- &perfcl_alt_pll.clkr,
+ &perfcl_pll.clkr,
&pwrcl_alt_pll.clkr,
- &perfcl_smux.clkr,
+ &perfcl_alt_pll.clkr,
&pwrcl_smux.clkr,
- &perfcl_pmux.clkr,
+ &perfcl_smux.clkr,
&pwrcl_pmux.clkr,
+ &perfcl_pmux.clkr,
};
static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
@@ -354,67 +419,33 @@ static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
{
int i, ret;
- perfcl_smux.pll = clk_hw_register_fixed_factor(dev, "perfcl_pll_main",
- "perfcl_pll",
- CLK_SET_RATE_PARENT,
- 1, 2);
- if (IS_ERR(perfcl_smux.pll)) {
- dev_err(dev, "Failed to initialize perfcl_pll_main\n");
- return PTR_ERR(perfcl_smux.pll);
- }
-
- pwrcl_smux.pll = clk_hw_register_fixed_factor(dev, "pwrcl_pll_main",
- "pwrcl_pll",
- CLK_SET_RATE_PARENT,
- 1, 2);
- if (IS_ERR(pwrcl_smux.pll)) {
- dev_err(dev, "Failed to initialize pwrcl_pll_main\n");
- clk_hw_unregister(perfcl_smux.pll);
- return PTR_ERR(pwrcl_smux.pll);
+ for (i = 0; i < ARRAY_SIZE(cpu_msm8996_hw_clks); i++) {
+ ret = devm_clk_hw_register(dev, cpu_msm8996_hw_clks[i]);
+ if (ret)
+ return ret;
}
for (i = 0; i < ARRAY_SIZE(cpu_msm8996_clks); i++) {
ret = devm_clk_register_regmap(dev, cpu_msm8996_clks[i]);
- if (ret) {
- clk_hw_unregister(perfcl_smux.pll);
- clk_hw_unregister(pwrcl_smux.pll);
+ if (ret)
return ret;
- }
}
- clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
- clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
+ clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
+ clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
/* Enable alt PLLs */
clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk);
clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk);
- clk_notifier_register(pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
- clk_notifier_register(perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);
+ devm_clk_notifier_register(dev, pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
+ devm_clk_notifier_register(dev, perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);
return ret;
}
-static int qcom_cpu_clk_msm8996_unregister_clks(void)
-{
- int ret = 0;
-
- ret = clk_notifier_unregister(pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
- if (ret)
- return ret;
-
- ret = clk_notifier_unregister(perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);
- if (ret)
- return ret;
-
- clk_hw_unregister(perfcl_smux.pll);
- clk_hw_unregister(pwrcl_smux.pll);
-
- return 0;
-}
-
#define CPU_AFINITY_MASK 0xFFF
#define PWRCL_CPU_REG_MASK 0x3
#define PERFCL_CPU_REG_MASK 0x103
@@ -456,22 +487,22 @@ static void qcom_cpu_clk_msm8996_acd_init(void __iomem *base)
static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
void *data)
{
- struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_nb(nb);
+ struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_nb(nb);
struct clk_notifier_data *cnd = data;
int ret;
switch (event) {
case PRE_RATE_CHANGE:
- ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
+ ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
qcom_cpu_clk_msm8996_acd_init(base);
break;
case POST_RATE_CHANGE:
if (cnd->new_rate < DIV_2_THRESHOLD)
- ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw,
- DIV_2_INDEX);
+ ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
+ SMUX_INDEX);
else
- ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw,
- ACD_INDEX);
+ ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
+ ACD_INDEX);
break;
default:
ret = 0;
@@ -513,11 +544,6 @@ static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
}
-static int qcom_cpu_clk_msm8996_driver_remove(struct platform_device *pdev)
-{
- return qcom_cpu_clk_msm8996_unregister_clks();
-}
-
static const struct of_device_id qcom_cpu_clk_msm8996_match_table[] = {
{ .compatible = "qcom,msm8996-apcc" },
{}
@@ -526,7 +552,6 @@ MODULE_DEVICE_TABLE(of, qcom_cpu_clk_msm8996_match_table);
static struct platform_driver qcom_cpu_clk_msm8996_driver = {
.probe = qcom_cpu_clk_msm8996_driver_probe,
- .remove = qcom_cpu_clk_msm8996_driver_remove,
.driver = {
.name = "qcom-msm8996-apcc",
.of_match_table = qcom_cpu_clk_msm8996_match_table,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 012e745794fd..01581f4d2c39 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -167,6 +167,7 @@ struct clk_rcg2_gfx3d {
extern const struct clk_ops clk_rcg2_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
+extern const struct clk_ops clk_rcg2_mux_closest_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
extern const struct clk_ops clk_byte2_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 28019edd2a50..76551534f10d 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -509,6 +509,13 @@ const struct clk_ops clk_rcg2_floor_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+const struct clk_ops clk_rcg2_mux_closest_ops = {
+ .determine_rate = __clk_mux_determine_rate_closest,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);
+
struct frac_entry {
int num;
int den;
@@ -908,6 +915,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
req->best_parent_hw = p2;
}
+ clk_hw_get_rate_range(req->best_parent_hw,
+ &parent_req.min_rate, &parent_req.max_rate);
+
+ if (req->min_rate > parent_req.min_rate)
+ parent_req.min_rate = req->min_rate;
+
+ if (req->max_rate < parent_req.max_rate)
+ parent_req.max_rate = req->max_rate;
+
ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c07cab6905cb..0471bab82464 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -195,10 +195,6 @@ static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
{
int ret;
- /* Nothing required to be done if already off or on */
- if (enable == c->state)
- return 0;
-
c->state = enable ? c->valid_state_mask : 0;
c->aggr_state = c->state | c->peer->state;
c->peer->aggr_state = c->aggr_state;
@@ -382,6 +378,26 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
};
+static struct clk_hw *sdm670_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_CE_CLK] = &sdm845_ce.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdm670 = {
+ .clks = sdm670_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdm670_rpmh_clocks),
+};
+
DEFINE_CLK_RPMH_VRM(sdx55, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
DEFINE_CLK_RPMH_VRM(sdx55, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
DEFINE_CLK_RPMH_BCM(sdx55, qpic_clk, "QP0");
@@ -715,6 +731,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
+ { .compatible = "qcom,sdm670-rpmh-clk", .data = &clk_rpmh_sdm670},
{ .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
{ .compatible = "qcom,sdx65-rpmh-clk", .data = &clk_rpmh_sdx65},
{ .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350},
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 10b4e6d8d10f..fea505876855 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -417,6 +417,7 @@ DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0,
DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcs404, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2, 19200000);
@@ -427,6 +428,40 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2, 192
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5, 19200000);
+static struct clk_smd_rpm *msm8909_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QPIC_CLK] = &qcs404_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &qcs404_qpic_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8909 = {
+ .clks = msm8909_clks,
+ .num_clks = ARRAY_SIZE(msm8909_clks),
+};
+
static struct clk_smd_rpm *msm8916_clks[] = {
[RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
[RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
@@ -787,7 +822,6 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
};
DEFINE_CLK_SMD_RPM(qcs404, bimc_gpu_clk, bimc_gpu_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
-DEFINE_CLK_SMD_RPM(qcs404, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, ln_bb_clk_pin, ln_bb_clk_a_pin, 8, 19200000);
static struct clk_smd_rpm *qcs404_clks[] = {
@@ -1085,13 +1119,54 @@ static const struct rpm_smd_clk_desc rpm_clk_sm6115 = {
.num_clks = ARRAY_SIZE(sm6115_clks),
};
+/* SM6375 */
+DEFINE_CLK_SMD_RPM(sm6375, mmnrt_clk, mmnrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(sm6375, mmrt_clk, mmrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 1);
+DEFINE_CLK_SMD_RPM(qcm2290, hwkm_clk, hwkm_a_clk, QCOM_SMD_RPM_HWKM_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcm2290, pka_clk, pka_a_clk, QCOM_SMD_RPM_PKA_CLK, 0);
+DEFINE_CLK_SMD_RPM_BRANCH(sm6375, bimc_freq_log, bimc_freq_log_a, QCOM_SMD_RPM_MISC_CLK, 4, 1);
+static struct clk_smd_rpm *sm6375_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
+ [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+ [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &sm6375_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &sm6375_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &sm6375_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &sm6375_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
+ [RPM_SMD_HWKM_CLK] = &qcm2290_hwkm_clk,
+ [RPM_SMD_HWKM_A_CLK] = &qcm2290_hwkm_a_clk,
+ [RPM_SMD_PKA_CLK] = &qcm2290_pka_clk,
+ [RPM_SMD_PKA_A_CLK] = &qcm2290_pka_a_clk,
+ [RPM_SMD_BIMC_FREQ_LOG] = &sm6375_bimc_freq_log,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_sm6375 = {
+ .clks = sm6375_clks,
+ .num_clks = ARRAY_SIZE(sm6375_clks),
+};
+
/* QCM2290 */
DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, ln_bb_clk2, ln_bb_clk2_a, 0x2, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, rf_clk3, rf_clk3_a, 6, 38400000);
DEFINE_CLK_SMD_RPM(qcm2290, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
-DEFINE_CLK_SMD_RPM(qcm2290, hwkm_clk, hwkm_a_clk, QCOM_SMD_RPM_HWKM_CLK, 0);
-DEFINE_CLK_SMD_RPM(qcm2290, pka_clk, pka_a_clk, QCOM_SMD_RPM_PKA_CLK, 0);
DEFINE_CLK_SMD_RPM(qcm2290, cpuss_gnoc_clk, cpuss_gnoc_a_clk,
QCOM_SMD_RPM_MEM_CLK, 1);
DEFINE_CLK_SMD_RPM(qcm2290, bimc_gpu_clk, bimc_gpu_a_clk,
@@ -1146,6 +1221,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcm2290 = {
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-mdm9607", .data = &rpm_clk_mdm9607 },
{ .compatible = "qcom,rpmcc-msm8226", .data = &rpm_clk_msm8974 },
+ { .compatible = "qcom,rpmcc-msm8909", .data = &rpm_clk_msm8909 },
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 },
{ .compatible = "qcom,rpmcc-msm8953", .data = &rpm_clk_msm8953 },
@@ -1160,6 +1236,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
{ .compatible = "qcom,rpmcc-sm6115", .data = &rpm_clk_sm6115 },
{ .compatible = "qcom,rpmcc-sm6125", .data = &rpm_clk_sm6125 },
+ { .compatible = "qcom,rpmcc-sm6375", .data = &rpm_clk_sm6375 },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
new file mode 100644
index 000000000000..818bb8f4637c
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -0,0 +1,608 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on dispcc-qcm2290.c
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6115-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_GPLL0_DISP_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct clk_parent_data parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static const struct pll_vco spark_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 768MHz configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x28,
+ .alpha = 0x0,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055B,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0",
+ .parent_data = &parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_disp_cc_pll0_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+static struct clk_alpha_pll_postdiv disp_cc_pll0_out_main = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_disp_cc_pll0_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_disp_cc_pll0_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0_out_main",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_DISP_DIV },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0_out_main.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_SLEEP_CLK, },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ /* For set_rate and set_parent to succeed, parent(s) must be enabled */
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20d4,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x2154,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x20d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(384000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x205c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ /* For set_rate and set_parent to succeed, parent(s) must be enabled */
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x208c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32764, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x6050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x2018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x3000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc *disp_cc_sm6115_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static struct clk_regmap *disp_cc_sm6115_clocks[] = {
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL0_OUT_MAIN] = &disp_cc_pll0_out_main.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+};
+
+static const struct regmap_config disp_cc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sm6115_desc = {
+ .config = &disp_cc_sm6115_regmap_config,
+ .clks = disp_cc_sm6115_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm6115_clocks),
+ .gdscs = disp_cc_sm6115_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm6115_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm6115_match_table[] = {
+ { .compatible = "qcom,sm6115-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm6115_match_table);
+
+static int disp_cc_sm6115_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm6115_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+ /* Keep DISP_CC_XO_CLK always-ON */
+ regmap_update_bits(regmap, 0x604c, BIT(0), BIT(0));
+
+ ret = qcom_cc_really_probe(pdev, &disp_cc_sm6115_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register DISP CC clocks\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm6115_driver = {
+ .probe = disp_cc_sm6115_probe,
+ .driver = {
+ .name = "dispcc-sm6115",
+ .of_match_table = disp_cc_sm6115_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_sm6115_driver);
+MODULE_DESCRIPTION("Qualcomm SM6115 Display Clock controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
new file mode 100644
index 000000000000..0cd7ebe90301
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -0,0 +1,1829 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/clock/qcom,sm8450-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xD,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32AA299C,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1F,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32AA299C,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8324,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8134,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+ F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x81bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81d4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8254,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x826c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x828c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82a4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8308,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x82ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x82d4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x816c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x8184,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(172000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(375000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(500000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x80bc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80d4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_DISP_CC_PLL1_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_DISP_CC_PLL1_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_DISP_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x8104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x811c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_1_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x814c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x8168,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x824c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x8284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8304,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot1_clk = {
+ .halt_reg = 0xa00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rot1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+ .halt_reg = 0xe078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_sm8450_clocks[] = {
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_ROT1_CLK] = &disp_cc_mdss_rot1_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sm8450_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_sm8450_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm8450_desc = {
+ .config = &disp_cc_sm8450_regmap_config,
+ .clks = disp_cc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8450_clocks),
+ .resets = disp_cc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8450_resets),
+ .gdscs = disp_cc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8450_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8450_match_table[] = {
+ { .compatible = "qcom,sm8450-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8450_match_table);
+
+static void disp_cc_sm8450_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
+static int disp_cc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, disp_cc_sm8450_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm8450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /*
+ * Keep clocks always enabled:
+ * disp_cc_xo_clk
+ */
+ regmap_update_bits(regmap, 0xe05c, BIT(0), BIT(0));
+
+ ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm8450_driver = {
+ .probe = disp_cc_sm8450_probe,
+ .driver = {
+ .name = "disp_cc-sm8450",
+ .of_match_table = disp_cc_sm8450_match_table,
+ },
+};
+
+static int __init disp_cc_sm8450_init(void)
+{
+ return platform_driver_register(&disp_cc_sm8450_driver);
+}
+subsys_initcall(disp_cc_sm8450_init);
+
+static void __exit disp_cc_sm8450_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sm8450_driver);
+}
+module_exit(disp_cc_sm8450_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index 94ea2d84d1b1..a9eb6a9ac445 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -34,7 +34,9 @@ static struct clk_pll pll8 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll8",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -45,7 +47,9 @@ static struct clk_regmap pll8_vote = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "pll8_vote",
- .parent_names = (const char *[]){ "pll8" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pll8.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -62,9 +66,9 @@ static const struct parent_map gcc_pxo_pll8_map[] = {
{ P_PLL8, 3 }
};
-static const char * const gcc_pxo_pll8[] = {
- "pxo",
- "pll8_vote",
+static const struct clk_parent_data gcc_pxo_pll8[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
};
static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
@@ -73,10 +77,10 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
{ P_CXO, 5 }
};
-static const char * const gcc_pxo_pll8_cxo[] = {
- "pxo",
- "pll8_vote",
- "cxo",
+static const struct clk_parent_data gcc_pxo_pll8_cxo[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
+ { .fw_name = "cxo", .name = "cxo_board" },
};
static struct freq_tbl clk_tbl_gsbi_uart[] = {
@@ -122,8 +126,8 @@ static struct clk_rcg gsbi1_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -138,8 +142,8 @@ static struct clk_branch gsbi1_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_uart_clk",
- .parent_names = (const char *[]){
- "gsbi1_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi1_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -173,8 +177,8 @@ static struct clk_rcg gsbi2_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -189,8 +193,8 @@ static struct clk_branch gsbi2_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_uart_clk",
- .parent_names = (const char *[]){
- "gsbi2_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi2_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -224,8 +228,8 @@ static struct clk_rcg gsbi3_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -240,8 +244,8 @@ static struct clk_branch gsbi3_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_uart_clk",
- .parent_names = (const char *[]){
- "gsbi3_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi3_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -275,8 +279,8 @@ static struct clk_rcg gsbi4_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -291,8 +295,8 @@ static struct clk_branch gsbi4_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_uart_clk",
- .parent_names = (const char *[]){
- "gsbi4_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi4_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -326,8 +330,8 @@ static struct clk_rcg gsbi5_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -342,8 +346,8 @@ static struct clk_branch gsbi5_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_uart_clk",
- .parent_names = (const char *[]){
- "gsbi5_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi5_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -377,8 +381,8 @@ static struct clk_rcg gsbi6_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -393,8 +397,8 @@ static struct clk_branch gsbi6_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_uart_clk",
- .parent_names = (const char *[]){
- "gsbi6_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi6_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -428,8 +432,8 @@ static struct clk_rcg gsbi7_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -444,8 +448,8 @@ static struct clk_branch gsbi7_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_uart_clk",
- .parent_names = (const char *[]){
- "gsbi7_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi7_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -479,8 +483,8 @@ static struct clk_rcg gsbi8_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -495,7 +499,9 @@ static struct clk_branch gsbi8_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_uart_clk",
- .parent_names = (const char *[]){ "gsbi8_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi8_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -528,8 +534,8 @@ static struct clk_rcg gsbi9_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -544,7 +550,9 @@ static struct clk_branch gsbi9_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_uart_clk",
- .parent_names = (const char *[]){ "gsbi9_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi9_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -577,8 +585,8 @@ static struct clk_rcg gsbi10_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -593,7 +601,9 @@ static struct clk_branch gsbi10_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_uart_clk",
- .parent_names = (const char *[]){ "gsbi10_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi10_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -626,8 +636,8 @@ static struct clk_rcg gsbi11_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -642,7 +652,9 @@ static struct clk_branch gsbi11_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_uart_clk",
- .parent_names = (const char *[]){ "gsbi11_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi11_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -675,8 +687,8 @@ static struct clk_rcg gsbi12_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -691,7 +703,9 @@ static struct clk_branch gsbi12_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_uart_clk",
- .parent_names = (const char *[]){ "gsbi12_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi12_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -737,8 +751,8 @@ static struct clk_rcg gsbi1_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -753,7 +767,9 @@ static struct clk_branch gsbi1_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_qup_clk",
- .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi1_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -786,8 +802,8 @@ static struct clk_rcg gsbi2_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -802,7 +818,9 @@ static struct clk_branch gsbi2_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_qup_clk",
- .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi2_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -835,8 +853,8 @@ static struct clk_rcg gsbi3_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -851,7 +869,9 @@ static struct clk_branch gsbi3_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_qup_clk",
- .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi3_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -884,8 +904,8 @@ static struct clk_rcg gsbi4_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -900,7 +920,9 @@ static struct clk_branch gsbi4_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_qup_clk",
- .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi4_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -933,8 +955,8 @@ static struct clk_rcg gsbi5_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -949,7 +971,9 @@ static struct clk_branch gsbi5_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_qup_clk",
- .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi5_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -982,8 +1006,8 @@ static struct clk_rcg gsbi6_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -998,7 +1022,9 @@ static struct clk_branch gsbi6_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_qup_clk",
- .parent_names = (const char *[]){ "gsbi6_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi6_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1031,8 +1057,8 @@ static struct clk_rcg gsbi7_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1047,7 +1073,9 @@ static struct clk_branch gsbi7_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_qup_clk",
- .parent_names = (const char *[]){ "gsbi7_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi7_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1080,8 +1108,8 @@ static struct clk_rcg gsbi8_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1096,7 +1124,9 @@ static struct clk_branch gsbi8_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_qup_clk",
- .parent_names = (const char *[]){ "gsbi8_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi8_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1129,8 +1159,8 @@ static struct clk_rcg gsbi9_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1145,7 +1175,9 @@ static struct clk_branch gsbi9_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_qup_clk",
- .parent_names = (const char *[]){ "gsbi9_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi9_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1178,8 +1210,8 @@ static struct clk_rcg gsbi10_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1194,7 +1226,9 @@ static struct clk_branch gsbi10_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_qup_clk",
- .parent_names = (const char *[]){ "gsbi10_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi10_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1227,8 +1261,8 @@ static struct clk_rcg gsbi11_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1243,7 +1277,9 @@ static struct clk_branch gsbi11_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_qup_clk",
- .parent_names = (const char *[]){ "gsbi11_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi11_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1276,8 +1312,8 @@ static struct clk_rcg gsbi12_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1292,7 +1328,9 @@ static struct clk_branch gsbi12_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_qup_clk",
- .parent_names = (const char *[]){ "gsbi12_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi12_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1338,8 +1376,8 @@ static struct clk_rcg gp0_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp0_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1354,7 +1392,9 @@ static struct clk_branch gp0_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp0_clk",
- .parent_names = (const char *[]){ "gp0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp0_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1387,8 +1427,8 @@ static struct clk_rcg gp1_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp1_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1403,7 +1443,9 @@ static struct clk_branch gp1_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp1_clk",
- .parent_names = (const char *[]){ "gp1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1436,8 +1478,8 @@ static struct clk_rcg gp2_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp2_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1452,7 +1494,9 @@ static struct clk_branch gp2_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp2_clk",
- .parent_names = (const char *[]){ "gp2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1488,8 +1532,8 @@ static struct clk_rcg prng_src = {
.clkr.hw = {
.init = &(struct clk_init_data){
.name = "prng_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
},
@@ -1504,7 +1548,9 @@ static struct clk_branch prng_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "prng_clk",
- .parent_names = (const char *[]){ "prng_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &prng_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -1547,8 +1593,8 @@ static struct clk_rcg sdc1_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc1_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1562,7 +1608,9 @@ static struct clk_branch sdc1_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc1_clk",
- .parent_names = (const char *[]){ "sdc1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1595,8 +1643,8 @@ static struct clk_rcg sdc2_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc2_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1610,7 +1658,9 @@ static struct clk_branch sdc2_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc2_clk",
- .parent_names = (const char *[]){ "sdc2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1643,8 +1693,8 @@ static struct clk_rcg sdc3_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc3_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1658,7 +1708,9 @@ static struct clk_branch sdc3_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc3_clk",
- .parent_names = (const char *[]){ "sdc3_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc3_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1691,8 +1743,8 @@ static struct clk_rcg sdc4_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc4_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1706,7 +1758,9 @@ static struct clk_branch sdc4_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc4_clk",
- .parent_names = (const char *[]){ "sdc4_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc4_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1739,8 +1793,8 @@ static struct clk_rcg sdc5_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc5_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1754,7 +1808,9 @@ static struct clk_branch sdc5_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc5_clk",
- .parent_names = (const char *[]){ "sdc5_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc5_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1792,8 +1848,8 @@ static struct clk_rcg tsif_ref_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "tsif_ref_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1808,7 +1864,9 @@ static struct clk_branch tsif_ref_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk",
- .parent_names = (const char *[]){ "tsif_ref_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &tsif_ref_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1846,8 +1904,8 @@ static struct clk_rcg usb_hs1_xcvr_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_xcvr_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1862,7 +1920,9 @@ static struct clk_branch usb_hs1_xcvr_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_xcvr_clk",
- .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs1_xcvr_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1895,16 +1955,14 @@ static struct clk_rcg usb_fs1_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
-
static struct clk_branch usb_fs1_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
.halt_bit = 15,
@@ -1913,7 +1971,9 @@ static struct clk_branch usb_fs1_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_xcvr_fs_clk",
- .parent_names = usb_fs1_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs1_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1928,7 +1988,9 @@ static struct clk_branch usb_fs1_system_clk = {
.enable_reg = 0x296c,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
- .parent_names = usb_fs1_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs1_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.name = "usb_fs1_system_clk",
.ops = &clk_branch_ops,
@@ -1962,16 +2024,14 @@ static struct clk_rcg usb_fs2_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
-
static struct clk_branch usb_fs2_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
.halt_bit = 12,
@@ -1980,7 +2040,9 @@ static struct clk_branch usb_fs2_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_xcvr_fs_clk",
- .parent_names = usb_fs2_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs2_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1996,7 +2058,9 @@ static struct clk_branch usb_fs2_system_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_system_clk",
- .parent_names = usb_fs2_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs2_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2703,17 +2767,6 @@ MODULE_DEVICE_TABLE(of, gcc_msm8660_match_table);
static int gcc_msm8660_probe(struct platform_device *pdev)
{
- int ret;
- struct device *dev = &pdev->dev;
-
- ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 19200000);
- if (ret)
- return ret;
-
- ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 27000000);
- if (ret)
- return ret;
-
return qcom_cc_probe(pdev, &gcc_msm8660_desc);
}
diff --git a/drivers/clk/qcom/gcc-msm8909.c b/drivers/clk/qcom/gcc-msm8909.c
new file mode 100644
index 000000000000..2a00b11ce2cd
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8909.c
@@ -0,0 +1,2731 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Kernkonzept GmbH.
+ *
+ * Based on gcc-msm8916.c:
+ * Copyright 2015 Linaro Limited
+ * adapted with data from clock-gcc-8909.c in Qualcomm's msm-3.18 release:
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8909.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_XO,
+ DT_SLEEP_CLK,
+ DT_DSI0PLL,
+ DT_DSI0PLL_BYTE,
+};
+
+enum {
+ P_XO,
+ P_SLEEP_CLK,
+ P_GPLL0,
+ P_GPLL1,
+ P_GPLL2,
+ P_BIMC,
+ P_DSI0PLL,
+ P_DSI0PLL_BYTE,
+};
+
+static const struct parent_map gcc_xo_map[] = {
+ { P_XO, 0 },
+};
+
+static const struct clk_parent_data gcc_xo_data[] = {
+ { .index = DT_XO },
+};
+
+static const struct clk_parent_data gcc_sleep_clk_data[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static struct clk_alpha_pll gpll0_early = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_early",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll0",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_early.clkr.hw,
+ },
+ .num_parents = 1,
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x20004,
+ .m_reg = 0x20008,
+ .n_reg = 0x2000c,
+ .config_reg = 0x20010,
+ .mode_reg = 0x20000,
+ .status_reg = 0x2001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll1",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll1_vote",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2_early = {
+ .offset = 0x25000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll2_early",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2 = {
+ .offset = 0x25000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll2",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2_early.clkr.hw,
+ },
+ .num_parents = 1,
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll bimc_pll_early = {
+ .offset = 0x23000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "bimc_pll_early",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv bimc_pll = {
+ .offset = 0x23000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "bimc_pll",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_pll_early.clkr.hw,
+ },
+ .num_parents = 1,
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_BIMC, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_bimc_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &bimc_pll.clkr.hw },
+};
+
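+/*
+ * Frequency tables list the supported rates as F(freq, src, pre_div, m, n)
+ * using the F() helper from clk-rcg.h; pre_div is given as the real divider
+ * value (encoded internally as 2 * div - 1), which is why half-integer
+ * dividers such as 4.5 appear as valid entries.
+ */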
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 bimc_ddr_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "bimc_ddr_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_data),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_rcg2 bimc_gpu_clk_src = {
+ .cmd_rcgr = 0x31028,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "bimc_gpu_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_data),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0, 1, 72, 15625),
+ F(7372800, P_GPLL0, 1, 144, 15625),
+ F(14745600, P_GPLL0, 1, 288, 15625),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_byte0_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_byte_data[] = {
+ { .index = DT_XO },
+ { .index = DT_DSI0PLL_BYTE },
+};
+
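+/*
+ * byte0 and pclk0 (further below) are sourced from the external DSI PHY PLL
+ * referenced via the device tree; clk_byte2_ops/clk_pixel_ops together with
+ * CLK_SET_RATE_PARENT let rate requests propagate up to that PLL.
+ */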
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .hid_width = 5,
+ .parent_map = gcc_byte0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "byte0_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct freq_tbl ftbl_camss_gp_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_gp0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_camss_top_ahb_clk_src[] = {
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_top_ahb_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_top_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .freq_tbl = ftbl_crypto_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "crypto_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_csi_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_csi_phytimer_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_phytimer_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_esc0_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d05c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "esc0_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_gfx3d_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 2 },
+};
+
+static const struct clk_parent_data gcc_gfx3d_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll1_vote.hw },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(307200000, P_GPLL1, 4, 0, 0),
+ F(409600000, P_GPLL1, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .parent_map = gcc_gfx3d_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gfx3d_clk_src",
+ .parent_data = gcc_gfx3d_data,
+ .num_parents = ARRAY_SIZE(gcc_gfx3d_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_gp_clk_src[] = {
+ F(150000, P_XO, 1, 1, 128),
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x08004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp1_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x09004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp2_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x0a004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp3_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_mclk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 3 },
+};
+
+static const struct clk_parent_data gcc_mclk_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+ F(24000000, P_GPLL2, 1, 1, 33),
+ F(66667000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_mclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk0_clk_src",
+ .parent_data = gcc_mclk_data,
+ .num_parents = ARRAY_SIZE(gcc_mclk_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_mclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk1_clk_src",
+ .parent_data = gcc_mclk_data,
+ .num_parents = ARRAY_SIZE(gcc_mclk_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_mdp_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 3 },
+};
+
+static const struct clk_parent_data gcc_mdp_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll1_vote.hw },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(307200000, P_GPLL1, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .parent_map = gcc_mdp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mdp_clk_src",
+ .parent_data = gcc_mdp_data,
+ .num_parents = ARRAY_SIZE(gcc_mdp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_pclk0_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+};
+
+static const struct clk_parent_data gcc_pclk_data[] = {
+ { .index = DT_XO },
+ { .index = DT_DSI0PLL },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pclk0_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x27000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pdm2_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_2_apps_clk[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 10, 1, 4),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gcc_sdcc1_2_apps_clk,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_floor_ops,
+ }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gcc_sdcc1_2_apps_clk,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_floor_ops,
+ }
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x26004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "system_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(57140000, P_GPLL0, 14, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb_hs_system_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_vcodec0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 3 },
+};
+
+static const struct clk_parent_data gcc_vcodec0_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll1_vote.hw },
+};
+
+static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(307200000, P_GPLL1, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4c000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_vcodec0_clk_src,
+ .parent_map = gcc_vcodec0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_vcodec0_data,
+ .num_parents = ARRAY_SIZE(gcc_vcodec0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_gcc_camss_vfe0_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_camss_vfe0_clk,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vfe0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vsync_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
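+/*
+ * Branch (gate) clocks follow. Entries marked BRANCH_HALT_VOTED are enabled
+ * through the shared vote registers (0x45004/0x4500c here) rather than their
+ * own CBCR; halt_reg is typically only polled when enabling, since other
+ * masters may keep a voted clock running after this vote is dropped.
+ */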
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apss_tcu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x01004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_sleep_clk",
+ .parent_data = gcc_sleep_clk_data,
+ .num_parents = ARRAY_SIZE(gcc_sleep_clk_data),
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gfx_tbu_clk = {
+ .halt_reg = 0x12010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gfx_tbu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gfx_tcu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gtcu_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdp_tbu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_smmu_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus_tbu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_vfe_tbu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x31024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gfx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_gpu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x31040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gpu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_gpu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x04020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x05020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x06020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x06020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x07020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x07020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x0701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_gp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x56004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe_ahb_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe_axi_clk = {
+ .halt_reg = 0x58048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x08000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x08000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x09000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x09000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x0a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .parent_data = gcc_sleep_clk_data,
+ .num_parents = ARRAY_SIZE(gcc_sleep_clk_data),
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb_hs_phy_cfg_ahb_clk = {
+ .halt_reg = 0x41030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_system_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb_hs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ .halt_reg = 0x4c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_core0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
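+/*
+ * GDSC power domains. The cxcs arrays list the CBCR registers of branch
+ * clocks inside each domain (e.g. 0x4d080/0x4d088 are the MDSS AXI and MDP
+ * branches above), so the common GDSC code can manage their memory-retention
+ * bits across power transitions.
+ */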
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .cxcs = (unsigned int []) { 0x4d080, 0x4d088 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x5901c,
+ .cxcs = (unsigned int []) { 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .cxcs = (unsigned int []) { 0x4c024, 0x4c01c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x4c028,
+ .cxcs = (unsigned int []) { 0x4c02c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0_gdsc",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe_gdsc = {
+ .gdscr = 0x58034,
+ .cxcs = (unsigned int []) { 0x58038, 0x58048, 0x58050 },
+ .cxc_count = 3,
+ .pd = {
+ .name = "vfe_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
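+/*
+ * Lookup table mapping the indices from the dt-bindings header to the
+ * clk_regmap instances above; this is what the common qcom_cc registration
+ * code uses to expose the clocks to consumers.
+ */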
+static struct clk_regmap *gcc_msm8909_clocks[] = {
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_VOTE] = &gpll1_vote,
+ [GPLL2_EARLY] = &gpll2_early.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [BIMC_PLL_EARLY] = &bimc_pll_early.clkr,
+ [BIMC_PLL] = &bimc_pll.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr,
+ [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr,
+ [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+};
+
+static struct gdsc *gcc_msm8909_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VFE_GDSC] = &vfe_gdsc,
+};
+
+static const struct qcom_reset_map gcc_msm8909_resets[] = {
+ [GCC_AUDIO_CORE_BCR] = { 0x1c008 },
+ [GCC_BLSP1_BCR] = { 0x01000 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x02000 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x03008 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x04018 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x05018 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x06018 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x07018 },
+ [GCC_BLSP1_UART1_BCR] = { 0x02038 },
+ [GCC_BLSP1_UART2_BCR] = { 0x03028 },
+ [GCC_CAMSS_CSI0_BCR] = { 0x4e038 },
+ [GCC_CAMSS_CSI0PHY_BCR] = { 0x4e044 },
+ [GCC_CAMSS_CSI0PIX_BCR] = { 0x4e054 },
+ [GCC_CAMSS_CSI0RDI_BCR] = { 0x4e04c },
+ [GCC_CAMSS_CSI1_BCR] = { 0x4f038 },
+ [GCC_CAMSS_CSI1PHY_BCR] = { 0x4f044 },
+ [GCC_CAMSS_CSI1PIX_BCR] = { 0x4f054 },
+ [GCC_CAMSS_CSI1RDI_BCR] = { 0x4f04c },
+ [GCC_CAMSS_CSI_VFE0_BCR] = { 0x5804c },
+ [GCC_CAMSS_GP0_BCR] = { 0x54014 },
+ [GCC_CAMSS_GP1_BCR] = { 0x55014 },
+ [GCC_CAMSS_ISPIF_BCR] = { 0x50000 },
+ [GCC_CAMSS_MCLK0_BCR] = { 0x52014 },
+ [GCC_CAMSS_MCLK1_BCR] = { 0x53014 },
+ [GCC_CAMSS_PHY0_BCR] = { 0x4e018 },
+ [GCC_CAMSS_TOP_BCR] = { 0x56000 },
+ [GCC_CAMSS_TOP_AHB_BCR] = { 0x5a018 },
+ [GCC_CAMSS_VFE_BCR] = { 0x58030 },
+ [GCC_CRYPTO_BCR] = { 0x16000 },
+ [GCC_MDSS_BCR] = { 0x4d074 },
+ [GCC_OXILI_BCR] = { 0x59018 },
+ [GCC_PDM_BCR] = { 0x44000 },
+ [GCC_PRNG_BCR] = { 0x13000 },
+ [GCC_QUSB2_PHY_BCR] = { 0x4103c },
+ [GCC_SDCC1_BCR] = { 0x42000 },
+ [GCC_SDCC2_BCR] = { 0x43000 },
+ [GCC_ULT_AUDIO_BCR] = { 0x1c0b4 },
+ [GCC_USB2A_PHY_BCR] = { 0x41028 },
+ [GCC_USB2_HS_PHY_ONLY_BCR] = { .reg = 0x41034, .udelay = 15 },
+ [GCC_USB_HS_BCR] = { 0x41000 },
+ [GCC_VENUS0_BCR] = { 0x4c014 },
+ /* Subsystem Restart */
+ [GCC_MSS_RESTART] = { 0x3e000 },
+};
+
+static const struct regmap_config gcc_msm8909_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8909_desc = {
+ .config = &gcc_msm8909_regmap_config,
+ .clks = gcc_msm8909_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8909_clocks),
+ .resets = gcc_msm8909_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8909_resets),
+ .gdscs = gcc_msm8909_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8909_gdscs),
+};
+
+static const struct of_device_id gcc_msm8909_match_table[] = {
+ { .compatible = "qcom,gcc-msm8909" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8909_match_table);
+
+static int gcc_msm8909_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_msm8909_desc);
+}
+
+static struct platform_driver gcc_msm8909_driver = {
+ .probe = gcc_msm8909_probe,
+ .driver = {
+ .name = "gcc-msm8909",
+ .of_match_table = gcc_msm8909_match_table,
+ },
+};
+
+static int __init gcc_msm8909_init(void)
+{
+ return platform_driver_register(&gcc_msm8909_driver);
+}
+core_initcall(gcc_msm8909_init);
+
+static void __exit gcc_msm8909_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8909_driver);
+}
+module_exit(gcc_msm8909_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8909 Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gcc-msm8909");
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 9a46794f6eb8..0c8fe19387a7 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -42,14 +42,138 @@ enum {
P_EXT_MCLK,
};
+static struct clk_pll gpll0 = {
+ .l_reg = 0x21004,
+ .m_reg = 0x21008,
+ .n_reg = 0x2100c,
+ .config_reg = 0x21010,
+ .mode_reg = 0x21000,
+ .status_reg = 0x2101c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x20004,
+ .m_reg = 0x20008,
+ .n_reg = 0x2000c,
+ .config_reg = 0x20010,
+ .mode_reg = 0x20000,
+ .status_reg = 0x2001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll2 = {
+ .l_reg = 0x4a004,
+ .m_reg = 0x4a008,
+ .n_reg = 0x4a00c,
+ .config_reg = 0x4a010,
+ .mode_reg = 0x4a000,
+ .status_reg = 0x4a01c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll2_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll bimc_pll = {
+ .l_reg = 0x23004,
+ .m_reg = 0x23008,
+ .n_reg = 0x2300c,
+ .config_reg = 0x23010,
+ .mode_reg = 0x23000,
+ .status_reg = 0x2301c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_pll",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap bimc_pll_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_pll_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
static const struct parent_map gcc_xo_gpll0_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
};
-static const char * const gcc_xo_gpll0[] = {
- "xo",
- "gpll0_vote",
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
};
static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
@@ -58,10 +182,10 @@ static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
{ P_BIMC, 2 },
};
-static const char * const gcc_xo_gpll0_bimc[] = {
- "xo",
- "gpll0_vote",
- "bimc_pll_vote",
+static const struct clk_parent_data gcc_xo_gpll0_bimc[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &bimc_pll_vote.hw },
};
static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
@@ -71,11 +195,11 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
{ P_GPLL2_AUX, 2 },
};
-static const char * const gcc_xo_gpll0a_gpll1_gpll2a[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
- "gpll2_vote",
+static const struct clk_parent_data gcc_xo_gpll0a_gpll1_gpll2a[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .hw = &gpll2_vote.hw },
};
static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
@@ -84,10 +208,10 @@ static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
{ P_GPLL2, 2 },
};
-static const char * const gcc_xo_gpll0_gpll2[] = {
- "xo",
- "gpll0_vote",
- "gpll2_vote",
+static const struct clk_parent_data gcc_xo_gpll0_gpll2[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
};
static const struct parent_map gcc_xo_gpll0a_map[] = {
@@ -95,9 +219,9 @@ static const struct parent_map gcc_xo_gpll0a_map[] = {
{ P_GPLL0_AUX, 2 },
};
-static const char * const gcc_xo_gpll0a[] = {
- "xo",
- "gpll0_vote",
+static const struct clk_parent_data gcc_xo_gpll0a[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
};
static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = {
@@ -107,11 +231,11 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = {
{ P_SLEEP_CLK, 6 },
};
-static const char * const gcc_xo_gpll0_gpll1a_sleep[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = {
@@ -120,10 +244,10 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = {
{ P_GPLL1_AUX, 2 },
};
-static const char * const gcc_xo_gpll0_gpll1a[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
};
static const struct parent_map gcc_xo_dsibyte_map[] = {
@@ -131,9 +255,9 @@ static const struct parent_map gcc_xo_dsibyte_map[] = {
{ P_DSI0_PHYPLL_BYTE, 2 },
};
-static const char * const gcc_xo_dsibyte[] = {
- "xo",
- "dsi0pllbyte",
+static const struct clk_parent_data gcc_xo_dsibyte[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
};
static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = {
@@ -142,10 +266,10 @@ static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = {
{ P_DSI0_PHYPLL_BYTE, 1 },
};
-static const char * const gcc_xo_gpll0a_dsibyte[] = {
- "xo",
- "gpll0_vote",
- "dsi0pllbyte",
+static const struct clk_parent_data gcc_xo_gpll0a_dsibyte[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
};
static const struct parent_map gcc_xo_gpll0_dsiphy_map[] = {
@@ -154,10 +278,10 @@ static const struct parent_map gcc_xo_gpll0_dsiphy_map[] = {
{ P_DSI0_PHYPLL_DSI, 2 },
};
-static const char * const gcc_xo_gpll0_dsiphy[] = {
- "xo",
- "gpll0_vote",
- "dsi0pll",
+static const struct clk_parent_data gcc_xo_gpll0_dsiphy[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
};
static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = {
@@ -166,10 +290,10 @@ static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = {
{ P_DSI0_PHYPLL_DSI, 1 },
};
-static const char * const gcc_xo_gpll0a_dsiphy[] = {
- "xo",
- "gpll0_vote",
- "dsi0pll",
+static const struct clk_parent_data gcc_xo_gpll0a_dsiphy[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
};
static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2_map[] = {
@@ -179,11 +303,11 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2_map[] = {
{ P_GPLL2, 2 },
};
-static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
- "gpll2_vote",
+static const struct clk_parent_data gcc_xo_gpll0a_gpll1_gpll2[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .hw = &gpll2_vote.hw },
};
static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
@@ -193,11 +317,11 @@ static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_gpll0_gpll1_sleep[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_gpll0_gpll1_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
@@ -208,12 +332,12 @@ static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_gpll1_epi2s_emclk_sleep[] = {
- "xo",
- "gpll1_vote",
- "ext_pri_i2s",
- "ext_mclk",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_gpll1_epi2s_emclk_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_pri_i2s", .name = "ext_pri_i2s" },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
@@ -224,12 +348,12 @@ static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_gpll1_esi2s_emclk_sleep[] = {
- "xo",
- "gpll1_vote",
- "ext_sec_i2s",
- "ext_mclk",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_gpll1_esi2s_emclk_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_sec_i2s", .name = "ext_sec_i2s" },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_sleep_map[] = {
@@ -237,9 +361,9 @@ static const struct parent_map gcc_xo_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_sleep[] = {
- "xo",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
@@ -249,119 +373,11 @@ static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_gpll1_emclk_sleep[] = {
- "xo",
- "gpll1_vote",
- "ext_mclk",
- "sleep_clk",
-};
-
-static struct clk_pll gpll0 = {
- .l_reg = 0x21004,
- .m_reg = 0x21008,
- .n_reg = 0x2100c,
- .config_reg = 0x21010,
- .mode_reg = 0x21000,
- .status_reg = 0x2101c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gpll0",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap gpll0_vote = {
- .enable_reg = 0x45000,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gpll0_vote",
- .parent_names = (const char *[]){ "gpll0" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
-};
-
-static struct clk_pll gpll1 = {
- .l_reg = 0x20004,
- .m_reg = 0x20008,
- .n_reg = 0x2000c,
- .config_reg = 0x20010,
- .mode_reg = 0x20000,
- .status_reg = 0x2001c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gpll1",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap gpll1_vote = {
- .enable_reg = 0x45000,
- .enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data){
- .name = "gpll1_vote",
- .parent_names = (const char *[]){ "gpll1" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
-};
-
-static struct clk_pll gpll2 = {
- .l_reg = 0x4a004,
- .m_reg = 0x4a008,
- .n_reg = 0x4a00c,
- .config_reg = 0x4a010,
- .mode_reg = 0x4a000,
- .status_reg = 0x4a01c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gpll2",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap gpll2_vote = {
- .enable_reg = 0x45000,
- .enable_mask = BIT(2),
- .hw.init = &(struct clk_init_data){
- .name = "gpll2_vote",
- .parent_names = (const char *[]){ "gpll2" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
-};
-
-static struct clk_pll bimc_pll = {
- .l_reg = 0x23004,
- .m_reg = 0x23008,
- .n_reg = 0x2300c,
- .config_reg = 0x23010,
- .mode_reg = 0x23000,
- .status_reg = 0x2301c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "bimc_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap bimc_pll_vote = {
- .enable_reg = 0x45000,
- .enable_mask = BIT(3),
- .hw.init = &(struct clk_init_data){
- .name = "bimc_pll_vote",
- .parent_names = (const char *[]){ "bimc_pll" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
+static const struct clk_parent_data gcc_xo_gpll1_emclk_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
@@ -370,8 +386,8 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcnoc_bfdcd_clk_src",
- .parent_names = gcc_xo_gpll0_bimc,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_bimc,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc),
.ops = &clk_rcg2_ops,
},
};
@@ -382,8 +398,8 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "system_noc_bfdcd_clk_src",
- .parent_names = gcc_xo_gpll0_bimc,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_bimc,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc),
.ops = &clk_rcg2_ops,
},
};
@@ -402,8 +418,8 @@ static struct clk_rcg2 camss_ahb_clk_src = {
.freq_tbl = ftbl_gcc_camss_ahb_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_ahb_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -423,8 +439,8 @@ static struct clk_rcg2 apss_ahb_clk_src = {
.freq_tbl = ftbl_apss_ahb_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_ahb_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -442,8 +458,8 @@ static struct clk_rcg2 csi0_clk_src = {
.freq_tbl = ftbl_gcc_camss_csi0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -455,8 +471,8 @@ static struct clk_rcg2 csi1_clk_src = {
.freq_tbl = ftbl_gcc_camss_csi0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -483,8 +499,8 @@ static struct clk_rcg2 gfx3d_clk_src = {
.freq_tbl = ftbl_gcc_oxili_gfx3d_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
- .parent_names = gcc_xo_gpll0a_gpll1_gpll2a,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0a_gpll1_gpll2a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_gpll1_gpll2a),
.ops = &clk_rcg2_ops,
},
};
@@ -510,8 +526,8 @@ static struct clk_rcg2 vfe0_clk_src = {
.freq_tbl = ftbl_gcc_camss_vfe0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
- .parent_names = gcc_xo_gpll0_gpll2,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2),
.ops = &clk_rcg2_ops,
},
};
@@ -529,8 +545,8 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -558,8 +574,8 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -571,8 +587,8 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -585,8 +601,8 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -598,8 +614,8 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -612,8 +628,8 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -625,8 +641,8 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -639,8 +655,8 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -652,8 +668,8 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -666,8 +682,8 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -679,8 +695,8 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -693,8 +709,8 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -726,8 +742,8 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -740,8 +756,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -759,8 +775,8 @@ static struct clk_rcg2 cci_clk_src = {
.freq_tbl = ftbl_gcc_camss_cci_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
- .parent_names = gcc_xo_gpll0a,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a),
.ops = &clk_rcg2_ops,
},
};
@@ -792,8 +808,8 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.freq_tbl = ftbl_gcc_camss_gp0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -806,8 +822,8 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.freq_tbl = ftbl_gcc_camss_gp0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -826,8 +842,8 @@ static struct clk_rcg2 jpeg0_clk_src = {
.freq_tbl = ftbl_gcc_camss_jpeg0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -847,8 +863,8 @@ static struct clk_rcg2 mclk0_clk_src = {
.freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -861,8 +877,8 @@ static struct clk_rcg2 mclk1_clk_src = {
.freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -880,8 +896,8 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a),
.ops = &clk_rcg2_ops,
},
};
@@ -893,8 +909,8 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a),
.ops = &clk_rcg2_ops,
},
};
@@ -913,8 +929,8 @@ static struct clk_rcg2 cpp_clk_src = {
.freq_tbl = ftbl_gcc_camss_cpp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
- .parent_names = gcc_xo_gpll0_gpll2,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2),
.ops = &clk_rcg2_ops,
},
};
@@ -934,8 +950,8 @@ static struct clk_rcg2 crypto_clk_src = {
.freq_tbl = ftbl_gcc_crypto_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "crypto_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -975,8 +991,8 @@ static struct clk_rcg2 gp1_clk_src = {
.freq_tbl = ftbl_gcc_gp1_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -989,8 +1005,8 @@ static struct clk_rcg2 gp2_clk_src = {
.freq_tbl = ftbl_gcc_gp1_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1003,8 +1019,8 @@ static struct clk_rcg2 gp3_clk_src = {
.freq_tbl = ftbl_gcc_gp1_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1015,8 +1031,8 @@ static struct clk_rcg2 byte0_clk_src = {
.parent_map = gcc_xo_gpll0a_dsibyte_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
- .parent_names = gcc_xo_gpll0a_dsibyte,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0a_dsibyte,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1034,8 +1050,8 @@ static struct clk_rcg2 esc0_clk_src = {
.freq_tbl = ftbl_gcc_mdss_esc0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
- .parent_names = gcc_xo_dsibyte,
- .num_parents = 2,
+ .parent_data = gcc_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(gcc_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -1059,8 +1075,8 @@ static struct clk_rcg2 mdp_clk_src = {
.freq_tbl = ftbl_gcc_mdss_mdp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
- .parent_names = gcc_xo_gpll0_dsiphy,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_dsiphy,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_dsiphy),
.ops = &clk_rcg2_ops,
},
};
@@ -1072,8 +1088,8 @@ static struct clk_rcg2 pclk0_clk_src = {
.parent_map = gcc_xo_gpll0a_dsiphy_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
- .parent_names = gcc_xo_gpll0a_dsiphy,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0a_dsiphy,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsiphy),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1091,8 +1107,8 @@ static struct clk_rcg2 vsync_clk_src = {
.freq_tbl = ftbl_gcc_mdss_vsync_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
- .parent_names = gcc_xo_gpll0a,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a),
.ops = &clk_rcg2_ops,
},
};
@@ -1109,8 +1125,8 @@ static struct clk_rcg2 pdm2_clk_src = {
.freq_tbl = ftbl_gcc_pdm2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1134,8 +1150,8 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.freq_tbl = ftbl_gcc_sdcc1_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1159,8 +1175,8 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.freq_tbl = ftbl_gcc_sdcc2_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1179,8 +1195,8 @@ static struct clk_rcg2 apss_tcu_clk_src = {
.freq_tbl = ftbl_gcc_apss_tcu_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_tcu_clk_src",
- .parent_names = gcc_xo_gpll0a_gpll1_gpll2,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0a_gpll1_gpll2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_gpll1_gpll2),
.ops = &clk_rcg2_ops,
},
};
@@ -1202,8 +1218,8 @@ static struct clk_rcg2 bimc_gpu_clk_src = {
.freq_tbl = ftbl_gcc_bimc_gpu_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "bimc_gpu_clk_src",
- .parent_names = gcc_xo_gpll0_bimc,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_bimc,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc),
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_rcg2_ops,
},
@@ -1221,8 +1237,8 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
.freq_tbl = ftbl_gcc_usb_hs_system_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hs_system_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1247,8 +1263,8 @@ static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_ahb_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_ahbfabric_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1260,8 +1276,8 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
- .parent_names = (const char *[]){
- "ultaudio_ahbfabric_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_ahbfabric_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1277,8 +1293,8 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
- .parent_names = (const char *[]){
- "ultaudio_ahbfabric_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_ahbfabric_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1326,8 +1342,8 @@ static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_pri_i2s_clk_src",
- .parent_names = gcc_xo_gpll1_epi2s_emclk_sleep,
- .num_parents = 5,
+ .parent_data = gcc_xo_gpll1_epi2s_emclk_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_epi2s_emclk_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1339,8 +1355,8 @@ static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_pri_i2s_clk",
- .parent_names = (const char *[]){
- "ultaudio_lpaif_pri_i2s_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1357,8 +1373,8 @@ static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_sec_i2s_clk_src",
- .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep,
- .num_parents = 5,
+ .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_esi2s_emclk_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1370,8 +1386,8 @@ static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_sec_i2s_clk",
- .parent_names = (const char *[]){
- "ultaudio_lpaif_sec_i2s_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1388,8 +1404,8 @@ static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_aux_i2s_clk_src",
- .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep,
- .num_parents = 5,
+ .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_esi2s_emclk_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1401,8 +1417,8 @@ static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_aux_i2s_clk",
- .parent_names = (const char *[]){
- "ultaudio_lpaif_aux_i2s_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1423,8 +1439,8 @@ static struct clk_rcg2 ultaudio_xo_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_xo_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_xo_clk_src",
- .parent_names = gcc_xo_sleep,
- .num_parents = 2,
+ .parent_data = gcc_xo_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1436,8 +1452,8 @@ static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_avsync_xo_clk",
- .parent_names = (const char *[]){
- "ultaudio_xo_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1453,8 +1469,8 @@ static struct clk_branch gcc_ultaudio_stc_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_stc_xo_clk",
- .parent_names = (const char *[]){
- "ultaudio_xo_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1479,8 +1495,8 @@ static struct clk_rcg2 codec_digcodec_clk_src = {
.freq_tbl = ftbl_codec_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "codec_digcodec_clk_src",
- .parent_names = gcc_xo_gpll1_emclk_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll1_emclk_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_emclk_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1492,8 +1508,8 @@ static struct clk_branch gcc_codec_digcodec_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_codec_digcodec_clk",
- .parent_names = (const char *[]){
- "codec_digcodec_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &codec_digcodec_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1509,8 +1525,8 @@ static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_pcnoc_mport_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1525,8 +1541,8 @@ static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_pcnoc_sway_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1549,8 +1565,8 @@ static struct clk_rcg2 vcodec0_clk_src = {
.freq_tbl = ftbl_gcc_venus0_vcodec0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vcodec0_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1563,8 +1579,8 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1579,8 +1595,8 @@ static struct clk_branch gcc_blsp1_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1596,8 +1612,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1613,8 +1629,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1630,8 +1646,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1647,8 +1663,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1664,8 +1680,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1681,8 +1697,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1698,8 +1714,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1715,8 +1731,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1732,8 +1748,8 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1749,8 +1765,8 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1766,8 +1782,8 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1783,8 +1799,8 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1800,8 +1816,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1817,8 +1833,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1835,8 +1851,8 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1851,8 +1867,8 @@ static struct clk_branch gcc_camss_cci_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1868,8 +1884,8 @@ static struct clk_branch gcc_camss_cci_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_clk",
- .parent_names = (const char *[]){
- "cci_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1885,8 +1901,8 @@ static struct clk_branch gcc_camss_csi0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1902,8 +1918,8 @@ static struct clk_branch gcc_camss_csi0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1919,8 +1935,8 @@ static struct clk_branch gcc_camss_csi0phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phy_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1936,8 +1952,8 @@ static struct clk_branch gcc_camss_csi0pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0pix_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1953,8 +1969,8 @@ static struct clk_branch gcc_camss_csi0rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0rdi_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1970,8 +1986,8 @@ static struct clk_branch gcc_camss_csi1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1987,8 +2003,8 @@ static struct clk_branch gcc_camss_csi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2004,8 +2020,8 @@ static struct clk_branch gcc_camss_csi1phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phy_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2021,8 +2037,8 @@ static struct clk_branch gcc_camss_csi1pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1pix_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2038,8 +2054,8 @@ static struct clk_branch gcc_camss_csi1rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1rdi_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2055,8 +2071,8 @@ static struct clk_branch gcc_camss_csi_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi_vfe0_clk",
- .parent_names = (const char *[]){
- "vfe0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2072,8 +2088,8 @@ static struct clk_branch gcc_camss_gp0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_gp0_clk",
- .parent_names = (const char *[]){
- "camss_gp0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2089,8 +2105,8 @@ static struct clk_branch gcc_camss_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_gp1_clk",
- .parent_names = (const char *[]){
- "camss_gp1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2106,8 +2122,8 @@ static struct clk_branch gcc_camss_ispif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ispif_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2123,8 +2139,8 @@ static struct clk_branch gcc_camss_jpeg0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg0_clk",
- .parent_names = (const char *[]){
- "jpeg0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2140,8 +2156,8 @@ static struct clk_branch gcc_camss_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2157,8 +2173,8 @@ static struct clk_branch gcc_camss_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2174,8 +2190,8 @@ static struct clk_branch gcc_camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk0_clk",
- .parent_names = (const char *[]){
- "mclk0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2191,8 +2207,8 @@ static struct clk_branch gcc_camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk1_clk",
- .parent_names = (const char *[]){
- "mclk1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2208,8 +2224,8 @@ static struct clk_branch gcc_camss_micro_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_micro_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2225,8 +2241,8 @@ static struct clk_branch gcc_camss_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phytimer_clk",
- .parent_names = (const char *[]){
- "csi0phytimer_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2242,8 +2258,8 @@ static struct clk_branch gcc_camss_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phytimer_clk",
- .parent_names = (const char *[]){
- "csi1phytimer_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2259,8 +2275,8 @@ static struct clk_branch gcc_camss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2276,8 +2292,8 @@ static struct clk_branch gcc_camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_top_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2293,8 +2309,8 @@ static struct clk_branch gcc_camss_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cpp_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2310,8 +2326,8 @@ static struct clk_branch gcc_camss_cpp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cpp_clk",
- .parent_names = (const char *[]){
- "cpp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2327,8 +2343,8 @@ static struct clk_branch gcc_camss_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe0_clk",
- .parent_names = (const char *[]){
- "vfe0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2344,8 +2360,8 @@ static struct clk_branch gcc_camss_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2361,8 +2377,8 @@ static struct clk_branch gcc_camss_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2379,8 +2395,8 @@ static struct clk_branch gcc_crypto_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2397,8 +2413,8 @@ static struct clk_branch gcc_crypto_axi_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_axi_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2415,8 +2431,8 @@ static struct clk_branch gcc_crypto_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_clk",
- .parent_names = (const char *[]){
- "crypto_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &crypto_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2432,8 +2448,8 @@ static struct clk_branch gcc_oxili_gmem_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gmem_clk",
- .parent_names = (const char *[]){
- "gfx3d_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2449,8 +2465,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]){
- "gp1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2466,8 +2482,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]){
- "gp2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2483,8 +2499,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]){
- "gp3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2500,8 +2516,8 @@ static struct clk_branch gcc_mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2517,8 +2533,8 @@ static struct clk_branch gcc_mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2534,8 +2550,8 @@ static struct clk_branch gcc_mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_byte0_clk",
- .parent_names = (const char *[]){
- "byte0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2551,8 +2567,8 @@ static struct clk_branch gcc_mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_esc0_clk",
- .parent_names = (const char *[]){
- "esc0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2568,8 +2584,8 @@ static struct clk_branch gcc_mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_mdp_clk",
- .parent_names = (const char *[]){
- "mdp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2585,8 +2601,8 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_pclk0_clk",
- .parent_names = (const char *[]){
- "pclk0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2602,8 +2618,8 @@ static struct clk_branch gcc_mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_vsync_clk",
- .parent_names = (const char *[]){
- "vsync_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2619,25 +2635,8 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_cfg_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
- .halt_reg = 0x49004,
- .clkr = {
- .enable_reg = 0x49004,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mss_q6_bimc_axi_clk",
- .parent_names = (const char *[]){
- "bimc_ddr_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2653,8 +2652,8 @@ static struct clk_branch gcc_oxili_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2670,8 +2669,8 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gfx3d_clk",
- .parent_names = (const char *[]){
- "gfx3d_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2687,8 +2686,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]){
- "pdm2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2704,8 +2703,8 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2722,8 +2721,8 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2738,8 +2737,8 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2755,8 +2754,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_names = (const char *[]){
- "sdcc1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2772,8 +2771,8 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2789,8 +2788,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]){
- "sdcc2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2805,13 +2804,30 @@ static struct clk_rcg2 bimc_ddr_clk_src = {
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "bimc_ddr_clk_src",
- .parent_names = gcc_xo_gpll0_bimc,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_bimc,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc),
.ops = &clk_rcg2_ops,
.flags = CLK_GET_RATE_NOCACHE,
},
};
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_apss_tcu_clk = {
.halt_reg = 0x12018,
.clkr = {
@@ -2819,8 +2835,8 @@ static struct clk_branch gcc_apss_tcu_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_apss_tcu_clk",
- .parent_names = (const char *[]){
- "bimc_ddr_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2835,8 +2851,8 @@ static struct clk_branch gcc_gfx_tcu_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_gfx_tcu_clk",
- .parent_names = (const char *[]){
- "bimc_ddr_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2851,8 +2867,8 @@ static struct clk_branch gcc_gtcu_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_gtcu_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2868,8 +2884,8 @@ static struct clk_branch gcc_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gfx_clk",
- .parent_names = (const char *[]){
- "bimc_gpu_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_gpu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2885,8 +2901,8 @@ static struct clk_branch gcc_bimc_gpu_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gpu_clk",
- .parent_names = (const char *[]){
- "bimc_gpu_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_gpu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2902,8 +2918,8 @@ static struct clk_branch gcc_jpeg_tbu_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_jpeg_tbu_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2919,8 +2935,8 @@ static struct clk_branch gcc_mdp_tbu_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_tbu_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2936,8 +2952,8 @@ static struct clk_branch gcc_smmu_cfg_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "gcc_smmu_cfg_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2953,8 +2969,8 @@ static struct clk_branch gcc_venus_tbu_clk = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus_tbu_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2970,8 +2986,8 @@ static struct clk_branch gcc_vfe_tbu_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gcc_vfe_tbu_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2987,8 +3003,8 @@ static struct clk_branch gcc_usb2a_phy_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2a_phy_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3004,8 +3020,8 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3021,8 +3037,8 @@ static struct clk_branch gcc_usb_hs_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_system_clk",
- .parent_names = (const char *[]){
- "usb_hs_system_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3038,8 +3054,8 @@ static struct clk_branch gcc_venus0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3055,8 +3071,8 @@ static struct clk_branch gcc_venus0_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3072,8 +3088,8 @@ static struct clk_branch gcc_venus0_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_vcodec0_clk",
- .parent_names = (const char *[]){
- "vcodec0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
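
Note on the conversion pattern applied throughout both files: string-based .parent_names entries become .parent_hws pointers where the parent clock is registered by the same driver, .parent_data entries where the parent may be handed in by firmware (for instance the sleep clock above), and hard-coded num_parents values become ARRAY_SIZE() over the parent table. A minimal sketch of the three forms, assuming only the struct clk_init_data and struct clk_parent_data definitions from include/linux/clk-provider.h; the example_* identifiers below are illustrative and do not appear in these drivers.

#include <linux/clk-provider.h>

/* A parent clock owned by the same driver (illustrative). */
static struct clk_fixed_rate example_pll = {
	.fixed_rate = 800000000,
	.hw.init = &(struct clk_init_data){
		.name = "example_pll",
		.ops = &clk_fixed_rate_ops,
	},
};

/*
 * struct clk_init_data accepts exactly one of three parent descriptions:
 *
 *  .parent_names - global clock-name strings (the legacy form removed here)
 *  .parent_hws   - clk_hw pointers, for parents registered by this driver
 *  .parent_data  - per-parent { .hw / .fw_name / .name } entries, used when
 *                  a parent may be supplied by firmware (DT "clocks" property)
 *
 * Deriving num_parents with ARRAY_SIZE() keeps the count in sync with the
 * table it describes.
 */
static const struct clk_hw *example_parent_hws[] = {
	&example_pll.hw,
};

static const struct clk_parent_data example_parent_data[] = {
	{ .fw_name = "sleep_clk", .name = "sleep_clk_src" },
	{ .hw = &example_pll.hw },
};

static struct clk_init_data example_branch_init = {
	.name = "example_branch_clk",
	.parent_hws = example_parent_hws,
	.num_parents = ARRAY_SIZE(example_parent_hws),
	.ops = &clk_fixed_factor_ops,	/* illustrative; any branch/divider ops fit here */
};
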
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 8e2d9fb98ad5..af608f165896 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -614,7 +614,7 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pcnoc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -626,7 +626,7 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "system_noc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_gpll6a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -638,7 +638,7 @@ static struct clk_rcg2 bimc_ddr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "bimc_ddr_clk_src",
.parent_data = gcc_xo_gpll0_bimc_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_parent_data),
.ops = &clk_rcg2_ops,
.flags = CLK_GET_RATE_NOCACHE,
},
@@ -651,7 +651,7 @@ static struct clk_rcg2 system_mm_noc_bfdcd_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "system_mm_noc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_gpll6a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -671,7 +671,7 @@ static struct clk_rcg2 camss_ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_ahb_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -692,7 +692,7 @@ static struct clk_rcg2 apss_ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_ahb_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -711,7 +711,7 @@ static struct clk_rcg2 csi0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -724,7 +724,7 @@ static struct clk_rcg2 csi1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -753,7 +753,7 @@ static struct clk_rcg2 gfx3d_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
.parent_data = gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -782,7 +782,7 @@ static struct clk_rcg2 vfe0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
.parent_data = gcc_xo_gpll0_gpll2_gpll4_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_gpll4_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -801,7 +801,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -826,7 +826,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -839,7 +839,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -853,7 +853,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -866,7 +866,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -880,7 +880,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -893,7 +893,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -907,7 +907,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -920,7 +920,7 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -934,7 +934,7 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -947,7 +947,7 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -961,7 +961,7 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -994,7 +994,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1008,7 +1008,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1028,7 +1028,7 @@ static struct clk_rcg2 cci_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
.parent_data = gcc_xo_gpll0a_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1048,7 +1048,7 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1062,7 +1062,7 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1082,7 +1082,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1102,7 +1102,7 @@ static struct clk_rcg2 mclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1116,7 +1116,7 @@ static struct clk_rcg2 mclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1135,7 +1135,7 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1148,7 +1148,7 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1171,7 +1171,7 @@ static struct clk_rcg2 cpp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
.parent_data = gcc_xo_gpll0_gpll2_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1193,7 +1193,7 @@ static struct clk_rcg2 crypto_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "crypto_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1212,7 +1212,7 @@ static struct clk_rcg2 gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1226,7 +1226,7 @@ static struct clk_rcg2 gp2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1240,7 +1240,7 @@ static struct clk_rcg2 gp3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1252,7 +1252,7 @@ static struct clk_rcg2 byte0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_data = gcc_xo_gpll0a_dsibyte_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsibyte_parent_data),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1265,7 +1265,7 @@ static struct clk_rcg2 byte1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_data = gcc_xo_gpll0a_dsibyte_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsibyte_parent_data),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1284,7 +1284,7 @@ static struct clk_rcg2 esc0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
.parent_data = gcc_xo_dsibyte_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_dsibyte_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1297,7 +1297,7 @@ static struct clk_rcg2 esc1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
.parent_data = gcc_xo_dsibyte_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_dsibyte_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1325,7 +1325,7 @@ static struct clk_rcg2 mdp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
.parent_data = gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1338,7 +1338,7 @@ static struct clk_rcg2 pclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_data = gcc_xo_gpll0a_dsiphy_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsiphy_parent_data),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1352,7 +1352,7 @@ static struct clk_rcg2 pclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_data = gcc_xo_gpll0a_dsiphy_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsiphy_parent_data),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1371,7 +1371,7 @@ static struct clk_rcg2 vsync_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
.parent_data = gcc_xo_gpll0a_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1390,7 +1390,7 @@ static struct clk_rcg2 pdm2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1416,7 +1416,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1430,7 +1430,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1450,7 +1450,7 @@ static struct clk_rcg2 apss_tcu_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_tcu_clk_src",
.parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1473,7 +1473,7 @@ static struct clk_rcg2 bimc_gpu_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "bimc_gpu_clk_src",
.parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data),
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_rcg2_ops,
},
@@ -1494,7 +1494,7 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hs_system_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1512,7 +1512,7 @@ static struct clk_rcg2 usb_fs_system_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_fs_system_clk_src",
.parent_data = gcc_xo_gpll6_gpll0_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll6_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1530,7 +1530,7 @@ static struct clk_rcg2 usb_fs_ic_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_fs_ic_clk_src",
.parent_data = gcc_xo_gpll6_gpll0a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll6_gpll0a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1556,7 +1556,7 @@ static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_ahbfabric_clk_src",
.parent_data = gcc_xo_gpll0_gpll1_sleep_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1568,8 +1568,8 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_ahbfabric_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_ahbfabric_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1585,8 +1585,8 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_ahbfabric_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_ahbfabric_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1635,7 +1635,7 @@ static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_pri_i2s_clk_src",
.parent_data = gcc_xo_gpll1_epi2s_emclk_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_epi2s_emclk_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1647,8 +1647,8 @@ static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_pri_i2s_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1666,7 +1666,7 @@ static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_sec_i2s_clk_src",
.parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_esi2s_emclk_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1678,8 +1678,8 @@ static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_sec_i2s_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1697,7 +1697,7 @@ static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_aux_i2s_clk_src",
.parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_esi2s_emclk_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1709,8 +1709,8 @@ static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_aux_i2s_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1732,7 +1732,7 @@ static struct clk_rcg2 ultaudio_xo_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_xo_clk_src",
.parent_data = gcc_xo_sleep_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1744,8 +1744,8 @@ static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_avsync_xo_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1761,8 +1761,8 @@ static struct clk_branch gcc_ultaudio_stc_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_stc_xo_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1788,7 +1788,7 @@ static struct clk_rcg2 codec_digcodec_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "codec_digcodec_clk_src",
.parent_data = gcc_xo_gpll1_emclk_sleep_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_emclk_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1800,8 +1800,8 @@ static struct clk_branch gcc_codec_digcodec_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_codec_digcodec_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &codec_digcodec_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &codec_digcodec_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1817,8 +1817,8 @@ static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_pcnoc_mport_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1833,8 +1833,8 @@ static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_pcnoc_sway_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1858,7 +1858,7 @@ static struct clk_rcg2 vcodec0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vcodec0_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1871,8 +1871,8 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1899,8 +1899,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1916,8 +1916,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1933,8 +1933,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1950,8 +1950,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1967,8 +1967,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1984,8 +1984,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2001,8 +2001,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2018,8 +2018,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2035,8 +2035,8 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2052,8 +2052,8 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup5_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2069,8 +2069,8 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2086,8 +2086,8 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup6_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2103,8 +2103,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_uart1_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2120,8 +2120,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_uart2_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2138,8 +2138,8 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2154,8 +2154,8 @@ static struct clk_branch gcc_camss_cci_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2171,8 +2171,8 @@ static struct clk_branch gcc_camss_cci_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cci_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2188,8 +2188,8 @@ static struct clk_branch gcc_camss_csi0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2205,8 +2205,8 @@ static struct clk_branch gcc_camss_csi0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2222,8 +2222,8 @@ static struct clk_branch gcc_camss_csi0phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phy_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2239,8 +2239,8 @@ static struct clk_branch gcc_camss_csi0pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0pix_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2256,8 +2256,8 @@ static struct clk_branch gcc_camss_csi0rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0rdi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2273,8 +2273,8 @@ static struct clk_branch gcc_camss_csi1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2290,8 +2290,8 @@ static struct clk_branch gcc_camss_csi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2307,8 +2307,8 @@ static struct clk_branch gcc_camss_csi1phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phy_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2324,8 +2324,8 @@ static struct clk_branch gcc_camss_csi1pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1pix_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2341,8 +2341,8 @@ static struct clk_branch gcc_camss_csi1rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1rdi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2358,8 +2358,8 @@ static struct clk_branch gcc_camss_csi_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi_vfe0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vfe0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2375,8 +2375,8 @@ static struct clk_branch gcc_camss_gp0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_gp0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_gp0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2392,8 +2392,8 @@ static struct clk_branch gcc_camss_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_gp1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_gp1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2409,8 +2409,8 @@ static struct clk_branch gcc_camss_ispif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ispif_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2426,8 +2426,8 @@ static struct clk_branch gcc_camss_jpeg0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &jpeg0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2443,8 +2443,8 @@ static struct clk_branch gcc_camss_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2460,8 +2460,8 @@ static struct clk_branch gcc_camss_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2477,8 +2477,8 @@ static struct clk_branch gcc_camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &mclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2494,8 +2494,8 @@ static struct clk_branch gcc_camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &mclk1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2511,8 +2511,8 @@ static struct clk_branch gcc_camss_micro_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_micro_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2528,8 +2528,8 @@ static struct clk_branch gcc_camss_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2545,8 +2545,8 @@ static struct clk_branch gcc_camss_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2562,8 +2562,8 @@ static struct clk_branch gcc_camss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2579,8 +2579,8 @@ static struct clk_branch gcc_camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_top_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2596,8 +2596,8 @@ static struct clk_branch gcc_camss_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cpp_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2613,8 +2613,8 @@ static struct clk_branch gcc_camss_cpp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cpp_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cpp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2630,8 +2630,8 @@ static struct clk_branch gcc_camss_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vfe0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2647,8 +2647,8 @@ static struct clk_branch gcc_camss_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2664,8 +2664,8 @@ static struct clk_branch gcc_camss_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2682,8 +2682,8 @@ static struct clk_branch gcc_crypto_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2700,8 +2700,8 @@ static struct clk_branch gcc_crypto_axi_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2718,8 +2718,8 @@ static struct clk_branch gcc_crypto_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &crypto_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &crypto_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2735,8 +2735,8 @@ static struct clk_branch gcc_oxili_gmem_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gmem_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gfx3d_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2752,8 +2752,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gp1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2769,8 +2769,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gp2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2786,8 +2786,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gp3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2803,8 +2803,8 @@ static struct clk_branch gcc_mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2820,8 +2820,8 @@ static struct clk_branch gcc_mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2837,8 +2837,8 @@ static struct clk_branch gcc_mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_byte0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &byte0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2854,8 +2854,8 @@ static struct clk_branch gcc_mdss_byte1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_byte1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &byte1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2871,8 +2871,8 @@ static struct clk_branch gcc_mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_esc0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &esc0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2888,8 +2888,8 @@ static struct clk_branch gcc_mdss_esc1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_esc1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &esc1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2905,8 +2905,8 @@ static struct clk_branch gcc_mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_mdp_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &mdp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2922,8 +2922,8 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_pclk0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2939,8 +2939,8 @@ static struct clk_branch gcc_mdss_pclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_pclk1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pclk1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2956,8 +2956,8 @@ static struct clk_branch gcc_mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_vsync_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vsync_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2973,8 +2973,8 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_cfg_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2990,8 +2990,8 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_ddr_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3007,8 +3007,8 @@ static struct clk_branch gcc_oxili_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3024,8 +3024,8 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gfx3d_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gfx3d_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3041,8 +3041,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pdm2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3058,8 +3058,8 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3076,8 +3076,8 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3092,8 +3092,8 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3109,8 +3109,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &sdcc1_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3126,8 +3126,8 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3143,8 +3143,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &sdcc2_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3161,8 +3161,8 @@ static struct clk_branch gcc_apss_tcu_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_apss_tcu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_ddr_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3178,8 +3178,8 @@ static struct clk_branch gcc_gfx_tcu_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_gfx_tcu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_ddr_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3195,8 +3195,8 @@ static struct clk_branch gcc_gfx_tbu_clk = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gcc_gfx_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_ddr_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3212,8 +3212,8 @@ static struct clk_branch gcc_mdp_tbu_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3230,8 +3230,8 @@ static struct clk_branch gcc_venus_tbu_clk = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3248,8 +3248,8 @@ static struct clk_branch gcc_vfe_tbu_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gcc_vfe_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3266,8 +3266,8 @@ static struct clk_branch gcc_jpeg_tbu_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_jpeg_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3284,8 +3284,8 @@ static struct clk_branch gcc_smmu_cfg_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "gcc_smmu_cfg_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3302,8 +3302,8 @@ static struct clk_branch gcc_gtcu_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_gtcu_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3320,8 +3320,8 @@ static struct clk_branch gcc_cpp_tbu_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_cpp_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3338,8 +3338,8 @@ static struct clk_branch gcc_mdp_rt_tbu_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_rt_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3355,8 +3355,8 @@ static struct clk_branch gcc_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gfx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_gpu_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_gpu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3372,8 +3372,8 @@ static struct clk_branch gcc_bimc_gpu_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gpu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_gpu_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_gpu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3401,8 +3401,8 @@ static struct clk_branch gcc_usb_fs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_fs_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3418,8 +3418,8 @@ static struct clk_branch gcc_usb_fs_ic_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_fs_ic_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &usb_fs_ic_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs_ic_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3435,8 +3435,8 @@ static struct clk_branch gcc_usb_fs_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_fs_system_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &usb_fs_system_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3452,8 +3452,8 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3469,8 +3469,8 @@ static struct clk_branch gcc_usb_hs_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_system_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &usb_hs_system_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3486,8 +3486,8 @@ static struct clk_branch gcc_venus0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3503,8 +3503,8 @@ static struct clk_branch gcc_venus0_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3520,8 +3520,8 @@ static struct clk_branch gcc_venus0_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_vcodec0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vcodec0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3537,8 +3537,8 @@ static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_core0_vcodec0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vcodec0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3554,8 +3554,8 @@ static struct clk_branch gcc_venus0_core1_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_core1_vcodec0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vcodec0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
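
The hunks above all make the same mechanical change: a branch clock whose only parent is another clock registered by this same driver stops describing that parent through a one-element clk_parent_data wrapper and instead points at the parent's clk_hw directly via .parent_hws. A minimal sketch of the before/after idiom, using hypothetical example_* identifiers rather than anything from the patch:

#include <linux/clk-provider.h>
#include "clk-branch.h"	/* driver-local header declaring clk_branch2_ops */

/* Hypothetical parent clock owned by the same driver; stands in for the
 * various *_clk_src RCGs referenced in the hunks above. */
static struct clk_hw example_src_hw;

static const struct clk_init_data example_branch_init = {
	.name = "example_branch_clk",
	/* New style: reference the in-driver parent's clk_hw directly ... */
	.parent_hws = (const struct clk_hw*[]){ &example_src_hw },
	/* ... rather than wrapping it in a single-entry clk_parent_data:
	 * .parent_data = &(const struct clk_parent_data){ .hw = &example_src_hw },
	 */
	.num_parents = 1,
	.flags = CLK_SET_RATE_PARENT,
	.ops = &clk_branch2_ops,
};

Both forms resolve the parent without a string lookup; .parent_hws is simply the more compact spelling when every parent is a clk_hw from the same driver.
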
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index a6e13b91e4c8..9dd4e7ffa1f8 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -35,7 +35,9 @@ static struct clk_pll pll3 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll3",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -46,7 +48,9 @@ static struct clk_regmap pll4_vote = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "pll4_vote",
- .parent_names = (const char *[]){ "pll4" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pll4", .name = "pll4",
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -62,7 +66,9 @@ static struct clk_pll pll8 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll8",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -73,7 +79,9 @@ static struct clk_regmap pll8_vote = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "pll8_vote",
- .parent_names = (const char *[]){ "pll8" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pll8.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -96,7 +104,9 @@ static struct hfpll_data hfpll0_data = {
static struct clk_hfpll hfpll0 = {
.d = &hfpll0_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll0",
.ops = &clk_ops_hfpll,
@@ -136,7 +146,9 @@ static struct hfpll_data hfpll1_data = {
static struct clk_hfpll hfpll1 = {
.d = &hfpll1_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll1",
.ops = &clk_ops_hfpll,
@@ -162,7 +174,9 @@ static struct hfpll_data hfpll2_data = {
static struct clk_hfpll hfpll2 = {
.d = &hfpll2_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll2",
.ops = &clk_ops_hfpll,
@@ -188,7 +202,9 @@ static struct hfpll_data hfpll3_data = {
static struct clk_hfpll hfpll3 = {
.d = &hfpll3_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll3",
.ops = &clk_ops_hfpll,
@@ -228,7 +244,9 @@ static struct hfpll_data hfpll_l2_data = {
static struct clk_hfpll hfpll_l2 = {
.d = &hfpll_l2_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll_l2",
.ops = &clk_ops_hfpll,
@@ -247,7 +265,9 @@ static struct clk_pll pll14 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll14",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -258,7 +278,9 @@ static struct clk_regmap pll14_vote = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "pll14_vote",
- .parent_names = (const char *[]){ "pll14" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pll14.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -276,9 +298,9 @@ static const struct parent_map gcc_pxo_pll8_map[] = {
{ P_PLL8, 3 }
};
-static const char * const gcc_pxo_pll8[] = {
- "pxo",
- "pll8_vote",
+static const struct clk_parent_data gcc_pxo_pll8[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
};
static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
@@ -287,10 +309,10 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
{ P_CXO, 5 }
};
-static const char * const gcc_pxo_pll8_cxo[] = {
- "pxo",
- "pll8_vote",
- "cxo",
+static const struct clk_parent_data gcc_pxo_pll8_cxo[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
+ { .fw_name = "cxo", .name = "cxo_board" },
};
static const struct parent_map gcc_pxo_pll8_pll3_map[] = {
@@ -299,10 +321,10 @@ static const struct parent_map gcc_pxo_pll8_pll3_map[] = {
{ P_PLL3, 6 }
};
-static const char * const gcc_pxo_pll8_pll3[] = {
- "pxo",
- "pll8_vote",
- "pll3",
+static const struct clk_parent_data gcc_pxo_pll8_pll3[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
+ { .hw = &pll3.clkr.hw },
};
static struct freq_tbl clk_tbl_gsbi_uart[] = {
@@ -348,8 +370,8 @@ static struct clk_rcg gsbi1_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -364,8 +386,8 @@ static struct clk_branch gsbi1_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_uart_clk",
- .parent_names = (const char *[]){
- "gsbi1_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi1_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -399,8 +421,8 @@ static struct clk_rcg gsbi2_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -415,8 +437,8 @@ static struct clk_branch gsbi2_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_uart_clk",
- .parent_names = (const char *[]){
- "gsbi2_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi2_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -450,8 +472,8 @@ static struct clk_rcg gsbi3_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -466,8 +488,8 @@ static struct clk_branch gsbi3_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_uart_clk",
- .parent_names = (const char *[]){
- "gsbi3_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi3_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -501,8 +523,8 @@ static struct clk_rcg gsbi4_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -517,8 +539,8 @@ static struct clk_branch gsbi4_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_uart_clk",
- .parent_names = (const char *[]){
- "gsbi4_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi4_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -552,8 +574,8 @@ static struct clk_rcg gsbi5_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -568,8 +590,8 @@ static struct clk_branch gsbi5_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_uart_clk",
- .parent_names = (const char *[]){
- "gsbi5_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi5_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -603,8 +625,8 @@ static struct clk_rcg gsbi6_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -619,8 +641,8 @@ static struct clk_branch gsbi6_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_uart_clk",
- .parent_names = (const char *[]){
- "gsbi6_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi6_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -654,8 +676,8 @@ static struct clk_rcg gsbi7_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -670,8 +692,8 @@ static struct clk_branch gsbi7_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_uart_clk",
- .parent_names = (const char *[]){
- "gsbi7_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi7_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -705,8 +727,8 @@ static struct clk_rcg gsbi8_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -721,7 +743,9 @@ static struct clk_branch gsbi8_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_uart_clk",
- .parent_names = (const char *[]){ "gsbi8_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi8_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -754,8 +778,8 @@ static struct clk_rcg gsbi9_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -770,7 +794,9 @@ static struct clk_branch gsbi9_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_uart_clk",
- .parent_names = (const char *[]){ "gsbi9_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi9_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -803,8 +829,8 @@ static struct clk_rcg gsbi10_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -819,7 +845,9 @@ static struct clk_branch gsbi10_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_uart_clk",
- .parent_names = (const char *[]){ "gsbi10_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi10_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -852,8 +880,8 @@ static struct clk_rcg gsbi11_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -868,7 +896,9 @@ static struct clk_branch gsbi11_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_uart_clk",
- .parent_names = (const char *[]){ "gsbi11_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi11_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -901,8 +931,8 @@ static struct clk_rcg gsbi12_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -917,7 +947,9 @@ static struct clk_branch gsbi12_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_uart_clk",
- .parent_names = (const char *[]){ "gsbi12_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi12_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -963,8 +995,8 @@ static struct clk_rcg gsbi1_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -979,7 +1011,9 @@ static struct clk_branch gsbi1_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_qup_clk",
- .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi1_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1012,8 +1046,8 @@ static struct clk_rcg gsbi2_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1028,7 +1062,9 @@ static struct clk_branch gsbi2_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_qup_clk",
- .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi2_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1061,8 +1097,8 @@ static struct clk_rcg gsbi3_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1077,7 +1113,9 @@ static struct clk_branch gsbi3_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_qup_clk",
- .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi3_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1110,8 +1148,8 @@ static struct clk_rcg gsbi4_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1126,7 +1164,9 @@ static struct clk_branch gsbi4_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_qup_clk",
- .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi4_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1159,8 +1199,8 @@ static struct clk_rcg gsbi5_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1175,7 +1215,9 @@ static struct clk_branch gsbi5_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_qup_clk",
- .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi5_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1208,8 +1250,8 @@ static struct clk_rcg gsbi6_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1224,7 +1266,9 @@ static struct clk_branch gsbi6_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_qup_clk",
- .parent_names = (const char *[]){ "gsbi6_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi6_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1257,8 +1301,8 @@ static struct clk_rcg gsbi7_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1273,7 +1317,9 @@ static struct clk_branch gsbi7_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_qup_clk",
- .parent_names = (const char *[]){ "gsbi7_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi7_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1306,8 +1352,8 @@ static struct clk_rcg gsbi8_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1322,7 +1368,9 @@ static struct clk_branch gsbi8_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_qup_clk",
- .parent_names = (const char *[]){ "gsbi8_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi8_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1355,8 +1403,8 @@ static struct clk_rcg gsbi9_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1371,7 +1419,9 @@ static struct clk_branch gsbi9_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_qup_clk",
- .parent_names = (const char *[]){ "gsbi9_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi9_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1404,8 +1454,8 @@ static struct clk_rcg gsbi10_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1420,7 +1470,9 @@ static struct clk_branch gsbi10_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_qup_clk",
- .parent_names = (const char *[]){ "gsbi10_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi10_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1453,8 +1505,8 @@ static struct clk_rcg gsbi11_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1469,7 +1521,9 @@ static struct clk_branch gsbi11_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_qup_clk",
- .parent_names = (const char *[]){ "gsbi11_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi11_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1502,8 +1556,8 @@ static struct clk_rcg gsbi12_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1518,7 +1572,9 @@ static struct clk_branch gsbi12_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_qup_clk",
- .parent_names = (const char *[]){ "gsbi12_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi12_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1564,8 +1620,8 @@ static struct clk_rcg gp0_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp0_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1580,7 +1636,9 @@ static struct clk_branch gp0_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp0_clk",
- .parent_names = (const char *[]){ "gp0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp0_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1613,8 +1671,8 @@ static struct clk_rcg gp1_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp1_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1629,7 +1687,9 @@ static struct clk_branch gp1_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp1_clk",
- .parent_names = (const char *[]){ "gp1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1662,8 +1722,8 @@ static struct clk_rcg gp2_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp2_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1678,7 +1738,9 @@ static struct clk_branch gp2_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp2_clk",
- .parent_names = (const char *[]){ "gp2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1714,8 +1776,8 @@ static struct clk_rcg prng_src = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "prng_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
},
@@ -1730,7 +1792,9 @@ static struct clk_branch prng_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "prng_clk",
- .parent_names = (const char *[]){ "prng_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &prng_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -1776,8 +1840,8 @@ static struct clk_rcg sdc1_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc1_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1791,7 +1855,9 @@ static struct clk_branch sdc1_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc1_clk",
- .parent_names = (const char *[]){ "sdc1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1824,8 +1890,8 @@ static struct clk_rcg sdc2_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc2_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1839,7 +1905,9 @@ static struct clk_branch sdc2_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc2_clk",
- .parent_names = (const char *[]){ "sdc2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1872,8 +1940,8 @@ static struct clk_rcg sdc3_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc3_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1887,7 +1955,9 @@ static struct clk_branch sdc3_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc3_clk",
- .parent_names = (const char *[]){ "sdc3_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc3_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1920,8 +1990,8 @@ static struct clk_rcg sdc4_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc4_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1935,7 +2005,9 @@ static struct clk_branch sdc4_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc4_clk",
- .parent_names = (const char *[]){ "sdc4_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc4_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1968,8 +2040,8 @@ static struct clk_rcg sdc5_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc5_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1983,7 +2055,9 @@ static struct clk_branch sdc5_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc5_clk",
- .parent_names = (const char *[]){ "sdc5_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc5_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2021,8 +2095,8 @@ static struct clk_rcg tsif_ref_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "tsif_ref_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2037,7 +2111,9 @@ static struct clk_branch tsif_ref_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk",
- .parent_names = (const char *[]){ "tsif_ref_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &tsif_ref_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2075,8 +2151,8 @@ static struct clk_rcg usb_hs1_xcvr_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_xcvr_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2091,7 +2167,9 @@ static struct clk_branch usb_hs1_xcvr_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_xcvr_clk",
- .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs1_xcvr_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2124,8 +2202,8 @@ static struct clk_rcg usb_hs3_xcvr_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hs3_xcvr_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2140,7 +2218,9 @@ static struct clk_branch usb_hs3_xcvr_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hs3_xcvr_clk",
- .parent_names = (const char *[]){ "usb_hs3_xcvr_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs3_xcvr_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2173,8 +2253,8 @@ static struct clk_rcg usb_hs4_xcvr_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hs4_xcvr_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2189,7 +2269,9 @@ static struct clk_branch usb_hs4_xcvr_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hs4_xcvr_clk",
- .parent_names = (const char *[]){ "usb_hs4_xcvr_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs4_xcvr_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2222,16 +2304,14 @@ static struct clk_rcg usb_hsic_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" };
-
static struct clk_branch usb_hsic_xcvr_fs_clk = {
.halt_reg = 0x2fc8,
.halt_bit = 2,
@@ -2240,7 +2320,9 @@ static struct clk_branch usb_hsic_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_xcvr_fs_clk",
- .parent_names = usb_hsic_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2255,7 +2337,9 @@ static struct clk_branch usb_hsic_system_clk = {
.enable_reg = 0x292c,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
- .parent_names = usb_hsic_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.name = "usb_hsic_system_clk",
.ops = &clk_branch_ops,
@@ -2271,7 +2355,9 @@ static struct clk_branch usb_hsic_hsic_clk = {
.enable_reg = 0x2b44,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pll14_vote" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pll14_vote.hw
+ },
.num_parents = 1,
.name = "usb_hsic_hsic_clk",
.ops = &clk_branch_ops,
@@ -2317,16 +2403,14 @@ static struct clk_rcg usb_fs1_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
-
static struct clk_branch usb_fs1_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
.halt_bit = 15,
@@ -2335,7 +2419,9 @@ static struct clk_branch usb_fs1_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_xcvr_fs_clk",
- .parent_names = usb_fs1_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs1_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2350,7 +2436,9 @@ static struct clk_branch usb_fs1_system_clk = {
.enable_reg = 0x296c,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
- .parent_names = usb_fs1_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs1_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.name = "usb_fs1_system_clk",
.ops = &clk_branch_ops,
@@ -2384,16 +2472,14 @@ static struct clk_rcg usb_fs2_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
-
static struct clk_branch usb_fs2_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
.halt_bit = 12,
@@ -2402,7 +2488,9 @@ static struct clk_branch usb_fs2_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_xcvr_fs_clk",
- .parent_names = usb_fs2_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs2_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2418,7 +2506,9 @@ static struct clk_branch usb_fs2_system_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_system_clk",
- .parent_names = usb_fs2_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs2_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2872,8 +2962,8 @@ static struct clk_rcg ce3_src = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "ce3_src",
- .parent_names = gcc_pxo_pll8_pll3,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_pll3,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll3),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2888,7 +2978,9 @@ static struct clk_branch ce3_core_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "ce3_core_clk",
- .parent_names = (const char *[]){ "ce3_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ce3_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2904,7 +2996,9 @@ static struct clk_branch ce3_h_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "ce3_h_clk",
- .parent_names = (const char *[]){ "ce3_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ce3_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2934,8 +3028,8 @@ static struct clk_rcg sata_clk_src = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "sata_clk_src",
- .parent_names = gcc_pxo_pll8_pll3,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_pll3,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll3),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2950,7 +3044,9 @@ static struct clk_branch sata_rxoob_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "sata_rxoob_clk",
- .parent_names = (const char *[]){ "sata_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sata_clk_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2966,7 +3062,9 @@ static struct clk_branch sata_pmalive_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "sata_pmalive_clk",
- .parent_names = (const char *[]){ "sata_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sata_clk_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2982,7 +3080,9 @@ static struct clk_branch sata_phy_ref_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "sata_phy_ref_clk",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
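
The gcc-msm8960.c hunks above extend the same conversion to multi-parent clocks: the const char * parent-name tables become clk_parent_data tables, where board clocks such as pxo are matched by .fw_name (with .name kept as the legacy global-name fallback) and clocks owned by this driver are wired up through .hw, while num_parents switches to ARRAY_SIZE() so the count cannot drift from the table. A minimal sketch under those assumptions, again with hypothetical example_* identifiers:

#include <linux/clk-provider.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include "clk-rcg.h"		/* driver-local header declaring clk_rcg_ops */

/* Hypothetical in-driver PLL; stands in for pll8_vote above. */
static struct clk_hw example_pll_hw;

/* Firmware/board clocks are matched by .fw_name against the DT
 * "clock-names" entries, with .name as the legacy fallback; in-driver
 * clocks are referenced by .hw and need no lookup at all. */
static const struct clk_parent_data example_parents[] = {
	{ .fw_name = "pxo", .name = "pxo_board" },
	{ .hw = &example_pll_hw },
};

static const struct clk_init_data example_rcg_init = {
	.name = "example_rcg_src",
	.parent_data = example_parents,
	.num_parents = ARRAY_SIZE(example_parents),	/* stays in sync with the table */
	.ops = &clk_rcg_ops,
};
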
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
index b6fa7b8e8006..7792b8f23704 100644
--- a/drivers/clk/qcom/gcc-qcm2290.c
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -54,33 +54,9 @@ static const struct pll_vco spark_vco[] = {
{ 750000000, 1500000000, 1 },
};
-static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
- [PLL_OFF_L_VAL] = 0x04,
- [PLL_OFF_ALPHA_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL_U] = 0x0c,
- [PLL_OFF_TEST_CTL] = 0x10,
- [PLL_OFF_TEST_CTL_U] = 0x14,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_USER_CTL_U] = 0x1C,
- [PLL_OFF_CONFIG_CTL] = 0x20,
- [PLL_OFF_STATUS] = 0x24,
- },
- [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
- [PLL_OFF_L_VAL] = 0x04,
- [PLL_OFF_ALPHA_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL_U] = 0x0c,
- [PLL_OFF_TEST_CTL] = 0x10,
- [PLL_OFF_TEST_CTL_U] = 0x14,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_CONFIG_CTL] = 0x1C,
- [PLL_OFF_STATUS] = 0x20,
- },
-};
-
static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(0),
@@ -106,7 +82,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
.post_div_table = post_div_table_gpll0_out_aux2,
.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
.width = 4,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_aux2",
.parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
@@ -117,7 +93,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
static struct clk_alpha_pll gpll1 = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(1),
@@ -147,7 +123,7 @@ static struct clk_alpha_pll gpll10 = {
.offset = 0xa000,
.vco_table = spark_vco,
.num_vco = ARRAY_SIZE(spark_vco),
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(10),
@@ -179,7 +155,7 @@ static struct clk_alpha_pll gpll11 = {
.offset = 0xb000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x79000,
@@ -197,7 +173,7 @@ static struct clk_alpha_pll gpll11 = {
static struct clk_alpha_pll gpll3 = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(3),
@@ -223,7 +199,7 @@ static struct clk_alpha_pll_postdiv gpll3_out_main = {
.post_div_table = post_div_table_gpll3_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll3_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll3.clkr.hw },
@@ -234,7 +210,7 @@ static struct clk_alpha_pll_postdiv gpll3_out_main = {
static struct clk_alpha_pll gpll4 = {
.offset = 0x4000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(4),
@@ -251,7 +227,7 @@ static struct clk_alpha_pll gpll4 = {
static struct clk_alpha_pll gpll5 = {
.offset = 0x5000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(5),
@@ -268,7 +244,7 @@ static struct clk_alpha_pll gpll5 = {
static struct clk_alpha_pll gpll6 = {
.offset = 0x6000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(6),
@@ -294,7 +270,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_main = {
.post_div_table = post_div_table_gpll6_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll6_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll6.clkr.hw },
@@ -305,7 +281,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_main = {
static struct clk_alpha_pll gpll7 = {
.offset = 0x7000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(7),
@@ -340,7 +316,7 @@ static struct clk_alpha_pll gpll8 = {
.offset = 0x8000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x79000,
@@ -367,7 +343,7 @@ static struct clk_alpha_pll_postdiv gpll8_out_main = {
.post_div_table = post_div_table_gpll8_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll8_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll8.clkr.hw },
@@ -393,7 +369,7 @@ static struct clk_alpha_pll gpll9 = {
.offset = 0x9000,
.vco_table = brammo_vco,
.num_vco = ARRAY_SIZE(brammo_vco),
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(9),
@@ -419,7 +395,7 @@ static struct clk_alpha_pll_postdiv gpll9_out_main = {
.post_div_table = post_div_table_gpll9_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
.width = 2,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll9_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll9.clkr.hw },
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index c2ea09945c47..2d3980251e78 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -2224,7 +2224,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 7ff64d4d5920..8afb7575e712 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -3108,7 +3108,7 @@ static struct gdsc gcc_pcie_1_gdsc = {
.pd = {
.name = "gcc_pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE,
};
@@ -3126,7 +3126,7 @@ static struct gdsc gcc_usb30_prim_gdsc = {
.pd = {
.name = "gcc_usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE,
};
@@ -3135,7 +3135,7 @@ static struct gdsc gcc_usb30_sec_gdsc = {
.pd = {
.name = "gcc_usb30_sec_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE,
};
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index a2f3ffcc5849..a18ed88f3b82 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -6768,6 +6768,10 @@ static struct gdsc pcie_1_tunnel_gdsc = {
.flags = VOTABLE,
};
+/*
+ * The Qualcomm PCIe driver does not yet implement suspend, so keep the
+ * PCIe power domains always-on for now.
+ */
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
.collapse_ctrl = 0x52128,
@@ -6776,7 +6780,7 @@ static struct gdsc pcie_2a_gdsc = {
.name = "pcie_2a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc pcie_2b_gdsc = {
@@ -6787,7 +6791,7 @@ static struct gdsc pcie_2b_gdsc = {
.name = "pcie_2b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc pcie_3a_gdsc = {
@@ -6798,7 +6802,7 @@ static struct gdsc pcie_3a_gdsc = {
.name = "pcie_3a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc pcie_3b_gdsc = {
@@ -6809,7 +6813,7 @@ static struct gdsc pcie_3b_gdsc = {
.name = "pcie_3b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc pcie_4_gdsc = {
@@ -6820,7 +6824,7 @@ static struct gdsc pcie_4_gdsc = {
.name = "pcie_4_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc ufs_card_gdsc = {
@@ -6844,7 +6848,7 @@ static struct gdsc usb30_mp_gdsc = {
.pd = {
.name = "usb30_mp_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc usb30_prim_gdsc = {
@@ -6852,7 +6856,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc usb30_sec_gdsc = {
@@ -6860,7 +6864,7 @@ static struct gdsc usb30_sec_gdsc = {
.pd = {
.name = "usb30_sec_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct clk_regmap *gcc_sc8280xp_clocks[] = {
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 9b97425008ce..db918c92a522 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -757,7 +757,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
.num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 58aa3ec9a7fc..6af08e0ca847 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -31,6 +31,7 @@ enum {
P_GPLL0_OUT_EVEN,
P_GPLL0_OUT_MAIN,
P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
P_SLEEP_CLK,
};
@@ -68,6 +69,23 @@ static struct clk_alpha_pll gpll4 = {
},
};
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo", .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
static const struct clk_div_table post_div_table_fabia_even[] = {
{ 0x0, 1 },
{ 0x1, 2 },
@@ -194,6 +212,19 @@ static const struct clk_parent_data gcc_parent_data_10[] = {
{ .hw = &gpll0_out_even.clkr.hw },
};
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
@@ -233,6 +264,26 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_sdm670_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdm670_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x4815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sdm670_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_data = gcc_parent_data_8_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
@@ -656,6 +707,54 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
.clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
};
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x26028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x26010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(9600000, P_BI_TCXO, 2, 0, 0),
@@ -705,6 +804,31 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_sdm670_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdm670_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdm670_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
F(105495, P_BI_TCXO, 2, 1, 91),
{ }
@@ -1283,6 +1407,28 @@ static struct clk_branch gcc_cpuss_rbcpr_clk = {
},
};
+/*
+ * The source clock frequencies are different for SDM670; define a child clock
+ * pointing to the source clock that uses SDM670 frequencies.
+ */
+static struct clk_branch gcc_sdm670_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdm670_cpuss_rbcpr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ddrss_gpu_axi_clk = {
.halt_reg = 0x44038,
.halt_check = BRANCH_VOTED,
@@ -2353,6 +2499,55 @@ static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
},
};
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sdcc2_ahb_clk = {
.halt_reg = 0x14008,
.halt_check = BRANCH_HALT,
@@ -2415,6 +2610,28 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
},
};
+/*
+ * The source clock frequencies are different for SDM670; define a child clock
+ * pointing to the source clock that uses SDM670 frequencies.
+ */
+static struct clk_branch gcc_sdm670_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdm670_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
.halt_reg = 0x414c,
.halt_check = BRANCH_HALT_VOTED,
@@ -3308,6 +3525,155 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
.flags = VOTABLE,
};
+static struct clk_regmap *gcc_sdm670_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_sdm670_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_sdm670_cpuss_rbcpr_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
+ [GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdm670_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdm670_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] =
+ &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+};
+
static struct clk_regmap *gcc_sdm845_clocks[] = {
[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
@@ -3533,6 +3899,22 @@ static const struct qcom_reset_map gcc_sdm845_resets[] = {
[GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
};
+static struct gdsc *gcc_sdm670_gdscs[] = {
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
static struct gdsc *gcc_sdm845_gdscs[] = {
[PCIE_0_GDSC] = &pcie_0_gdsc,
[PCIE_1_GDSC] = &pcie_1_gdsc,
@@ -3563,6 +3945,17 @@ static const struct regmap_config gcc_sdm845_regmap_config = {
.fast_io = true,
};
+static const struct qcom_cc_desc gcc_sdm670_desc = {
+ .config = &gcc_sdm845_regmap_config,
+ .clks = gcc_sdm670_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdm670_clocks),
+ /* Snapdragon 670 can function without its own exclusive resets. */
+ .resets = gcc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdm845_resets),
+ .gdscs = gcc_sdm670_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdm670_gdscs),
+};
+
static const struct qcom_cc_desc gcc_sdm845_desc = {
.config = &gcc_sdm845_regmap_config,
.clks = gcc_sdm845_clocks,
@@ -3574,7 +3967,8 @@ static const struct qcom_cc_desc gcc_sdm845_desc = {
};
static const struct of_device_id gcc_sdm845_match_table[] = {
- { .compatible = "qcom,gcc-sdm845" },
+ { .compatible = "qcom,gcc-sdm670", .data = &gcc_sdm670_desc },
+ { .compatible = "qcom,gcc-sdm845", .data = &gcc_sdm845_desc },
{ }
};
MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
@@ -3600,6 +3994,7 @@ static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
static int gcc_sdm845_probe(struct platform_device *pdev)
{
+ const struct qcom_cc_desc *gcc_desc;
struct regmap *regmap;
int ret;
@@ -3616,7 +4011,8 @@ static int gcc_sdm845_probe(struct platform_device *pdev)
if (ret)
return ret;
- return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
+ gcc_desc = of_device_get_match_data(&pdev->dev);
+ return qcom_cc_really_probe(pdev, gcc_desc, regmap);
}
static struct platform_driver gcc_sdm845_driver = {
diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
index 68fe9f6f0d2f..565f9912039f 100644
--- a/drivers/clk/qcom/gcc-sm6115.c
+++ b/drivers/clk/qcom/gcc-sm6115.c
@@ -57,7 +57,7 @@ static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(0),
@@ -83,7 +83,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
.post_div_table = post_div_table_gpll0_out_aux2,
.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_aux2",
.parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
@@ -92,18 +92,6 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
},
};
-/* listed as BRAMMO, but it doesn't really match */
-static const u8 clk_gpll9_regs[PLL_OFF_MAX_REGS] = {
- [PLL_OFF_L_VAL] = 0x04,
- [PLL_OFF_ALPHA_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL_U] = 0x0c,
- [PLL_OFF_TEST_CTL] = 0x10,
- [PLL_OFF_TEST_CTL_U] = 0x14,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_CONFIG_CTL] = 0x1C,
- [PLL_OFF_STATUS] = 0x20,
-};
-
static const struct clk_div_table post_div_table_gpll0_out_main[] = {
{ 0x0, 1 },
{ }
@@ -115,7 +103,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_main = {
.post_div_table = post_div_table_gpll0_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
@@ -137,7 +125,7 @@ static struct clk_alpha_pll gpll10 = {
.offset = 0xa000,
.vco_table = gpll10_vco,
.num_vco = ARRAY_SIZE(gpll10_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(10),
@@ -163,7 +151,7 @@ static struct clk_alpha_pll_postdiv gpll10_out_main = {
.post_div_table = post_div_table_gpll10_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll10_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll10_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll10.clkr.hw },
@@ -189,7 +177,7 @@ static struct clk_alpha_pll gpll11 = {
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
.flags = SUPPORTS_DYNAMIC_UPDATE,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(11),
@@ -215,7 +203,7 @@ static struct clk_alpha_pll_postdiv gpll11_out_main = {
.post_div_table = post_div_table_gpll11_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll11_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll11_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll11.clkr.hw },
@@ -229,7 +217,7 @@ static struct clk_alpha_pll gpll3 = {
.offset = 0x3000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(3),
@@ -248,7 +236,7 @@ static struct clk_alpha_pll gpll4 = {
.offset = 0x4000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(4),
@@ -274,7 +262,7 @@ static struct clk_alpha_pll_postdiv gpll4_out_main = {
.post_div_table = post_div_table_gpll4_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll4_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll4.clkr.hw },
@@ -287,7 +275,7 @@ static struct clk_alpha_pll gpll6 = {
.offset = 0x6000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(6),
@@ -313,7 +301,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_main = {
.post_div_table = post_div_table_gpll6_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll6_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll6.clkr.hw },
@@ -326,7 +314,7 @@ static struct clk_alpha_pll gpll7 = {
.offset = 0x7000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(7),
@@ -352,7 +340,7 @@ static struct clk_alpha_pll_postdiv gpll7_out_main = {
.post_div_table = post_div_table_gpll7_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll7_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll7_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll7.clkr.hw },
@@ -380,7 +368,7 @@ static struct clk_alpha_pll gpll8 = {
.offset = 0x8000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x79000,
@@ -407,7 +395,7 @@ static struct clk_alpha_pll_postdiv gpll8_out_main = {
.post_div_table = post_div_table_gpll8_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll8_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll8.clkr.hw },
@@ -431,7 +419,7 @@ static struct clk_alpha_pll gpll9 = {
.offset = 0x9000,
.vco_table = gpll9_vco,
.num_vco = ARRAY_SIZE(gpll9_vco),
- .regs = clk_gpll9_regs,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(9),
@@ -457,7 +445,7 @@ static struct clk_alpha_pll_postdiv gpll9_out_main = {
.post_div_table = post_div_table_gpll9_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
.width = 2,
- .regs = clk_gpll9_regs,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll9_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll9.clkr.hw },
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 69412400efa4..9b4e4bb05963 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2316,7 +2316,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc ufs_phy_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm6375.c b/drivers/clk/qcom/gcc-sm6375.c
new file mode 100644
index 000000000000..89a1cc90b145
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm6375.c
@@ -0,0 +1,3919 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6375-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_ODD,
+ P_GPLL10_OUT_EVEN,
+ P_GPLL11_OUT_EVEN,
+ P_GPLL11_OUT_ODD,
+ P_GPLL3_OUT_EVEN,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL4_OUT_EVEN,
+ P_GPLL5_OUT_EVEN,
+ P_GPLL6_OUT_EVEN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_EVEN,
+ P_GPLL8_OUT_EVEN,
+ P_GPLL8_OUT_MAIN,
+ P_GPLL9_OUT_EARLY,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct pll_vco zonda_vco[] = {
+ { 595200000, 3600000000UL, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_gpll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+/* 1152MHz Configuration */
+static const struct alpha_pll_config gpll10_config = {
+ .l = 0x3c,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329a299c,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpll10 = {
+ .offset = 0xa000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .flags = SUPPORTS_FSM_LEGACY_MODE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+/* 532MHz Configuration */
+static const struct alpha_pll_config gpll11_config = {
+ .l = 0x1b,
+ .alpha = 0xb555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329a299c,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpll11 = {
+ .offset = 0xb000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .flags = SUPPORTS_FSM_LEGACY_MODE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll11",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+/* 400MHz Configuration */
+static const struct alpha_pll_config gpll8_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329a299c,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x8000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .flags = SUPPORTS_FSM_LEGACY_MODE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_even = {
+ .offset = 0x8000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll8_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 1440MHz Configuration */
+static const struct alpha_pll_config gpll9_config = {
+ .l = 0x4b,
+ .alpha = 0x0,
+ .config_ctl_val = 0x08200800,
+ .config_ctl_hi_val = 0x05022011,
+ .config_ctl_hi1_val = 0x08000000,
+ .user_ctl_val = 0x00000301,
+};
+
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x9000,
+ .vco_table = zonda_vco,
+ .num_vco = ARRAY_SIZE(zonda_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_zonda_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll9_out_main[] = {
+ { 0x3, 4 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll9_out_main = {
+ .offset = 0x9000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll9_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll9_out_main",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll9.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_zonda_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL6_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll6_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_ODD, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL9_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll9.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_ODD, 4 },
+ { P_GPLL4_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll3_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL8_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll8_out_even.clkr.hw },
+ { .hw = &gpll3_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL5_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL8_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll5.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll8_out_even.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_ODD, 4 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL4_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL8_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll8_out_even.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL8_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll8_out_even.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL6_OUT_MAIN, 4 },
+ { P_GPLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll3_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL7_OUT_EVEN, 3 },
+ { P_GPLL4_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL11_OUT_ODD, 2 },
+ { P_GPLL11_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll11.clkr.hw },
+ { .hw = &gpll11.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ .cmd_rcgr = 0x5802c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_cci_0_clk_src = {
+ .cmd_rcgr = 0x56000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_camss_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_0_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_cci_1_clk_src = {
+ .cmd_rcgr = 0x5c000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_camss_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_1_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5901c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x59038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phytimer_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x59054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi3phytimer_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 15),
+ F(65454545, P_GPLL9_OUT_EARLY, 11, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ .cmd_rcgr = 0x5101c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ .cmd_rcgr = 0x51038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ .cmd_rcgr = 0x51054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk4_clk_src = {
+ .cmd_rcgr = 0x51070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk4_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ .cmd_rcgr = 0x55024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_camss_ope_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL8_OUT_EVEN, 1, 0, 0),
+ F(266600000, P_GPLL8_OUT_EVEN, 1, 0, 0),
+ F(480000000, P_GPLL8_OUT_EVEN, 1, 0, 0),
+ F(580000000, P_GPLL8_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ .cmd_rcgr = 0x55004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_camss_ope_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
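+/* Frequency table shared by the TFE 0/1/2 clock sources below */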
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(144000000, P_GPLL9_OUT_MAIN, 2.5, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(180000000, P_GPLL9_OUT_MAIN, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(329142857, P_GPLL10_OUT_EVEN, 3.5, 0, 0),
+ F(384000000, P_GPLL10_OUT_EVEN, 3, 0, 0),
+ F(460800000, P_GPLL10_OUT_EVEN, 2.5, 0, 0),
+ F(576000000, P_GPLL10_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ .cmd_rcgr = 0x52004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(266571429, P_GPLL5_OUT_EVEN, 3.5, 0, 0),
+ F(426400000, P_GPLL3_OUT_MAIN, 2.5, 0, 0),
+ F(466500000, P_GPLL5_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x52094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ .cmd_rcgr = 0x52024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x520b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
+ .cmd_rcgr = 0x52044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
+ .cmd_rcgr = 0x520d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_csid_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
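+/* CPHY RX source shared by the CSI PHY (cphy_0..3) and TFE *_cphy_rx branch clocks */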
+static const struct freq_tbl ftbl_gcc_camss_tfe_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(256000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x52064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_camss_tfe_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_cphy_rx_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_top_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x58010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_top_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x2b13c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2_ao),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x4d004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x4e004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x4f004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x20010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
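+/* Frequency table shared by all QUPv3 wrap0/wrap1 serial-engine clock sources */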
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x1f148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x1f278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x1f3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x1f4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x1f608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x1f738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x5301c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x5314c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x5327c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x533ac,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x534dc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x5360c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_EVEN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x38028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x38010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GPLL7_OUT_EVEN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x45020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x45048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x4507c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x45060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1a01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1a034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1a060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
+ F(133000000, P_GPLL11_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_GPLL11_OUT_EVEN, 2.5, 0, 0),
+ F(300000000, P_GPLL11_OUT_EVEN, 2, 0, 0),
+ F(384000000, P_GPLL11_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_video_venus_clk_src = {
+ .cmd_rcgr = 0x58060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_14,
+ .freq_tbl = ftbl_gcc_video_venus_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
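+/* Read-only post-dividers on the CPUSS AHB and USB30 mock UTMI clock sources */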
+static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
+ .reg = 0x2b154,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1a04c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
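+/* Branch (gate) clocks; voted branches are enabled via bits in the shared 0x79004/0x7900c enable registers */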
+static struct clk_branch gcc_ahb2phy_csi_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_csi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_usb_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_usb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_nrt_clk = {
+ .halt_reg = 0x17070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_nrt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_rt_clk = {
+ .halt_reg = 0x1706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_rt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_axi_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_0_clk = {
+ .halt_reg = 0x56018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_1_clk = {
+ .halt_reg = 0x5c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_0_clk = {
+ .halt_reg = 0x52088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_1_clk = {
+ .halt_reg = 0x5208c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5208c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_2_clk = {
+ .halt_reg = 0x52090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_3_clk = {
+ .halt_reg = 0x520f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x59018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x59034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2phytimer_clk = {
+ .halt_reg = 0x59050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi3phytimer_clk = {
+ .halt_reg = 0x5906c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x51018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x51034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x51050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk3_clk = {
+ .halt_reg = 0x5106c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk4_clk = {
+ .halt_reg = 0x51088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_nrt_axi_clk = {
+ .halt_reg = 0x58054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_ahb_clk = {
+ .halt_reg = 0x5503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_ope_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_clk = {
+ .halt_reg = 0x5501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_ope_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_rt_axi_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_clk = {
+ .halt_reg = 0x5201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x5207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_csid_clk = {
+ .halt_reg = 0x520ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_clk = {
+ .halt_reg = 0x5203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x52080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_csid_clk = {
+ .halt_reg = 0x520cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_clk = {
+ .halt_reg = 0x5205c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5205c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_cphy_rx_clk = {
+ .halt_reg = 0x52084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_csid_clk = {
+ .halt_reg = 0x520ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x58028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1a084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_disp_gpll0_clk_src = {
+ .reg = 0x17058,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_disp_gpll0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x17020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sleep_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_core_clk = {
+ .halt_reg = 0x17064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x4d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x4e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x4f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x3600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x36018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_throttle_core_clk = {
+ .halt_reg = 0x36048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(31),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x21004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x17060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x17010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x1f014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1f144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1f274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1f3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1f4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x1f604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x1f734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x53014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x5300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x53018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x53148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x53278,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x533a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x534d8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x53608,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x53004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x53008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x38008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x3800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_ufs_phy_axi_clk = {
+ .halt_reg = 0x45098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x45098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1a080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x45014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x45010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x45044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x45078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x4501c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x45018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x45018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x45040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1a018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx5_pcie_clkref_en_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx5_pcie_clkref_en_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1a054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1a058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1a058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vcodec0_axi_clk = {
+ .halt_reg = 0x6e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ahb_clk = {
+ .halt_reg = 0x6e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ctl_axi_clk = {
+ .halt_reg = 0x6e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_core_clk = {
+ .halt_reg = 0x17068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_vcodec0_sys_clk = {
+ .halt_reg = 0x580a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x580a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x580a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_vcodec0_sys_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_video_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_venus_ctl_clk = {
+ .halt_reg = 0x5808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_video_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x1a004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x45004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_top_gdsc = {
+ .gdscr = 0x58004,
+ .pd = {
+ .name = "camss_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x5807c,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vcodec0_gdsc = {
+ .gdscr = 0x58098,
+ .pd = {
+ .name = "vcodec0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc = {
+ .gdscr = 0x7d074,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc = {
+ .gdscr = 0x7d078,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x7d07c,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_sm6375_clocks[] = {
+ [GCC_AHB2PHY_CSI_CLK] = &gcc_ahb2phy_csi_clk.clkr,
+ [GCC_AHB2PHY_USB_CLK] = &gcc_ahb2phy_usb_clk.clkr,
+ [GCC_BIMC_GPU_AXI_CLK] = &gcc_bimc_gpu_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAM_THROTTLE_NRT_CLK] = &gcc_cam_throttle_nrt_clk.clkr,
+ [GCC_CAM_THROTTLE_RT_CLK] = &gcc_cam_throttle_rt_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMSS_AXI_CLK] = &gcc_camss_axi_clk.clkr,
+ [GCC_CAMSS_AXI_CLK_SRC] = &gcc_camss_axi_clk_src.clkr,
+ [GCC_CAMSS_CCI_0_CLK] = &gcc_camss_cci_0_clk.clkr,
+ [GCC_CAMSS_CCI_0_CLK_SRC] = &gcc_camss_cci_0_clk_src.clkr,
+ [GCC_CAMSS_CCI_1_CLK] = &gcc_camss_cci_1_clk.clkr,
+ [GCC_CAMSS_CCI_1_CLK_SRC] = &gcc_camss_cci_1_clk_src.clkr,
+ [GCC_CAMSS_CPHY_0_CLK] = &gcc_camss_cphy_0_clk.clkr,
+ [GCC_CAMSS_CPHY_1_CLK] = &gcc_camss_cphy_1_clk.clkr,
+ [GCC_CAMSS_CPHY_2_CLK] = &gcc_camss_cphy_2_clk.clkr,
+ [GCC_CAMSS_CPHY_3_CLK] = &gcc_camss_cphy_3_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK_SRC] = &gcc_camss_csi0phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK_SRC] = &gcc_camss_csi1phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK] = &gcc_camss_csi2phytimer_clk.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK_SRC] = &gcc_camss_csi2phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI3PHYTIMER_CLK] = &gcc_camss_csi3phytimer_clk.clkr,
+ [GCC_CAMSS_CSI3PHYTIMER_CLK_SRC] = &gcc_camss_csi3phytimer_clk_src.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK_SRC] = &gcc_camss_mclk0_clk_src.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK_SRC] = &gcc_camss_mclk1_clk_src.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK_SRC] = &gcc_camss_mclk2_clk_src.clkr,
+ [GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr,
+ [GCC_CAMSS_MCLK3_CLK_SRC] = &gcc_camss_mclk3_clk_src.clkr,
+ [GCC_CAMSS_MCLK4_CLK] = &gcc_camss_mclk4_clk.clkr,
+ [GCC_CAMSS_MCLK4_CLK_SRC] = &gcc_camss_mclk4_clk_src.clkr,
+ [GCC_CAMSS_NRT_AXI_CLK] = &gcc_camss_nrt_axi_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK] = &gcc_camss_ope_ahb_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK_SRC] = &gcc_camss_ope_ahb_clk_src.clkr,
+ [GCC_CAMSS_OPE_CLK] = &gcc_camss_ope_clk.clkr,
+ [GCC_CAMSS_OPE_CLK_SRC] = &gcc_camss_ope_clk_src.clkr,
+ [GCC_CAMSS_RT_AXI_CLK] = &gcc_camss_rt_axi_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK] = &gcc_camss_tfe_0_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK_SRC] = &gcc_camss_tfe_0_clk_src.clkr,
+ [GCC_CAMSS_TFE_0_CPHY_RX_CLK] = &gcc_camss_tfe_0_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK] = &gcc_camss_tfe_0_csid_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK_SRC] = &gcc_camss_tfe_0_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CLK] = &gcc_camss_tfe_1_clk.clkr,
+ [GCC_CAMSS_TFE_1_CLK_SRC] = &gcc_camss_tfe_1_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CPHY_RX_CLK] = &gcc_camss_tfe_1_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK] = &gcc_camss_tfe_1_csid_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK_SRC] = &gcc_camss_tfe_1_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_2_CLK] = &gcc_camss_tfe_2_clk.clkr,
+ [GCC_CAMSS_TFE_2_CLK_SRC] = &gcc_camss_tfe_2_clk_src.clkr,
+ [GCC_CAMSS_TFE_2_CPHY_RX_CLK] = &gcc_camss_tfe_2_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_2_CSID_CLK] = &gcc_camss_tfe_2_csid_clk.clkr,
+ [GCC_CAMSS_TFE_2_CSID_CLK_SRC] = &gcc_camss_tfe_2_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_CPHY_RX_CLK_SRC] = &gcc_camss_tfe_cphy_rx_clk_src.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SLEEP_CLK] = &gcc_disp_sleep_clk.clkr,
+ [GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_THROTTLE_CORE_CLK] = &gcc_gpu_throttle_core_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_SYS_NOC_UFS_PHY_AXI_CLK] = &gcc_sys_noc_ufs_phy_axi_clk.clkr,
+ [GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_VCODEC0_AXI_CLK] = &gcc_vcodec0_axi_clk.clkr,
+ [GCC_VENUS_AHB_CLK] = &gcc_venus_ahb_clk.clkr,
+ [GCC_VENUS_CTL_AXI_CLK] = &gcc_venus_ctl_axi_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_THROTTLE_CORE_CLK] = &gcc_video_throttle_core_clk.clkr,
+ [GCC_VIDEO_VCODEC0_SYS_CLK] = &gcc_video_vcodec0_sys_clk.clkr,
+ [GCC_VIDEO_VENUS_CLK_SRC] = &gcc_video_venus_clk_src.clkr,
+ [GCC_VIDEO_VENUS_CTL_CLK] = &gcc_video_venus_ctl_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_RX5_PCIE_CLKREF_EN_CLK] = &gcc_rx5_pcie_clkref_en_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL0_OUT_ODD] = &gpll0_out_odd.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL10] = &gpll10.clkr,
+ [GPLL11] = &gpll11.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_OUT_EVEN] = &gpll3_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL5] = &gpll5.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_EVEN] = &gpll6_out_even.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_EVEN] = &gpll8_out_even.clkr,
+ [GPLL9] = &gpll9.clkr,
+ [GPLL9_OUT_MAIN] = &gpll9_out_main.clkr,
+};
+
+static const struct qcom_reset_map gcc_sm6375_resets[] = {
+ [GCC_MMSS_BCR] = { 0x17000 },
+ [GCC_USB30_PRIM_BCR] = { 0x1a000 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x1b000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x1b020 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x1c000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x1c004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+ [GCC_SDCC2_BCR] = { 0x1e000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x1f000 },
+ [GCC_PDM_BCR] = { 0x20000 },
+ [GCC_GPU_BCR] = { 0x36000 },
+ [GCC_SDCC1_BCR] = { 0x38000 },
+ [GCC_UFS_PHY_BCR] = { 0x45000 },
+ [GCC_CAMSS_TFE_BCR] = { 0x52000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x53000 },
+ [GCC_CAMSS_OPE_BCR] = { 0x55000 },
+ [GCC_CAMSS_TOP_BCR] = { 0x58000 },
+ [GCC_VENUS_BCR] = { 0x58078 },
+ [GCC_VCODEC0_BCR] = { 0x58094 },
+ [GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
+static struct gdsc *gcc_sm6375_gdscs[] = {
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [CAMSS_TOP_GDSC] = &camss_top_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VCODEC0_GDSC] = &vcodec0_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+};
+
+static const struct regmap_config gcc_sm6375_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc7000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm6375_desc = {
+ .config = &gcc_sm6375_regmap_config,
+ .clks = gcc_sm6375_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm6375_clocks),
+ .resets = gcc_sm6375_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm6375_resets),
+ .gdscs = gcc_sm6375_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm6375_gdscs),
+};
+
+static const struct of_device_id gcc_sm6375_match_table[] = {
+ { .compatible = "qcom,sm6375-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm6375_match_table);
+
+static int gcc_sm6375_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm6375_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
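+	/* Set up DFS (dynamic frequency switching) support for the QUP wrapper RCGs */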
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /*
+ * Keep the following clocks always on:
+ * GCC_CAMERA_XO_CLK, GCC_CPUSS_GNOC_CLK, GCC_DISP_XO_CLK
+ */
+ regmap_update_bits(regmap, 0x17028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x2b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x1702c, BIT(0), BIT(0));
+
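+	/* Program the Lucid PLLs (GPLL8, GPLL10, GPLL11) and the Zonda PLL (GPLL9) */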
+ clk_lucid_pll_configure(&gpll10, regmap, &gpll10_config);
+ clk_lucid_pll_configure(&gpll11, regmap, &gpll11_config);
+ clk_lucid_pll_configure(&gpll8, regmap, &gpll8_config);
+ clk_zonda_pll_configure(&gpll9, regmap, &gpll9_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_sm6375_desc, regmap);
+}
+
+static struct platform_driver gcc_sm6375_driver = {
+ .probe = gcc_sm6375_probe,
+ .driver = {
+ .name = "gcc-sm6375",
+ .of_match_table = gcc_sm6375_match_table,
+ },
+};
+
+static int __init gcc_sm6375_init(void)
+{
+ return platform_driver_register(&gcc_sm6375_driver);
+}
+subsys_initcall(gcc_sm6375_init);
+
+static void __exit gcc_sm6375_exit(void)
+{
+ platform_driver_unregister(&gcc_sm6375_driver);
+}
+module_exit(gcc_sm6375_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM6375 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index d3244006c661..7cf5e130e92f 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -368,6 +368,16 @@ static int _gdsc_disable(struct gdsc *sc)
if (sc->pwrsts & PWRSTS_OFF)
gdsc_clear_mem_on(sc);
+	/*
+	 * If the only state the GDSC supports besides ON is retention,
+	 * leave it in the ON state: there is no SW control to move a
+	 * GDSC into retention, the transition happens in HW when the
+	 * parent domain enters a low-power state.
+	 */
+ if (sc->pwrsts == PWRSTS_RET_ON)
+ return 0;
+
ret = gdsc_toggle_logic(sc, GDSC_OFF);
if (ret)
return ret;
@@ -439,11 +449,8 @@ static int gdsc_init(struct gdsc *sc)
/* ...and the power-domain */
ret = gdsc_pm_runtime_get(sc);
- if (ret) {
- if (sc->rsupply)
- regulator_disable(sc->rsupply);
- return ret;
- }
+ if (ret)
+ goto err_disable_supply;
/*
* Votable GDSCs can be ON due to Vote from other masters.
@@ -452,14 +459,14 @@ static int gdsc_init(struct gdsc *sc)
if (sc->flags & VOTABLE) {
ret = gdsc_update_collapse_bit(sc, false);
if (ret)
- return ret;
+ goto err_put_rpm;
}
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
- return ret;
+ goto err_put_rpm;
}
/*
@@ -486,9 +493,21 @@ static int gdsc_init(struct gdsc *sc)
sc->pd.power_off = gdsc_disable;
if (!sc->pd.power_on)
sc->pd.power_on = gdsc_enable;
- pm_genpd_init(&sc->pd, NULL, !on);
+
+ ret = pm_genpd_init(&sc->pd, NULL, !on);
+ if (ret)
+ goto err_put_rpm;
return 0;
+
+err_put_rpm:
+ if (on)
+ gdsc_pm_runtime_put(sc);
+err_disable_supply:
+ if (on && sc->rsupply)
+ regulator_disable(sc->rsupply);
+
+ return ret;
}
int gdsc_register(struct gdsc_desc *desc,
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 5de48c9439b2..981a12c8502d 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -49,6 +49,11 @@ struct gdsc {
const u8 pwrsts;
/* Powerdomain allowable state bitfields */
#define PWRSTS_OFF BIT(0)
+/*
+ * There is no SW control to transition a GDSC into PWRSTS_RET;
+ * the transition happens in HW when the parent domain goes down
+ * to a low-power state.
+ */
#define PWRSTS_RET BIT(1)
#define PWRSTS_ON BIT(2)
#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
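
For illustration, a minimal sketch of how a retention-only power domain would use this (the GDSC name and register offset below are hypothetical, not taken from this patch): declaring .pwrsts = PWRSTS_RET_ON makes the new check in _gdsc_disable() leave the GDSC ON in software, and HW drops it to retention together with the parent domain.

	/* Hypothetical retention-only GDSC; name and gdscr offset are made up */
	static struct gdsc foo_gdsc = {
		.gdscr = 0x8004,
		.pd = {
			.name = "foo_gdsc",
		},
		/* no SW-driven collapse; HW handles the drop to retention */
		.pwrsts = PWRSTS_RET_ON,
	};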
diff --git a/drivers/clk/qcom/gpucc-sc8280xp.c b/drivers/clk/qcom/gpucc-sc8280xp.c
new file mode 100644
index 000000000000..ea1e9505c335
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sc8280xp.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sc8280xp.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_GCC_GPU_GPLL0_CLK_SRC,
+ DT_GCC_GPU_GPLL0_DIV_CLK_SRC,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPU_GPLL0_CLK_SRC,
+ P_GCC_GPU_GPLL0_DIV_CLK_SRC,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct clk_parent_data parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static const struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1800000000, 0 },
+};
+
+static struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x1c,
+ .alpha = 0xa555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct alpha_pll_config gpu_cc_pll1_config = {
+	.l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GCC_GPU_GPLL0_CLK_SRC, 5 },
+ { P_GCC_GPU_GPLL0_DIV_CLK_SRC, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GCC_GPU_GPLL0_CLK_SRC, 5 },
+ { P_GCC_GPU_GPLL0_DIV_CLK_SRC, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GCC_GPU_GPLL0_DIV_CLK_SRC, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(200000000, P_GCC_GPU_GPLL0_CLK_SRC, 3, 0, 0),
+ F(300000000, P_GCC_GPU_GPLL0_CLK_SRC, 2, 0, 0),
+ F(400000000, P_GCC_GPU_GPLL0_CLK_SRC, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x117c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x11c0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x11bc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x1178,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1178,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x1204,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1204,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gpu_cc_sc8280xp_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc *gpu_cc_sc8280xp_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &cx_gdsc,
+ [GPU_CC_GX_GDSC] = &gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sc8280xp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8030,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc gpu_cc_sc8280xp_desc = {
+ .config = &gpu_cc_sc8280xp_regmap_config,
+ .clks = gpu_cc_sc8280xp_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sc8280xp_clocks),
+ .gdscs = gpu_cc_sc8280xp_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sc8280xp_gdscs),
+};
+
+static int gpu_cc_sc8280xp_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sc8280xp_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
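+	/* Program both Lucid 5LPE GPU PLLs before registering the clock controller */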
+ clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+	/*
+	 * Keep the following clocks always on:
+	 * GPU_CC_CB_CLK, GPU_CC_CXO_CLK
+	 */
+ regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x109c, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sc8280xp_desc, regmap);
+}
+
+static const struct of_device_id gpu_cc_sc8280xp_match_table[] = {
+ { .compatible = "qcom,sc8280xp-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sc8280xp_match_table);
+
+static struct platform_driver gpu_cc_sc8280xp_driver = {
+ .probe = gpu_cc_sc8280xp_probe,
+ .driver = {
+ .name = "gpu_cc-sc8280xp",
+ .of_match_table = gpu_cc_sc8280xp_match_table,
+ },
+};
+module_platform_driver(gpu_cc_sc8280xp_driver);
+
+MODULE_DESCRIPTION("Qualcomm SC8280XP GPU clock controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
index 88d4b33ac0cc..b1b370274ec4 100644
--- a/drivers/clk/qcom/kpss-xcc.c
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -12,9 +12,9 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-static const char *aux_parents[] = {
- "pll8_vote",
- "pxo",
+static const struct clk_parent_data aux_parents[] = {
+ { .fw_name = "pll8_vote", .name = "pll8_vote" },
+ { .fw_name = "pxo", .name = "pxo_board" },
};
static const u32 aux_parent_map[] = {
@@ -32,8 +32,8 @@ MODULE_DEVICE_TABLE(of, kpss_xcc_match_table);
static int kpss_xcc_driver_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
- struct clk *clk;
void __iomem *base;
+ struct clk_hw *hw;
const char *name;
id = of_match_device(kpss_xcc_match_table, &pdev->dev);
@@ -55,24 +55,16 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev)
base += 0x28;
}
- clk = clk_register_mux_table(&pdev->dev, name, aux_parents,
- ARRAY_SIZE(aux_parents), 0, base, 0, 0x3,
- 0, aux_parent_map, NULL);
+ hw = devm_clk_hw_register_mux_parent_data_table(&pdev->dev, name, aux_parents,
+ ARRAY_SIZE(aux_parents), 0,
+ base, 0, 0x3,
+ 0, aux_parent_map, NULL);
- platform_set_drvdata(pdev, clk);
-
- return PTR_ERR_OR_ZERO(clk);
-}
-
-static int kpss_xcc_driver_remove(struct platform_device *pdev)
-{
- clk_unregister_mux(platform_get_drvdata(pdev));
- return 0;
+ return PTR_ERR_OR_ZERO(hw);
}
static struct platform_driver kpss_xcc_driver = {
.probe = kpss_xcc_driver_probe,
- .remove = kpss_xcc_driver_remove,
.driver = {
.name = "kpss-xcc",
.of_match_table = kpss_xcc_match_table,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 1a2be4aeb31d..81a44a9a9abc 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -22,6 +22,7 @@
#include "clk-branch.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "reset.h"
static struct clk_pll pll4 = {
.l_reg = 0x4,
@@ -33,7 +34,9 @@ static struct clk_pll pll4 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll4",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -63,9 +66,9 @@ static const struct parent_map lcc_pxo_pll4_map[] = {
{ P_PLL4, 2 }
};
-static const char * const lcc_pxo_pll4[] = {
- "pxo",
- "pll4_vote",
+static const struct clk_parent_data lcc_pxo_pll4[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll4_vote", .name = "pll4_vote" },
};
static struct freq_tbl clk_tbl_aif_mi2s[] = {
@@ -130,18 +133,14 @@ static struct clk_rcg mi2s_osr_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "mi2s_osr_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
},
};
-static const char * const lcc_mi2s_parents[] = {
- "mi2s_osr_src",
-};
-
static struct clk_branch mi2s_osr_clk = {
.halt_reg = 0x50,
.halt_bit = 1,
@@ -151,7 +150,9 @@ static struct clk_branch mi2s_osr_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "mi2s_osr_clk",
- .parent_names = lcc_mi2s_parents,
+ .parent_hws = (const struct clk_hw*[]) {
+ &mi2s_osr_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -166,7 +167,9 @@ static struct clk_regmap_div mi2s_div_clk = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "mi2s_div_clk",
- .parent_names = lcc_mi2s_parents,
+ .parent_hws = (const struct clk_hw*[]) {
+ &mi2s_osr_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_regmap_div_ops,
},
@@ -182,7 +185,9 @@ static struct clk_branch mi2s_bit_div_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "mi2s_bit_div_clk",
- .parent_names = (const char *[]){ "mi2s_div_clk" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &mi2s_div_clk.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -190,6 +195,10 @@ static struct clk_branch mi2s_bit_div_clk = {
},
};
+static const struct clk_parent_data lcc_mi2s_bit_div_codec_clk[] = {
+ { .hw = &mi2s_bit_div_clk.clkr.hw, },
+ { .fw_name = "mi2s_codec", .name = "mi2s_codec_clk" },
+};
static struct clk_regmap_mux mi2s_bit_clk = {
.reg = 0x48,
@@ -198,11 +207,8 @@ static struct clk_regmap_mux mi2s_bit_clk = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "mi2s_bit_clk",
- .parent_names = (const char *[]){
- "mi2s_bit_div_clk",
- "mi2s_codec_clk",
- },
- .num_parents = 2,
+ .parent_data = lcc_mi2s_bit_div_codec_clk,
+ .num_parents = ARRAY_SIZE(lcc_mi2s_bit_div_codec_clk),
.ops = &clk_regmap_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -244,8 +250,8 @@ static struct clk_rcg pcm_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "pcm_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -261,7 +267,9 @@ static struct clk_branch pcm_clk_out = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "pcm_clk_out",
- .parent_names = (const char *[]){ "pcm_src" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcm_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -269,6 +277,11 @@ static struct clk_branch pcm_clk_out = {
},
};
+static const struct clk_parent_data lcc_pcm_clk_out_codec_clk[] = {
+ { .hw = &pcm_clk_out.clkr.hw, },
+ { .fw_name = "pcm_codec_clk", .name = "pcm_codec_clk" },
+};
+
static struct clk_regmap_mux pcm_clk = {
.reg = 0x54,
.shift = 10,
@@ -276,11 +289,8 @@ static struct clk_regmap_mux pcm_clk = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "pcm_clk",
- .parent_names = (const char *[]){
- "pcm_clk_out",
- "pcm_codec_clk",
- },
- .num_parents = 2,
+ .parent_data = lcc_pcm_clk_out_codec_clk,
+ .num_parents = ARRAY_SIZE(lcc_pcm_clk_out_codec_clk),
.ops = &clk_regmap_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -324,18 +334,14 @@ static struct clk_rcg spdif_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "spdif_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
},
};
-static const char * const lcc_spdif_parents[] = {
- "spdif_src",
-};
-
static struct clk_branch spdif_clk = {
.halt_reg = 0xd4,
.halt_bit = 1,
@@ -345,7 +351,9 @@ static struct clk_branch spdif_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "spdif_clk",
- .parent_names = lcc_spdif_parents,
+ .parent_hws = (const struct clk_hw*[]) {
+ &spdif_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -383,8 +391,8 @@ static struct clk_rcg ahbix_clk = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "ahbix",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_lcc_ops,
},
},
@@ -405,6 +413,10 @@ static struct clk_regmap *lcc_ipq806x_clks[] = {
[AHBIX_CLK] = &ahbix_clk.clkr,
};
+static const struct qcom_reset_map lcc_ipq806x_resets[] = {
+ [LCC_PCM_RESET] = { 0x54, 13 },
+};
+
static const struct regmap_config lcc_ipq806x_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -417,6 +429,8 @@ static const struct qcom_cc_desc lcc_ipq806x_desc = {
.config = &lcc_ipq806x_regmap_config,
.clks = lcc_ipq806x_clks,
.num_clks = ARRAY_SIZE(lcc_ipq806x_clks),
+ .resets = lcc_ipq806x_resets,
+ .num_resets = ARRAY_SIZE(lcc_ipq806x_resets),
};
static const struct of_device_id lcc_ipq806x_match_table[] = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index 84817cf2b6bd..3926184cc91b 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -33,7 +33,9 @@ static struct clk_pll pll4 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll4",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -49,9 +51,9 @@ static const struct parent_map lcc_pxo_pll4_map[] = {
{ P_PLL4, 2 }
};
-static const char * const lcc_pxo_pll4[] = {
- "pxo",
- "pll4_vote",
+static const struct clk_parent_data lcc_pxo_pll4[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll4_vote", .name = "pll4_vote" },
};
static struct freq_tbl clk_tbl_aif_osr_492[] = {
@@ -86,112 +88,7 @@ static struct freq_tbl clk_tbl_aif_osr_393[] = {
{ }
};
-static struct clk_rcg mi2s_osr_src = {
- .ns_reg = 0x48,
- .md_reg = 0x4c,
- .mn = {
- .mnctr_en_bit = 8,
- .mnctr_reset_bit = 7,
- .mnctr_mode_shift = 5,
- .n_val_shift = 24,
- .m_val_shift = 8,
- .width = 8,
- },
- .p = {
- .pre_div_shift = 3,
- .pre_div_width = 2,
- },
- .s = {
- .src_sel_shift = 0,
- .parent_map = lcc_pxo_pll4_map,
- },
- .freq_tbl = clk_tbl_aif_osr_393,
- .clkr = {
- .enable_reg = 0x48,
- .enable_mask = BIT(9),
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_osr_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
- .ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
- },
- },
-};
-
-static const char * const lcc_mi2s_parents[] = {
- "mi2s_osr_src",
-};
-
-static struct clk_branch mi2s_osr_clk = {
- .halt_reg = 0x50,
- .halt_bit = 1,
- .halt_check = BRANCH_HALT_ENABLE,
- .clkr = {
- .enable_reg = 0x48,
- .enable_mask = BIT(17),
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_osr_clk",
- .parent_names = lcc_mi2s_parents,
- .num_parents = 1,
- .ops = &clk_branch_ops,
- .flags = CLK_SET_RATE_PARENT,
- },
- },
-};
-
-static struct clk_regmap_div mi2s_div_clk = {
- .reg = 0x48,
- .shift = 10,
- .width = 4,
- .clkr = {
- .enable_reg = 0x48,
- .enable_mask = BIT(15),
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_div_clk",
- .parent_names = lcc_mi2s_parents,
- .num_parents = 1,
- .ops = &clk_regmap_div_ops,
- },
- },
-};
-
-static struct clk_branch mi2s_bit_div_clk = {
- .halt_reg = 0x50,
- .halt_bit = 0,
- .halt_check = BRANCH_HALT_ENABLE,
- .clkr = {
- .enable_reg = 0x48,
- .enable_mask = BIT(15),
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_bit_div_clk",
- .parent_names = (const char *[]){ "mi2s_div_clk" },
- .num_parents = 1,
- .ops = &clk_branch_ops,
- .flags = CLK_SET_RATE_PARENT,
- },
- },
-};
-
-static struct clk_regmap_mux mi2s_bit_clk = {
- .reg = 0x48,
- .shift = 14,
- .width = 1,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_bit_clk",
- .parent_names = (const char *[]){
- "mi2s_bit_div_clk",
- "mi2s_codec_clk",
- },
- .num_parents = 2,
- .ops = &clk_regmap_mux_closest_ops,
- .flags = CLK_SET_RATE_PARENT,
- },
- },
-};
-
-#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr) \
+#define CLK_AIF_OSR_SRC(prefix, _ns, _md) \
static struct clk_rcg prefix##_osr_src = { \
.ns_reg = _ns, \
.md_reg = _md, \
@@ -217,85 +114,103 @@ static struct clk_rcg prefix##_osr_src = { \
.enable_mask = BIT(9), \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_osr_src", \
- .parent_names = lcc_pxo_pll4, \
- .num_parents = 2, \
+ .parent_data = lcc_pxo_pll4, \
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4), \
.ops = &clk_rcg_ops, \
.flags = CLK_SET_RATE_GATE, \
}, \
}, \
}; \
- \
-static const char * const lcc_##prefix##_parents[] = { \
- #prefix "_osr_src", \
-}; \
- \
+
+#define CLK_AIF_OSR_CLK(prefix, _ns, hr, en_bit) \
static struct clk_branch prefix##_osr_clk = { \
.halt_reg = hr, \
.halt_bit = 1, \
.halt_check = BRANCH_HALT_ENABLE, \
.clkr = { \
.enable_reg = _ns, \
- .enable_mask = BIT(21), \
+ .enable_mask = BIT(en_bit), \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_osr_clk", \
- .parent_names = lcc_##prefix##_parents, \
+ .parent_hws = (const struct clk_hw*[]){ \
+ &prefix##_osr_src.clkr.hw, \
+ }, \
.num_parents = 1, \
.ops = &clk_branch_ops, \
.flags = CLK_SET_RATE_PARENT, \
}, \
}, \
}; \
- \
+
+#define CLK_AIF_OSR_DIV_CLK(prefix, _ns, _width) \
static struct clk_regmap_div prefix##_div_clk = { \
.reg = _ns, \
.shift = 10, \
- .width = 8, \
+ .width = _width, \
.clkr = { \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_div_clk", \
- .parent_names = lcc_##prefix##_parents, \
+ .parent_hws = (const struct clk_hw*[]){ \
+ &prefix##_osr_src.clkr.hw, \
+ }, \
.num_parents = 1, \
.ops = &clk_regmap_div_ops, \
}, \
}, \
}; \
- \
+
+#define CLK_AIF_OSR_BIT_DIV_CLK(prefix, _ns, hr, en_bit) \
static struct clk_branch prefix##_bit_div_clk = { \
.halt_reg = hr, \
.halt_bit = 0, \
.halt_check = BRANCH_HALT_ENABLE, \
.clkr = { \
.enable_reg = _ns, \
- .enable_mask = BIT(19), \
+ .enable_mask = BIT(en_bit), \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_bit_div_clk", \
- .parent_names = (const char *[]){ \
- #prefix "_div_clk" \
- }, \
+ .parent_hws = (const struct clk_hw*[]){ \
+ &prefix##_div_clk.clkr.hw, \
+ }, \
.num_parents = 1, \
.ops = &clk_branch_ops, \
.flags = CLK_SET_RATE_PARENT, \
}, \
}, \
}; \
- \
+
+#define CLK_AIF_OSR_BIT_CLK(prefix, _ns, _shift) \
static struct clk_regmap_mux prefix##_bit_clk = { \
.reg = _ns, \
- .shift = 18, \
+ .shift = _shift, \
.width = 1, \
.clkr = { \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_bit_clk", \
- .parent_names = (const char *[]){ \
- #prefix "_bit_div_clk", \
- #prefix "_codec_clk", \
+ .parent_data = (const struct clk_parent_data[]){ \
+ { .hw = &prefix##_bit_div_clk.clkr.hw, }, \
+ { .fw_name = #prefix "_codec_clk", \
+ .name = #prefix "_codec_clk", }, \
}, \
.num_parents = 2, \
.ops = &clk_regmap_mux_closest_ops, \
.flags = CLK_SET_RATE_PARENT, \
}, \
}, \
-}
+};
+
+CLK_AIF_OSR_SRC(mi2s, 0x48, 0x4c)
+CLK_AIF_OSR_CLK(mi2s, 0x48, 0x50, 17)
+CLK_AIF_OSR_DIV_CLK(mi2s, 0x48, 4)
+CLK_AIF_OSR_BIT_DIV_CLK(mi2s, 0x48, 0x50, 15)
+CLK_AIF_OSR_BIT_CLK(mi2s, 0x48, 14)
+
+#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr) \
+ CLK_AIF_OSR_SRC(prefix, _ns, _md) \
+ CLK_AIF_OSR_CLK(prefix, _ns, hr, 21) \
+ CLK_AIF_OSR_DIV_CLK(prefix, _ns, 8) \
+ CLK_AIF_OSR_BIT_DIV_CLK(prefix, _ns, hr, 19) \
+ CLK_AIF_OSR_BIT_CLK(prefix, _ns, 18)
CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
@@ -361,8 +276,8 @@ static struct clk_rcg pcm_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "pcm_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -378,7 +293,9 @@ static struct clk_branch pcm_clk_out = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "pcm_clk_out",
- .parent_names = (const char *[]){ "pcm_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pcm_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -393,9 +310,9 @@ static struct clk_regmap_mux pcm_clk = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "pcm_clk",
- .parent_names = (const char *[]){
- "pcm_clk_out",
- "pcm_codec_clk",
+ .parent_data = (const struct clk_parent_data[]){
+ { .hw = &pcm_clk_out.clkr.hw },
+ { .fw_name = "pcm_codec_clk", .name = "pcm_codec_clk" },
},
.num_parents = 2,
.ops = &clk_regmap_mux_closest_ops,
@@ -429,18 +346,14 @@ static struct clk_rcg slimbus_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "slimbus_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
},
};
-static const char * const lcc_slimbus_parents[] = {
- "slimbus_src",
-};
-
static struct clk_branch audio_slimbus_clk = {
.halt_reg = 0xd4,
.halt_bit = 0,
@@ -450,7 +363,9 @@ static struct clk_branch audio_slimbus_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "audio_slimbus_clk",
- .parent_names = lcc_slimbus_parents,
+ .parent_hws = (const struct clk_hw*[]){
+ &slimbus_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -467,7 +382,9 @@ static struct clk_branch sps_slimbus_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "sps_slimbus_clk",
- .parent_names = lcc_slimbus_parents,
+ .parent_hws = (const struct clk_hw*[]){
+ &slimbus_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 6ab6e5a34c72..063e0365f311 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -12,6 +12,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <dt-bindings/clock/qcom,lpass-sc7280.h>
#include <dt-bindings/clock/qcom,lpassaudiocc-sc7280.h>
#include "clk-alpha-pll.h"
@@ -22,6 +23,7 @@
#include "clk-regmap-mux.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"
enum {
P_BI_TCXO,
@@ -38,6 +40,32 @@ static const struct pll_vco zonda_vco[] = {
{ 595200000UL, 3600000000UL, 0 },
};
+static struct clk_branch lpass_q6ss_ahbm_clk = {
+ .halt_reg = 0x901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_q6ss_ahbs_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
/* 1128.96MHz configuration */
static const struct alpha_pll_config lpass_audio_cc_pll_config = {
.l = 0x3a,
@@ -221,7 +249,7 @@ static struct clk_rcg2 lpass_aon_cc_main_rcg_clk_src = {
.parent_data = lpass_aon_cc_parent_data_0,
.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_data_0),
.flags = CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -614,6 +642,11 @@ static struct gdsc lpass_aon_cc_lpass_audio_hm_gdsc = {
.flags = RETAIN_FF_ENABLE,
};
+static struct clk_regmap *lpass_cc_sc7280_clocks[] = {
+ [LPASS_Q6SS_AHBM_CLK] = &lpass_q6ss_ahbm_clk.clkr,
+ [LPASS_Q6SS_AHBS_CLK] = &lpass_q6ss_ahbs_clk.clkr,
+};
+
static struct clk_regmap *lpass_aon_cc_sc7280_clocks[] = {
[LPASS_AON_CC_AUDIO_HM_H_CLK] = &lpass_aon_cc_audio_hm_h_clk.clkr,
[LPASS_AON_CC_VA_MEM0_CLK] = &lpass_aon_cc_va_mem0_clk.clkr,
@@ -659,12 +692,30 @@ static struct regmap_config lpass_audio_cc_sc7280_regmap_config = {
.fast_io = true,
};
+static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
+ .config = &lpass_audio_cc_sc7280_regmap_config,
+ .clks = lpass_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
+};
+
static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
.config = &lpass_audio_cc_sc7280_regmap_config,
.clks = lpass_audio_cc_sc7280_clocks,
.num_clks = ARRAY_SIZE(lpass_audio_cc_sc7280_clocks),
};
+static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
+ [LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
+};
+
+static const struct qcom_cc_desc lpass_audio_cc_reset_sc7280_desc = {
+ .config = &lpass_audio_cc_sc7280_regmap_config,
+ .resets = lpass_audio_cc_sc7280_resets,
+ .num_resets = ARRAY_SIZE(lpass_audio_cc_sc7280_resets),
+};
+
static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
{ .compatible = "qcom,sc7280-lpassaudiocc" },
{ }
@@ -741,6 +792,13 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
return ret;
}
+ ret = qcom_cc_probe_by_index(pdev, 1, &lpass_audio_cc_reset_sc7280_desc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC Resets\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
@@ -785,6 +843,12 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
+ lpass_audio_cc_sc7280_regmap_config.name = "cc";
+ desc = &lpass_cc_sc7280_desc;
+ return qcom_cc_probe(pdev, desc);
+ }
+
lpass_audio_cc_sc7280_regmap_config.name = "lpasscc_aon";
lpass_audio_cc_sc7280_regmap_config.max_register = 0xa0008;
desc = &lpass_aon_cc_sc7280_desc;
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
index b39ee1c9647b..5c1e17bd0d76 100644
--- a/drivers/clk/qcom/lpasscc-sc7280.c
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -17,32 +17,6 @@
#include "clk-branch.h"
#include "common.h"
-static struct clk_branch lpass_q6ss_ahbm_clk = {
- .halt_reg = 0x1c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x1c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "lpass_q6ss_ahbm_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch lpass_q6ss_ahbs_clk = {
- .halt_reg = 0x20,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x20,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "lpass_q6ss_ahbs_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch lpass_top_cc_lpi_q6_axim_hs_clk = {
.halt_reg = 0x0,
.halt_check = BRANCH_HALT,
@@ -105,17 +79,6 @@ static struct regmap_config lpass_regmap_config = {
.fast_io = true,
};
-static struct clk_regmap *lpass_cc_sc7280_clocks[] = {
- [LPASS_Q6SS_AHBM_CLK] = &lpass_q6ss_ahbm_clk.clkr,
- [LPASS_Q6SS_AHBS_CLK] = &lpass_q6ss_ahbs_clk.clkr,
-};
-
-static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
- .config = &lpass_regmap_config,
- .clks = lpass_cc_sc7280_clocks,
- .num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
-};
-
static struct clk_regmap *lpass_cc_top_sc7280_clocks[] = {
[LPASS_TOP_CC_LPI_Q6_AXIM_HS_CLK] =
&lpass_top_cc_lpi_q6_axim_hs_clk.clkr,
@@ -169,13 +132,6 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
if (ret)
goto destroy_pm_clk;
- lpass_regmap_config.name = "cc";
- desc = &lpass_cc_sc7280_desc;
-
- ret = qcom_cc_probe_by_index(pdev, 2, desc);
- if (ret)
- goto destroy_pm_clk;
-
return 0;
destroy_pm_clk:
diff --git a/drivers/clk/qcom/lpasscorecc-sc7280.c b/drivers/clk/qcom/lpasscorecc-sc7280.c
index 1f1f1bd1b68e..6ad19b06b1ce 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7280.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7280.c
@@ -190,6 +190,19 @@ static struct clk_rcg2 lpass_core_cc_ext_if1_clk_src = {
},
};
+static struct clk_rcg2 lpass_core_cc_ext_mclk0_clk_src = {
+ .cmd_rcgr = 0x20000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_0,
+ .freq_tbl = ftbl_lpass_core_cc_ext_if0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_ext_mclk0_clk_src",
+ .parent_data = lpass_core_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(lpass_core_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
static struct clk_branch lpass_core_cc_core_clk = {
.halt_reg = 0x1f000,
@@ -283,6 +296,24 @@ static struct clk_branch lpass_core_cc_lpm_mem0_core_clk = {
},
};
+static struct clk_branch lpass_core_cc_ext_mclk0_clk = {
+ .halt_reg = 0x20014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_ext_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_ext_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch lpass_core_cc_sysnoc_mport_core_clk = {
.halt_reg = 0x23000,
.halt_check = BRANCH_HALT_VOTED,
@@ -326,6 +357,8 @@ static struct clk_regmap *lpass_core_cc_sc7280_clocks[] = {
[LPASS_CORE_CC_LPM_CORE_CLK] = &lpass_core_cc_lpm_core_clk.clkr,
[LPASS_CORE_CC_LPM_MEM0_CORE_CLK] = &lpass_core_cc_lpm_mem0_core_clk.clkr,
[LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK] = &lpass_core_cc_sysnoc_mport_core_clk.clkr,
+ [LPASS_CORE_CC_EXT_MCLK0_CLK] = &lpass_core_cc_ext_mclk0_clk.clkr,
+ [LPASS_CORE_CC_EXT_MCLK0_CLK_SRC] = &lpass_core_cc_ext_mclk0_clk_src.clkr,
};
static struct regmap_config lpass_core_cc_sc7280_regmap_config = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index aaaad65b6458..6bf908a51f53 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -41,70 +41,6 @@ enum {
#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
-static const struct parent_map mmcc_pxo_pll8_pll2_map[] = {
- { P_PXO, 0 },
- { P_PLL8, 2 },
- { P_PLL2, 1 }
-};
-
-static const char * const mmcc_pxo_pll8_pll2[] = {
- "pxo",
- "pll8_vote",
- "pll2",
-};
-
-static const struct parent_map mmcc_pxo_pll8_pll2_pll3_map[] = {
- { P_PXO, 0 },
- { P_PLL8, 2 },
- { P_PLL2, 1 },
- { P_PLL3, 3 }
-};
-
-static const char * const mmcc_pxo_pll8_pll2_pll15[] = {
- "pxo",
- "pll8_vote",
- "pll2",
- "pll15",
-};
-
-static const struct parent_map mmcc_pxo_pll8_pll2_pll15_map[] = {
- { P_PXO, 0 },
- { P_PLL8, 2 },
- { P_PLL2, 1 },
- { P_PLL15, 3 }
-};
-
-static const char * const mmcc_pxo_pll8_pll2_pll3[] = {
- "pxo",
- "pll8_vote",
- "pll2",
- "pll3",
-};
-
-static const struct parent_map mmcc_pxo_dsi2_dsi1_map[] = {
- { P_PXO, 0 },
- { P_DSI2_PLL_DSICLK, 1 },
- { P_DSI1_PLL_DSICLK, 3 },
-};
-
-static const char * const mmcc_pxo_dsi2_dsi1[] = {
- "pxo",
- "dsi2pll",
- "dsi1pll",
-};
-
-static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
- { P_PXO, 0 },
- { P_DSI1_PLL_BYTECLK, 1 },
- { P_DSI2_PLL_BYTECLK, 2 },
-};
-
-static const char * const mmcc_pxo_dsi1_dsi2_byte[] = {
- "pxo",
- "dsi1pllbyte",
- "dsi2pllbyte",
-};
-
static struct clk_pll pll2 = {
.l_reg = 0x320,
.m_reg = 0x324,
@@ -115,7 +51,9 @@ static struct clk_pll pll2 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll2",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -131,7 +69,9 @@ static struct clk_pll pll15 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll15",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -151,6 +91,70 @@ static const struct pll_config pll15_config = {
.main_output_mask = BIT(23),
};
+static const struct parent_map mmcc_pxo_pll8_pll2_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 }
+};
+
+static const struct clk_parent_data mmcc_pxo_pll8_pll2[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll8_vote", .name = "pll8_vote" },
+ { .hw = &pll2.clkr.hw },
+};
+
+static const struct parent_map mmcc_pxo_pll8_pll2_pll3_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 },
+ { P_PLL3, 3 }
+};
+
+static const struct clk_parent_data mmcc_pxo_pll8_pll2_pll15[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll8_vote", .name = "pll8_vote" },
+ { .hw = &pll2.clkr.hw },
+ { .hw = &pll15.clkr.hw },
+};
+
+static const struct parent_map mmcc_pxo_pll8_pll2_pll15_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 },
+ { P_PLL15, 3 }
+};
+
+static const struct clk_parent_data mmcc_pxo_pll8_pll2_pll3[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll8_vote", .name = "pll8_vote" },
+ { .hw = &pll2.clkr.hw },
+ { .fw_name = "pll3", .name = "pll3" },
+};
+
+static const struct parent_map mmcc_pxo_dsi2_dsi1_map[] = {
+ { P_PXO, 0 },
+ { P_DSI2_PLL_DSICLK, 1 },
+ { P_DSI1_PLL_DSICLK, 3 },
+};
+
+static const struct clk_parent_data mmcc_pxo_dsi2_dsi1[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "dsi2pll", .name = "dsi2pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
+};
+
+static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
+ { P_PXO, 0 },
+ { P_DSI1_PLL_BYTECLK, 1 },
+ { P_DSI2_PLL_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data mmcc_pxo_dsi1_dsi2_byte[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "dsi1pllbyte", .name = "dsi1pllbyte" },
+ { .fw_name = "dsi2pllbyte", .name = "dsi2pllbyte" },
+};
+
static struct freq_tbl clk_tbl_cam[] = {
{ 6000000, P_PLL8, 4, 1, 16 },
{ 8000000, P_PLL8, 4, 1, 12 },
@@ -192,8 +196,8 @@ static struct clk_rcg camclk0_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "camclk0_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -207,7 +211,9 @@ static struct clk_branch camclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camclk0_clk",
- .parent_names = (const char *[]){ "camclk0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camclk0_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -241,8 +247,8 @@ static struct clk_rcg camclk1_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "camclk1_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -256,7 +262,9 @@ static struct clk_branch camclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camclk1_clk",
- .parent_names = (const char *[]){ "camclk1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camclk1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -290,8 +298,8 @@ static struct clk_rcg camclk2_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "camclk2_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -305,7 +313,9 @@ static struct clk_branch camclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camclk2_clk",
- .parent_names = (const char *[]){ "camclk2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camclk2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -345,8 +355,8 @@ static struct clk_rcg csi0_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csi0_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -359,7 +369,9 @@ static struct clk_branch csi0_clk = {
.enable_reg = 0x0040,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi0_clk",
.ops = &clk_branch_ops,
@@ -375,7 +387,9 @@ static struct clk_branch csi0_phy_clk = {
.enable_reg = 0x0040,
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi0_phy_clk",
.ops = &clk_branch_ops,
@@ -409,8 +423,8 @@ static struct clk_rcg csi1_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csi1_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -423,7 +437,9 @@ static struct clk_branch csi1_clk = {
.enable_reg = 0x0024,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi1_clk",
.ops = &clk_branch_ops,
@@ -439,7 +455,9 @@ static struct clk_branch csi1_phy_clk = {
.enable_reg = 0x0024,
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi1_phy_clk",
.ops = &clk_branch_ops,
@@ -473,8 +491,8 @@ static struct clk_rcg csi2_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csi2_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -487,7 +505,9 @@ static struct clk_branch csi2_clk = {
.enable_reg = 0x022c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi2_clk",
.ops = &clk_branch_ops,
@@ -503,7 +523,9 @@ static struct clk_branch csi2_phy_clk = {
.enable_reg = 0x022c,
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi2_phy_clk",
.ops = &clk_branch_ops,
@@ -602,10 +624,10 @@ static const struct clk_ops clk_ops_pix_rdi = {
.determine_rate = __clk_mux_determine_rate,
};
-static const char * const pix_rdi_parents[] = {
- "csi0_clk",
- "csi1_clk",
- "csi2_clk",
+static const struct clk_hw *pix_rdi_parents[] = {
+ &csi0_clk.clkr.hw,
+ &csi1_clk.clkr.hw,
+ &csi2_clk.clkr.hw,
};
static struct clk_pix_rdi csi_pix_clk = {
@@ -618,8 +640,8 @@ static struct clk_pix_rdi csi_pix_clk = {
.enable_mask = BIT(26),
.hw.init = &(struct clk_init_data){
.name = "csi_pix_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -635,8 +657,8 @@ static struct clk_pix_rdi csi_pix1_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "csi_pix1_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -652,8 +674,8 @@ static struct clk_pix_rdi csi_rdi_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "csi_rdi_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -669,8 +691,8 @@ static struct clk_pix_rdi csi_rdi1_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csi_rdi1_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -686,8 +708,8 @@ static struct clk_pix_rdi csi_rdi2_clk = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "csi_rdi2_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -725,15 +747,13 @@ static struct clk_rcg csiphytimer_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csiphytimer_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
};
-static const char * const csixphy_timer_src[] = { "csiphytimer_src" };
-
static struct clk_branch csiphy0_timer_clk = {
.halt_reg = 0x01e8,
.halt_bit = 17,
@@ -741,7 +761,9 @@ static struct clk_branch csiphy0_timer_clk = {
.enable_reg = 0x0160,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = csixphy_timer_src,
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphytimer_src.clkr.hw,
+ },
.num_parents = 1,
.name = "csiphy0_timer_clk",
.ops = &clk_branch_ops,
@@ -757,7 +779,9 @@ static struct clk_branch csiphy1_timer_clk = {
.enable_reg = 0x0160,
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
- .parent_names = csixphy_timer_src,
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphytimer_src.clkr.hw,
+ },
.num_parents = 1,
.name = "csiphy1_timer_clk",
.ops = &clk_branch_ops,
@@ -773,7 +797,9 @@ static struct clk_branch csiphy2_timer_clk = {
.enable_reg = 0x0160,
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
- .parent_names = csixphy_timer_src,
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphytimer_src.clkr.hw,
+ },
.num_parents = 1,
.name = "csiphy2_timer_clk",
.ops = &clk_branch_ops,
@@ -835,8 +861,8 @@ static struct clk_dyn_rcg gfx2d0_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gfx2d0_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -850,7 +876,9 @@ static struct clk_branch gfx2d0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gfx2d0_clk",
- .parent_names = (const char *[]){ "gfx2d0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx2d0_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -895,8 +923,8 @@ static struct clk_dyn_rcg gfx2d1_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gfx2d1_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -910,7 +938,9 @@ static struct clk_branch gfx2d1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gfx2d1_clk",
- .parent_names = (const char *[]){ "gfx2d1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx2d1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -996,8 +1026,8 @@ static struct clk_dyn_rcg gfx3d_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gfx3d_src",
- .parent_names = mmcc_pxo_pll8_pll2_pll3,
- .num_parents = 4,
+ .parent_data = mmcc_pxo_pll8_pll2_pll3,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2_pll3),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1005,8 +1035,8 @@ static struct clk_dyn_rcg gfx3d_src = {
static const struct clk_init_data gfx3d_8064_init = {
.name = "gfx3d_src",
- .parent_names = mmcc_pxo_pll8_pll2_pll15,
- .num_parents = 4,
+ .parent_data = mmcc_pxo_pll8_pll2_pll15,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2_pll15),
.ops = &clk_dyn_rcg_ops,
};
@@ -1018,7 +1048,9 @@ static struct clk_branch gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk",
- .parent_names = (const char *[]){ "gfx3d_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1074,8 +1106,8 @@ static struct clk_dyn_rcg vcap_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "vcap_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1089,7 +1121,9 @@ static struct clk_branch vcap_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vcap_clk",
- .parent_names = (const char *[]){ "vcap_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vcap_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1105,7 +1139,9 @@ static struct clk_branch vcap_npl_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "vcap_npl_clk",
- .parent_names = (const char *[]){ "vcap_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vcap_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1153,8 +1189,8 @@ static struct clk_rcg ijpeg_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "ijpeg_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -1168,7 +1204,9 @@ static struct clk_branch ijpeg_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "ijpeg_clk",
- .parent_names = (const char *[]){ "ijpeg_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ijpeg_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1201,8 +1239,8 @@ static struct clk_rcg jpegd_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "jpegd_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -1216,7 +1254,9 @@ static struct clk_branch jpegd_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "jpegd_clk",
- .parent_names = (const char *[]){ "jpegd_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpegd_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1281,8 +1321,8 @@ static struct clk_dyn_rcg mdp_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "mdp_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1296,7 +1336,9 @@ static struct clk_branch mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdp_clk",
- .parent_names = (const char *[]){ "mdp_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1311,7 +1353,9 @@ static struct clk_branch mdp_lut_clk = {
.enable_reg = 0x016c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "mdp_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_src.clkr.hw
+ },
.num_parents = 1,
.name = "mdp_lut_clk",
.ops = &clk_branch_ops,
@@ -1328,7 +1372,9 @@ static struct clk_branch mdp_vsync_clk = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "mdp_vsync_clk",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.ops = &clk_branch_ops
},
@@ -1380,8 +1426,8 @@ static struct clk_dyn_rcg rot_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "rot_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1395,7 +1441,9 @@ static struct clk_branch rot_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "rot_clk",
- .parent_names = (const char *[]){ "rot_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &rot_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1408,9 +1456,9 @@ static const struct parent_map mmcc_pxo_hdmi_map[] = {
{ P_HDMI_PLL, 3 }
};
-static const char * const mmcc_pxo_hdmi[] = {
- "pxo",
- "hdmi_pll",
+static const struct clk_parent_data mmcc_pxo_hdmi[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "hdmipll", .name = "hdmi_pll" },
};
static struct freq_tbl clk_tbl_tv[] = {
@@ -1443,16 +1491,14 @@ static struct clk_rcg tv_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "tv_src",
- .parent_names = mmcc_pxo_hdmi,
- .num_parents = 2,
+ .parent_data = mmcc_pxo_hdmi,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_hdmi),
.ops = &clk_rcg_bypass_ops,
.flags = CLK_SET_RATE_PARENT,
},
},
};
-static const char * const tv_src_name[] = { "tv_src" };
-
static struct clk_branch tv_enc_clk = {
.halt_reg = 0x01d4,
.halt_bit = 9,
@@ -1460,7 +1506,9 @@ static struct clk_branch tv_enc_clk = {
.enable_reg = 0x00ec,
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "tv_enc_clk",
.ops = &clk_branch_ops,
@@ -1476,7 +1524,9 @@ static struct clk_branch tv_dac_clk = {
.enable_reg = 0x00ec,
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "tv_dac_clk",
.ops = &clk_branch_ops,
@@ -1492,7 +1542,9 @@ static struct clk_branch mdp_tv_clk = {
.enable_reg = 0x00ec,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "mdp_tv_clk",
.ops = &clk_branch_ops,
@@ -1508,7 +1560,9 @@ static struct clk_branch hdmi_tv_clk = {
.enable_reg = 0x00ec,
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "hdmi_tv_clk",
.ops = &clk_branch_ops,
@@ -1524,7 +1578,9 @@ static struct clk_branch rgb_tv_clk = {
.enable_reg = 0x0124,
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "rgb_tv_clk",
.ops = &clk_branch_ops,
@@ -1540,7 +1596,9 @@ static struct clk_branch npl_tv_clk = {
.enable_reg = 0x0124,
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "npl_tv_clk",
.ops = &clk_branch_ops,
@@ -1556,7 +1614,9 @@ static struct clk_branch hdmi_app_clk = {
.enable_reg = 0x005c,
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.name = "hdmi_app_clk",
.ops = &clk_branch_ops,
@@ -1614,8 +1674,8 @@ static struct clk_dyn_rcg vcodec_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "vcodec_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1629,7 +1689,9 @@ static struct clk_branch vcodec_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vcodec_clk",
- .parent_names = (const char *[]){ "vcodec_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1665,8 +1727,8 @@ static struct clk_rcg vpe_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "vpe_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -1680,7 +1742,9 @@ static struct clk_branch vpe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpe_clk",
- .parent_names = (const char *[]){ "vpe_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vpe_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1733,8 +1797,8 @@ static struct clk_rcg vfe_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "vfe_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -1748,7 +1812,9 @@ static struct clk_branch vfe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vfe_clk",
- .parent_names = (const char *[]){ "vfe_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1763,7 +1829,9 @@ static struct clk_branch vfe_csi_clk = {
.enable_reg = 0x0104,
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "vfe_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe_src.clkr.hw
+ },
.num_parents = 1,
.name = "vfe_csi_clk",
.ops = &clk_branch_ops,
@@ -2067,8 +2135,8 @@ static struct clk_rcg dsi1_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi1_src",
- .parent_names = mmcc_pxo_dsi2_dsi1,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi2_dsi1,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
.ops = &clk_rcg_bypass2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2083,7 +2151,9 @@ static struct clk_branch dsi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi1_clk",
- .parent_names = (const char *[]){ "dsi1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2115,8 +2185,8 @@ static struct clk_rcg dsi2_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_src",
- .parent_names = mmcc_pxo_dsi2_dsi1,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi2_dsi1,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
.ops = &clk_rcg_bypass2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2131,7 +2201,9 @@ static struct clk_branch dsi2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi2_clk",
- .parent_names = (const char *[]){ "dsi2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2154,8 +2226,8 @@ static struct clk_rcg dsi1_byte_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi1_byte_src",
- .parent_names = mmcc_pxo_dsi1_dsi2_byte,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi1_dsi2_byte),
.ops = &clk_rcg_bypass2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2170,7 +2242,9 @@ static struct clk_branch dsi1_byte_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi1_byte_clk",
- .parent_names = (const char *[]){ "dsi1_byte_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi1_byte_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2193,8 +2267,8 @@ static struct clk_rcg dsi2_byte_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_byte_src",
- .parent_names = mmcc_pxo_dsi1_dsi2_byte,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi1_dsi2_byte),
.ops = &clk_rcg_bypass2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2209,7 +2283,9 @@ static struct clk_branch dsi2_byte_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi2_byte_clk",
- .parent_names = (const char *[]){ "dsi2_byte_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_byte_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2232,8 +2308,8 @@ static struct clk_rcg dsi1_esc_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi1_esc_src",
- .parent_names = mmcc_pxo_dsi1_dsi2_byte,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi1_dsi2_byte),
.ops = &clk_rcg_esc_ops,
},
},
@@ -2247,7 +2323,9 @@ static struct clk_branch dsi1_esc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi1_esc_clk",
- .parent_names = (const char *[]){ "dsi1_esc_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi1_esc_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2270,8 +2348,8 @@ static struct clk_rcg dsi2_esc_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_esc_src",
- .parent_names = mmcc_pxo_dsi1_dsi2_byte,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi1_dsi2_byte),
.ops = &clk_rcg_esc_ops,
},
},
@@ -2285,7 +2363,9 @@ static struct clk_branch dsi2_esc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi2_esc_clk",
- .parent_names = (const char *[]){ "dsi2_esc_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_esc_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2317,8 +2397,8 @@ static struct clk_rcg dsi1_pixel_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi1_pixel_src",
- .parent_names = mmcc_pxo_dsi2_dsi1,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi2_dsi1,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
.ops = &clk_rcg_pixel_ops,
},
},
@@ -2332,7 +2412,9 @@ static struct clk_branch dsi1_pixel_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdp_pclk1_clk",
- .parent_names = (const char *[]){ "dsi1_pixel_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi1_pixel_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2364,8 +2446,8 @@ static struct clk_rcg dsi2_pixel_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_pixel_src",
- .parent_names = mmcc_pxo_dsi2_dsi1,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi2_dsi1,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
.ops = &clk_rcg_pixel_ops,
},
},
@@ -2379,7 +2461,9 @@ static struct clk_branch dsi2_pixel_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdp_pclk2_clk",
- .parent_names = (const char *[]){ "dsi2_pixel_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
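
Throughout the msm8960 conversions above, .fw_name is matched against the consumer's clock-names in the device tree, .name is a legacy fallback looked up in the global clock namespace for older DTs, and .hw points directly at a parent registered by the same driver. A minimal sketch of the three forms (the array name is illustrative, not taken from this patch):

static const struct clk_parent_data example_parents[] = {
	/* resolved via DT clock-names, with a legacy global-name fallback */
	{ .fw_name = "pxo", .name = "pxo_board" },
	/* direct pointer to a clk_hw registered by this driver */
	{ .hw = &pll2.clkr.hw },
};
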
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 819d194be8f7..2a16adb572d2 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -13,8 +13,10 @@
static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
+ struct qcom_reset_controller *rst = to_qcom_reset_controller(rcdev);
+
rcdev->ops->assert(rcdev, id);
- udelay(1);
+ udelay(rst->reset_map[id].udelay ?: 1); /* use 1 us as default */
rcdev->ops->deassert(rcdev, id);
return 0;
}
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 2a08b5e282c7..b8c113582072 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -11,6 +11,7 @@
struct qcom_reset_map {
unsigned int reg;
u8 bit;
+ u8 udelay;
};
struct regmap;
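
With the qcom_reset() change above, each entry in a controller's reset map may now state how long the reset line is held asserted; entries that leave the field zero keep the previous 1 us pulse. A hypothetical map entry using the new field (the index, register offset, bit and delay are invented for illustration):

static const struct qcom_reset_map example_cc_resets[] = {
	/* hold this BCR asserted for 150 us before deasserting */
	[EXAMPLE_BLOCK_BCR] = { .reg = 0x1000, .bit = 0, .udelay = 150 },
};
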
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index cd80b6084ece..4baf355e26d8 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -108,7 +108,13 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_FIXED("cbfusa", R8A779F0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A779F0_CLK_CPEX, CLK_EXTAL, 2, 1),
- DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, CLK_SDSRC, 0x870),
+ DEF_FIXED("sasyncrt", R8A779F0_CLK_SASYNCRT, CLK_PLL5_DIV4, 48, 1),
+ DEF_FIXED("sasyncperd1", R8A779F0_CLK_SASYNCPERD1, CLK_PLL5_DIV4, 3, 1),
+ DEF_FIXED("sasyncperd2", R8A779F0_CLK_SASYNCPERD2, R8A779F0_CLK_SASYNCPERD1, 2, 1),
+ DEF_FIXED("sasyncperd4", R8A779F0_CLK_SASYNCPERD4, R8A779F0_CLK_SASYNCPERD1, 4, 1),
+
+ DEF_GEN4_SDH("sdh0", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870),
+ DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, 0x870),
DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC),
@@ -130,6 +136,10 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
DEF_MOD("i2c3", 521, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c4", 522, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c5", 523, R8A779F0_CLK_S0D6_PER),
+ DEF_MOD("msiof0", 618, R8A779F0_CLK_MSO),
+ DEF_MOD("msiof1", 619, R8A779F0_CLK_MSO),
+ DEF_MOD("msiof2", 620, R8A779F0_CLK_MSO),
+ DEF_MOD("msiof3", 621, R8A779F0_CLK_MSO),
DEF_MOD("pcie0", 624, R8A779F0_CLK_S0D2),
DEF_MOD("pcie1", 625, R8A779F0_CLK_S0D2),
DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER),
@@ -139,7 +149,16 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
DEF_MOD("sdhi0", 706, R8A779F0_CLK_SD0),
DEF_MOD("sys-dmac0", 709, R8A779F0_CLK_S0D3_PER),
DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER),
+ DEF_MOD("tmu0", 713, R8A779F0_CLK_SASYNCRT),
+ DEF_MOD("tmu1", 714, R8A779F0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu2", 715, R8A779F0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu3", 716, R8A779F0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu4", 717, R8A779F0_CLK_SASYNCPERD2),
DEF_MOD("wdt", 907, R8A779F0_CLK_R),
+ DEF_MOD("cmt0", 910, R8A779F0_CLK_R),
+ DEF_MOD("cmt1", 911, R8A779F0_CLK_R),
+ DEF_MOD("cmt2", 912, R8A779F0_CLK_R),
+ DEF_MOD("cmt3", 913, R8A779F0_CLK_R),
DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC),
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 3fc4233b1ead..9641122133b5 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -150,10 +150,24 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
+ DEF_MOD("avb0", 211, R8A779G0_CLK_S0D4_HSC),
+ DEF_MOD("avb1", 212, R8A779G0_CLK_S0D4_HSC),
+ DEF_MOD("avb2", 213, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("hscif0", 514, R8A779G0_CLK_S0D3_PER),
DEF_MOD("hscif1", 515, R8A779G0_CLK_S0D3_PER),
DEF_MOD("hscif2", 516, R8A779G0_CLK_S0D3_PER),
DEF_MOD("hscif3", 517, R8A779G0_CLK_S0D3_PER),
+ DEF_MOD("i2c0", 518, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c1", 519, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c2", 520, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c3", 521, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c4", 522, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c5", 523, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("wdt1:wdt0", 907, R8A779G0_CLK_R),
+ DEF_MOD("pfc0", 915, R8A779G0_CLK_CL16M),
+ DEF_MOD("pfc1", 916, R8A779G0_CLK_CL16M),
+ DEF_MOD("pfc2", 917, R8A779G0_CLK_CL16M),
+ DEF_MOD("pfc3", 918, R8A779G0_CLK_CL16M),
};
/*
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index fd7c4eecd398..02a4fc41bb6e 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -414,6 +414,7 @@ static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A07G044_DMAC_ACLK,
};
+#ifdef CONFIG_CLK_R9A07G044
const struct rzg2l_cpg_info r9a07g044_cpg_info = {
/* Core Clocks */
.core_clks = core_clks.common,
@@ -436,6 +437,7 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = {
.has_clk_mon_regs = true,
};
+#endif
#ifdef CONFIG_CLK_R9A07G054
const struct rzg2l_cpg_info r9a07g054_cpg_info = {
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index b21915cf6648..fbef1b35d254 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -132,6 +132,8 @@ static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
+ DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12),
+ DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12),
DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
@@ -143,6 +145,8 @@ static const struct rzg2l_reset r9a09g011_resets[] = {
DEF_RST(R9A09G011_PFC_PRESETN, 0x600, 2),
DEF_RST_MON(R9A09G011_ETH0_RST_HW_N, 0x608, 11, 11),
DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13),
+ DEF_RST(R9A09G011_IIC_GPA_PRESETN, 0x614, 8),
+ DEF_RST(R9A09G011_IIC_GPB_PRESETN, 0x614, 9),
DEF_RST_MON(R9A09G011_WDT0_PRESETN, 0x614, 12, 19),
};
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 3067bdb6e119..345a5d2a457c 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -23,6 +23,13 @@ config CLK_RV110X
help
Build the driver for RV110x Clock Driver.
+config CLK_RV1126
+ bool "Rockchip RV1126 clock controller support"
+ depends on ARM || COMPILE_TEST
+ default y
+ help
+ Build the driver for RV1126 Clock Driver.
+
config CLK_RK3036
bool "Rockchip RK3036 clock controller support"
depends on ARM || COMPILE_TEST
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 2b78f1247372..e8543876c056 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -17,6 +17,7 @@ clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-$(CONFIG_CLK_PX30) += clk-px30.o
obj-$(CONFIG_CLK_RV110X) += clk-rv1108.o
+obj-$(CONFIG_CLK_RV1126) += clk-rv1126.o
obj-$(CONFIG_CLK_RK3036) += clk-rk3036.o
obj-$(CONFIG_CLK_RK312X) += clk-rk3128.o
obj-$(CONFIG_CLK_RK3188) += clk-rk3188.o
diff --git a/drivers/clk/rockchip/clk-rv1126.c b/drivers/clk/rockchip/clk-rv1126.c
new file mode 100644
index 000000000000..c18790f5d05b
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rv1126.c
@@ -0,0 +1,1138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rockchip,rv1126-cru.h>
+#include "clk.h"
+
+#define RV1126_GMAC_CON 0x460
+#define RV1126_GRF_IOFUNC_CON1 0x10264
+#define RV1126_GRF_SOC_STATUS0 0x10
+
+#define RV1126_FRAC_MAX_PRATE 1200000000
+#define RV1126_CSIOUT_FRAC_MAX_PRATE 300000000
+
+enum rv1126_pmu_plls {
+ gpll,
+};
+
+enum rv1126_plls {
+ apll, dpll, cpll, hpll,
+};
+
+static struct rockchip_pll_rate_table rv1126_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1600000000, 3, 200, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 132, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 130, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 128, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 126, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 124, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 122, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 120, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 118, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1400000000, 3, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 116, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 114, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 112, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 110, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 108, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 106, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 104, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 100, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 92, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 3, 275, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 3, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE(936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(900000000, 1, 75, 2, 1, 1, 0),
+ RK3036_PLL_RATE(888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE(864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE(840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 3, 200, 2, 1, 1, 0),
+ RK3036_PLL_RATE(700000000, 3, 350, 4, 1, 1, 0),
+ RK3036_PLL_RATE(696000000, 1, 116, 4, 1, 1, 0),
+ RK3036_PLL_RATE(624000000, 1, 104, 4, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 100, 4, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 1, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE(504000000, 1, 84, 4, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 1, 125, 6, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
+ { /* sentinel */ },
+};
+
+#define RV1126_DIV_ACLK_CORE_MASK 0xf
+#define RV1126_DIV_ACLK_CORE_SHIFT 4
+#define RV1126_DIV_PCLK_DBG_MASK 0x7
+#define RV1126_DIV_PCLK_DBG_SHIFT 0
+
+#define RV1126_CLKSEL1(_aclk_core, _pclk_dbg) \
+{ \
+ .reg = RV1126_CLKSEL_CON(1), \
+ .val = HIWORD_UPDATE(_aclk_core, RV1126_DIV_ACLK_CORE_MASK, \
+ RV1126_DIV_ACLK_CORE_SHIFT) | \
+ HIWORD_UPDATE(_pclk_dbg, RV1126_DIV_PCLK_DBG_MASK, \
+ RV1126_DIV_PCLK_DBG_SHIFT), \
+}
+
+#define RV1126_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RV1126_CLKSEL1(_aclk_core, _pclk_dbg), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table rv1126_cpuclk_rates[] __initdata = {
+ RV1126_CPUCLK_RATE(1608000000, 1, 7),
+ RV1126_CPUCLK_RATE(1584000000, 1, 7),
+ RV1126_CPUCLK_RATE(1560000000, 1, 7),
+ RV1126_CPUCLK_RATE(1536000000, 1, 7),
+ RV1126_CPUCLK_RATE(1512000000, 1, 7),
+ RV1126_CPUCLK_RATE(1488000000, 1, 5),
+ RV1126_CPUCLK_RATE(1464000000, 1, 5),
+ RV1126_CPUCLK_RATE(1440000000, 1, 5),
+ RV1126_CPUCLK_RATE(1416000000, 1, 5),
+ RV1126_CPUCLK_RATE(1392000000, 1, 5),
+ RV1126_CPUCLK_RATE(1368000000, 1, 5),
+ RV1126_CPUCLK_RATE(1344000000, 1, 5),
+ RV1126_CPUCLK_RATE(1320000000, 1, 5),
+ RV1126_CPUCLK_RATE(1296000000, 1, 5),
+ RV1126_CPUCLK_RATE(1272000000, 1, 5),
+ RV1126_CPUCLK_RATE(1248000000, 1, 5),
+ RV1126_CPUCLK_RATE(1224000000, 1, 5),
+ RV1126_CPUCLK_RATE(1200000000, 1, 5),
+ RV1126_CPUCLK_RATE(1104000000, 1, 5),
+ RV1126_CPUCLK_RATE(1008000000, 1, 5),
+ RV1126_CPUCLK_RATE(912000000, 1, 5),
+ RV1126_CPUCLK_RATE(816000000, 1, 3),
+ RV1126_CPUCLK_RATE(696000000, 1, 3),
+ RV1126_CPUCLK_RATE(600000000, 1, 3),
+ RV1126_CPUCLK_RATE(408000000, 1, 1),
+ RV1126_CPUCLK_RATE(312000000, 1, 1),
+ RV1126_CPUCLK_RATE(216000000, 1, 1),
+ RV1126_CPUCLK_RATE(96000000, 1, 1),
+};
+
+static const struct rockchip_cpuclk_reg_data rv1126_cpuclk_data = {
+ .core_reg[0] = RV1126_CLKSEL_CON(0),
+ .div_core_shift[0] = 0,
+ .div_core_mask[0] = 0x1f,
+ .num_cores = 1,
+ .mux_core_alt = 0,
+ .mux_core_main = 2,
+ .mux_core_shift = 6,
+ .mux_core_mask = 0x3,
+};
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(mux_rtc32k_p) = { "clk_pmupvtm_divout", "xin32k", "clk_osc0_div32k" };
+PNAME(mux_wifi_p) = { "clk_wifi_osc0", "clk_wifi_div" };
+PNAME(mux_gpll_usb480m_cpll_xin24m_p) = { "gpll", "usb480m", "cpll", "xin24m" };
+PNAME(mux_uart1_p) = { "sclk_uart1_div", "sclk_uart1_fracdiv", "xin24m" };
+PNAME(mux_xin24m_gpll_p) = { "xin24m", "gpll" };
+PNAME(mux_gpll_xin24m_p) = { "gpll", "xin24m" };
+PNAME(mux_xin24m_32k_p) = { "xin24m", "clk_rtc32k" };
+PNAME(mux_usbphy_otg_ref_p) = { "clk_ref12m", "xin_osc0_div2_usbphyref_otg" };
+PNAME(mux_usbphy_host_ref_p) = { "clk_ref12m", "xin_osc0_div2_usbphyref_host" };
+PNAME(mux_mipidsiphy_ref_p) = { "clk_ref24m", "xin_osc0_mipiphyref" };
+PNAME(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc32k" };
+PNAME(mux_armclk_p) = { "gpll", "cpll", "apll" };
+PNAME(mux_gpll_cpll_dpll_p) = { "gpll", "cpll", "dummy_dpll" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(mux_hclk_pclk_pdbus_p) = { "gpll", "dummy_cpll" };
+PNAME(mux_gpll_cpll_usb480m_xin24m_p) = { "gpll", "cpll", "usb480m", "xin24m" };
+PNAME(mux_uart0_p) = { "sclk_uart0_div", "sclk_uart0_frac", "xin24m" };
+PNAME(mux_uart2_p) = { "sclk_uart2_div", "sclk_uart2_frac", "xin24m" };
+PNAME(mux_uart3_p) = { "sclk_uart3_div", "sclk_uart3_frac", "xin24m" };
+PNAME(mux_uart4_p) = { "sclk_uart4_div", "sclk_uart4_frac", "xin24m" };
+PNAME(mux_uart5_p) = { "sclk_uart5_div", "sclk_uart5_frac", "xin24m" };
+PNAME(mux_cpll_gpll_p) = { "cpll", "gpll" };
+PNAME(mux_i2s0_tx_p) = { "mclk_i2s0_tx_div", "mclk_i2s0_tx_fracdiv", "i2s0_mclkin", "xin12m" };
+PNAME(mux_i2s0_rx_p) = { "mclk_i2s0_rx_div", "mclk_i2s0_rx_fracdiv", "i2s0_mclkin", "xin12m" };
+PNAME(mux_i2s0_tx_out2io_p) = { "mclk_i2s0_tx", "xin12m" };
+PNAME(mux_i2s0_rx_out2io_p) = { "mclk_i2s0_rx", "xin12m" };
+PNAME(mux_i2s1_p) = { "mclk_i2s1_div", "mclk_i2s1_fracdiv", "i2s1_mclkin", "xin12m" };
+PNAME(mux_i2s1_out2io_p) = { "mclk_i2s1", "xin12m" };
+PNAME(mux_i2s2_p) = { "mclk_i2s2_div", "mclk_i2s2_fracdiv", "i2s2_mclkin", "xin12m" };
+PNAME(mux_i2s2_out2io_p) = { "mclk_i2s2", "xin12m" };
+PNAME(mux_gpll_cpll_xin24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(mux_audpwm_p) = { "sclk_audpwm_div", "sclk_audpwm_fracdiv", "xin24m" };
+PNAME(mux_usb480m_gpll_p) = { "usb480m", "gpll" };
+PNAME(clk_gmac_src_m0_p) = { "clk_gmac_div", "clk_gmac_rgmii_m0" };
+PNAME(clk_gmac_src_m1_p) = { "clk_gmac_div", "clk_gmac_rgmii_m1" };
+PNAME(mux_clk_gmac_src_p) = { "clk_gmac_src_m0", "clk_gmac_src_m1" };
+PNAME(mux_rgmii_clk_p) = { "clk_gmac_tx_div50", "clk_gmac_tx_div5", "clk_gmac_tx_src", "clk_gmac_tx_src"};
+PNAME(mux_rmii_clk_p) = { "clk_gmac_rx_div20", "clk_gmac_rx_div2" };
+PNAME(mux_gmac_tx_rx_p) = { "rgmii_mode_clk", "rmii_mode_clk" };
+PNAME(mux_dpll_gpll_p) = { "dpll", "gpll" };
+
+static u32 rgmii_mux_idx[] = { 2, 3, 0, 1 };
+
+static struct rockchip_pll_clock rv1126_pmu_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ 0, RV1126_PMU_PLL_CON(0),
+ RV1126_PMU_MODE, 0, 3, 0, rv1126_pll_rates),
+};
+
+static struct rockchip_pll_clock rv1126_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ 0, RV1126_PLL_CON(0),
+ RV1126_MODE_CON, 0, 0, 0, rv1126_pll_rates),
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ 0, RV1126_PLL_CON(8),
+ RV1126_MODE_CON, 2, 1, 0, NULL),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ 0, RV1126_PLL_CON(16),
+ RV1126_MODE_CON, 4, 2, 0, rv1126_pll_rates),
+ [hpll] = PLL(pll_rk3328, PLL_HPLL, "hpll", mux_pll_p,
+ 0, RV1126_PLL_CON(24),
+ RV1126_MODE_CON, 6, 4, 0, rv1126_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch rv1126_rtc32k_fracmux __initdata =
+ MUX(CLK_RTC32K, "clk_rtc32k", mux_rtc32k_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(0), 7, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart1_fracmux __initdata =
+ MUX(SCLK_UART1_MUX, "sclk_uart1_mux", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(4), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart0_fracmux __initdata =
+ MUX(SCLK_UART0_MUX, "sclk_uart0_mux", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(10), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart2_fracmux __initdata =
+ MUX(SCLK_UART2_MUX, "sclk_uart2_mux", mux_uart2_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(12), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart3_fracmux __initdata =
+ MUX(SCLK_UART3_MUX, "sclk_uart3_mux", mux_uart3_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(14), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart4_fracmux __initdata =
+ MUX(SCLK_UART4_MUX, "sclk_uart4_mux", mux_uart4_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(16), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart5_fracmux __initdata =
+ MUX(SCLK_UART5_MUX, "sclk_uart5_mux", mux_uart5_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(18), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_i2s0_tx_fracmux __initdata =
+ MUX(MCLK_I2S0_TX_MUX, "mclk_i2s0_tx_mux", mux_i2s0_tx_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(30), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_i2s0_rx_fracmux __initdata =
+ MUX(MCLK_I2S0_RX_MUX, "mclk_i2s0_rx_mux", mux_i2s0_rx_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(30), 2, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_i2s1_fracmux __initdata =
+ MUX(MCLK_I2S1_MUX, "mclk_i2s1_mux", mux_i2s1_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(31), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_i2s2_fracmux __initdata =
+ MUX(MCLK_I2S2_MUX, "mclk_i2s2_mux", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(33), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_audpwm_fracmux __initdata =
+ MUX(SCLK_AUDPWM_MUX, "mclk_audpwm_mux", mux_audpwm_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(36), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_clk_pmu_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 2
+ */
+ /* PD_PMU */
+ COMPOSITE_NOMUX(PCLK_PDPMU, "pclk_pdpmu", "gpll", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKSEL_CON(1), 0, 5, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(0), 0, GFLAGS),
+
+ COMPOSITE_FRACMUX(CLK_OSC0_DIV32K, "clk_osc0_div32k", "xin24m", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKSEL_CON(13), 0,
+ RV1126_PMU_CLKGATE_CON(2), 9, GFLAGS,
+ &rv1126_rtc32k_fracmux),
+
+ COMPOSITE_NOMUX(CLK_WIFI_DIV, "clk_wifi_div", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(12), 0, 6, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(2), 10, GFLAGS),
+ GATE(CLK_WIFI_OSC0, "clk_wifi_osc0", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(2), 11, GFLAGS),
+ MUX(CLK_WIFI, "clk_wifi", mux_wifi_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(12), 8, 1, MFLAGS),
+
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(0), 1, GFLAGS),
+
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(0), 11, GFLAGS),
+ COMPOSITE(SCLK_UART1_DIV, "sclk_uart1_div", mux_gpll_usb480m_cpll_xin24m_p, 0,
+ RV1126_PMU_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART1_FRACDIV, "sclk_uart1_fracdiv", "sclk_uart1_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(5), 0,
+ RV1126_PMU_CLKGATE_CON(0), 13, GFLAGS,
+ &rv1126_uart1_fracmux),
+ GATE(SCLK_UART1, "sclk_uart1", "sclk_uart1_mux", 0,
+ RV1126_PMU_CLKGATE_CON(0), 14, GFLAGS),
+
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C0, "clk_i2c0", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(2), 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C2, "clk_i2c2", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(3), 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(0), 10, GFLAGS),
+
+ GATE(CLK_CAPTURE_PWM0, "clk_capture_pwm0", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(CLK_PWM0, "clk_pwm0", mux_xin24m_gpll_p, 0,
+ RV1126_PMU_CLKSEL_CON(6), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(CLK_CAPTURE_PWM1, "clk_capture_pwm1", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(1), 3, GFLAGS),
+ COMPOSITE(CLK_PWM1, "clk_pwm1", mux_xin24m_gpll_p, 0,
+ RV1126_PMU_CLKSEL_CON(6), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 4, GFLAGS),
+
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(1), 11, GFLAGS),
+ COMPOSITE(CLK_SPI0, "clk_spi0", mux_gpll_xin24m_p, 0,
+ RV1126_PMU_CLKSEL_CON(9), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 12, GFLAGS),
+
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", mux_xin24m_32k_p, 0,
+ RV1126_PMU_CLKSEL_CON(8), 15, 1, MFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 10, GFLAGS),
+
+ GATE(PCLK_PMUPVTM, "pclk_pmupvtm", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(2), 6, GFLAGS),
+ GATE(CLK_PMUPVTM, "clk_pmupvtm", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(CLK_CORE_PMUPVTM, "clk_core_pmupvtm", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(2), 7, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_REF12M, "clk_ref12m", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(7), 8, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 15, GFLAGS),
+ GATE(0, "xin_osc0_usbphyref_otg", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "xin_osc0_usbphyref_host", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 7, GFLAGS),
+ FACTOR(0, "xin_osc0_div2_usbphyref_otg", "xin_osc0_usbphyref_otg", 0, 1, 2),
+ FACTOR(0, "xin_osc0_div2_usbphyref_host", "xin_osc0_usbphyref_host", 0, 1, 2),
+ MUX(CLK_USBPHY_OTG_REF, "clk_usbphy_otg_ref", mux_usbphy_otg_ref_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(7), 6, 1, MFLAGS),
+ MUX(CLK_USBPHY_HOST_REF, "clk_usbphy_host_ref", mux_usbphy_host_ref_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(7), 7, 1, MFLAGS),
+
+ COMPOSITE_NOMUX(CLK_REF24M, "clk_ref24m", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(7), 0, 6, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 14, GFLAGS),
+ GATE(0, "xin_osc0_mipiphyref", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 8, GFLAGS),
+ MUX(CLK_MIPIDSIPHY_REF, "clk_mipidsiphy_ref", mux_mipidsiphy_ref_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(7), 15, 1, MFLAGS),
+
+ GATE(CLK_PMU, "clk_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(0), 15, GFLAGS),
+
+ GATE(PCLK_PMUSGRF, "pclk_pmusgrf", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(1), 13, GFLAGS),
+ GATE(PCLK_PMUCRU, "pclk_pmucru", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(PCLK_CHIPVEROTP, "pclk_chipverotp", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(PCLK_PDPMU_NIU, "pclk_pdpmu_niu", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(0), 2, GFLAGS),
+
+ GATE(PCLK_SCRKEYGEN, "pclk_scrkeygen", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(0), 7, GFLAGS),
+};
+
+static struct rockchip_clk_branch rv1126_clk_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 1
+ */
+ MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ RV1126_MODE_CON, 10, 2, MFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+ /* PD_CORE */
+ COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(1), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RV1126_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(CLK_CORE_CPUPVTM, "clk_core_cpupvtm", "armclk", 0,
+ RV1126_CLKGATE_CON(0), 12, GFLAGS),
+ GATE(PCLK_CPUPVTM, "pclk_cpupvtm", "pclk_dbg", 0,
+ RV1126_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(CLK_CPUPVTM, "clk_cpupvtm", "xin24m", 0,
+ RV1126_CLKGATE_CON(0), 11, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_PDCORE_NIU, "hclk_pdcore_niu", "gpll", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(0), 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(0), 8, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 4
+ */
+ /* PD_BUS */
+ COMPOSITE(0, "aclk_pdbus_pre", mux_gpll_cpll_dpll_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(2), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(ACLK_PDBUS, "aclk_pdbus", "aclk_pdbus_pre", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE(0, "hclk_pdbus_pre", mux_hclk_pclk_pdbus_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(2), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(2), 1, GFLAGS),
+ GATE(HCLK_PDBUS, "hclk_pdbus", "hclk_pdbus_pre", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE(0, "pclk_pdbus_pre", mux_hclk_pclk_pdbus_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(3), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(2), 2, GFLAGS),
+ GATE(PCLK_PDBUS, "pclk_pdbus", "pclk_pdbus_pre", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 13, GFLAGS),
+ /* aclk_dmac is controlled by sgrf_clkgat_con. */
+ SGRF_GATE(ACLK_DMAC, "aclk_dmac", "hclk_pdbus"),
+ GATE(ACLK_DCF, "aclk_dcf", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(3), 6, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(3), 7, GFLAGS),
+ GATE(PCLK_WDT, "pclk_wdt", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 14, GFLAGS),
+ GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 10, GFLAGS),
+
+ COMPOSITE(CLK_SCR1, "clk_scr1", mux_gpll_cpll_p, 0,
+ RV1126_CLKSEL_CON(3), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(0, "clk_scr1_niu", "clk_scr1", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 14, GFLAGS),
+ GATE(CLK_SCR1_CORE, "clk_scr1_core", "clk_scr1", 0,
+ RV1126_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(CLK_SCR1_RTC, "clk_scr1_rtc", "xin24m", 0,
+ RV1126_CLKGATE_CON(4), 9, GFLAGS),
+ GATE(CLK_SCR1_JTAG, "clk_scr1_jtag", "clk_scr1_jtag_io", 0,
+ RV1126_CLKGATE_CON(4), 10, GFLAGS),
+
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(5), 0, GFLAGS),
+ COMPOSITE(SCLK_UART0_DIV, "sclk_uart0_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(10), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART0_FRAC, "sclk_uart0_frac", "sclk_uart0_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(11), 0,
+ RV1126_CLKGATE_CON(5), 2, GFLAGS,
+ &rv1126_uart0_fracmux),
+ GATE(SCLK_UART0, "sclk_uart0", "sclk_uart0_mux", 0,
+ RV1126_CLKGATE_CON(5), 3, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(5), 4, GFLAGS),
+ COMPOSITE(SCLK_UART2_DIV, "sclk_uart2_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(12), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(5), 5, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART2_FRAC, "sclk_uart2_frac", "sclk_uart2_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(13), 0,
+ RV1126_CLKGATE_CON(5), 6, GFLAGS,
+ &rv1126_uart2_fracmux),
+ GATE(SCLK_UART2, "sclk_uart2", "sclk_uart2_mux", 0,
+ RV1126_CLKGATE_CON(5), 7, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(5), 8, GFLAGS),
+ COMPOSITE(SCLK_UART3_DIV, "sclk_uart3_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(14), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(5), 9, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART3_FRAC, "sclk_uart3_frac", "sclk_uart3_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(15), 0,
+ RV1126_CLKGATE_CON(5), 10, GFLAGS,
+ &rv1126_uart3_fracmux),
+ GATE(SCLK_UART3, "sclk_uart3", "sclk_uart3_mux", 0,
+ RV1126_CLKGATE_CON(5), 11, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(5), 12, GFLAGS),
+ COMPOSITE(SCLK_UART4_DIV, "sclk_uart4_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(16), 8, 2, MFLAGS, 0, 7,
+ DFLAGS, RV1126_CLKGATE_CON(5), 13, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART4_FRAC, "sclk_uart4_frac", "sclk_uart4_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(17), 0,
+ RV1126_CLKGATE_CON(5), 14, GFLAGS,
+ &rv1126_uart4_fracmux),
+ GATE(SCLK_UART4, "sclk_uart4", "sclk_uart4_mux", 0,
+ RV1126_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE(SCLK_UART5_DIV, "sclk_uart5_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(18), 8, 2, MFLAGS, 0, 7,
+ DFLAGS, RV1126_CLKGATE_CON(6), 1, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART5_FRAC, "sclk_uart5_frac", "sclk_uart5_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(19), 0,
+ RV1126_CLKGATE_CON(6), 2, GFLAGS,
+ &rv1126_uart5_fracmux),
+ GATE(SCLK_UART5, "sclk_uart5", "sclk_uart5_mux", 0,
+ RV1126_CLKGATE_CON(6), 3, GFLAGS),
+
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(3), 10, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C1, "clk_i2c1", "gpll", 0,
+ RV1126_CLKSEL_CON(5), 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(3), 11, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(3), 12, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C3, "clk_i2c3", "gpll", 0,
+ RV1126_CLKSEL_CON(5), 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(3), 13, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(3), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C4, "clk_i2c4", "gpll", 0,
+ RV1126_CLKSEL_CON(6), 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(3), 15, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(4), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C5, "clk_i2c5", "gpll", 0,
+ RV1126_CLKSEL_CON(6), 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(4), 1, GFLAGS),
+
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(CLK_SPI1, "clk_spi1", mux_gpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(8), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(4), 3, GFLAGS),
+
+ GATE(CLK_CAPTURE_PWM2, "clk_capture_pwm2", "xin24m", 0,
+ RV1126_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(PCLK_PWM2, "pclk_pwm2", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(4), 4, GFLAGS),
+ COMPOSITE(CLK_PWM2, "clk_pwm2", mux_xin24m_gpll_p, 0,
+ RV1126_CLKSEL_CON(9), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(4), 5, GFLAGS),
+
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 0, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO1, "dbclk_gpio1", mux_xin24m_32k_p, 0,
+ RV1126_CLKSEL_CON(21), 15, 1, MFLAGS,
+ RV1126_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 2, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO2, "dbclk_gpio2", mux_xin24m_32k_p, 0,
+ RV1126_CLKSEL_CON(22), 15, 1, MFLAGS,
+ RV1126_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 4, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO3, "dbclk_gpio3", mux_xin24m_32k_p, 0,
+ RV1126_CLKSEL_CON(23), 15, 1, MFLAGS,
+ RV1126_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 6, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO4, "dbclk_gpio4", mux_xin24m_32k_p, 0,
+ RV1126_CLKSEL_CON(24), 15, 1, MFLAGS,
+ RV1126_CLKGATE_CON(7), 7, GFLAGS),
+
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_SARADC, "clk_saradc", "xin24m", 0,
+ RV1126_CLKSEL_CON(20), 0, 11, DFLAGS,
+ RV1126_CLKGATE_CON(6), 5, GFLAGS),
+
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 7, GFLAGS),
+ GATE(CLK_TIMER0, "clk_timer0", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(CLK_TIMER1, "clk_timer1", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(CLK_TIMER2, "clk_timer2", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(CLK_TIMER3, "clk_timer3", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 11, GFLAGS),
+ GATE(CLK_TIMER4, "clk_timer4", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 13, GFLAGS),
+
+ GATE(ACLK_SPINLOCK, "aclk_spinlock", "hclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 6, GFLAGS),
+
+ GATE(ACLK_DECOM, "aclk_decom", "aclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 11, GFLAGS),
+ GATE(PCLK_DECOM, "pclk_decom", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 12, GFLAGS),
+ COMPOSITE(DCLK_DECOM, "dclk_decom", mux_gpll_cpll_p, 0,
+ RV1126_CLKSEL_CON(25), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(7), 13, GFLAGS),
+
+ GATE(PCLK_CAN, "pclk_can", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 8, GFLAGS),
+ COMPOSITE(CLK_CAN, "clk_can", mux_gpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(25), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(7), 9, GFLAGS),
+ /* pclk_otp and clk_otp are controlled by sgrf_clkgat_con. */
+ SGRF_GATE(CLK_OTP, "clk_otp", "xin24m"),
+ SGRF_GATE(PCLK_OTP, "pclk_otp", "pclk_pdbus"),
+
+ GATE(PCLK_NPU_TSADC, "pclk_npu_tsadc", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(24), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_NPU_TSADC, "clk_npu_tsadc", "xin24m", 0,
+ RV1126_CLKSEL_CON(71), 0, 11, DFLAGS,
+ RV1126_CLKGATE_CON(24), 4, GFLAGS),
+ GATE(CLK_NPU_TSADCPHY, "clk_npu_tsadcphy", "clk_npu_tsadc", 0,
+ RV1126_CLKGATE_CON(24), 5, GFLAGS),
+ GATE(PCLK_CPU_TSADC, "pclk_cpu_tsadc", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(24), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPU_TSADC, "clk_cpu_tsadc", "xin24m", 0,
+ RV1126_CLKSEL_CON(70), 0, 11, DFLAGS,
+ RV1126_CLKGATE_CON(24), 1, GFLAGS),
+ GATE(CLK_CPU_TSADCPHY, "clk_cpu_tsadcphy", "clk_cpu_tsadc", 0,
+ RV1126_CLKGATE_CON(24), 2, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+ /* PD_AUDIO */
+ COMPOSITE_NOMUX(HCLK_PDAUDIO, "hclk_pdaudio", "gpll", 0,
+ RV1126_CLKSEL_CON(26), 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(9), 0, GFLAGS),
+
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(9), 4, GFLAGS),
+ COMPOSITE(MCLK_I2S0_TX_DIV, "mclk_i2s0_tx_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(27), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(9), 5, GFLAGS),
+ COMPOSITE_FRACMUX(MCLK_I2S0_TX_FRACDIV, "mclk_i2s0_tx_fracdiv", "mclk_i2s0_tx_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(28), 0,
+ RV1126_CLKGATE_CON(9), 6, GFLAGS,
+ &rv1126_i2s0_tx_fracmux),
+ GATE(MCLK_I2S0_TX, "mclk_i2s0_tx", "mclk_i2s0_tx_mux", 0,
+ RV1126_CLKGATE_CON(9), 9, GFLAGS),
+ COMPOSITE(MCLK_I2S0_RX_DIV, "mclk_i2s0_rx_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(9), 7, GFLAGS),
+ COMPOSITE_FRACMUX(MCLK_I2S0_RX_FRACDIV, "mclk_i2s0_rx_fracdiv", "mclk_i2s0_rx_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(29), 0,
+ RV1126_CLKGATE_CON(9), 8, GFLAGS,
+ &rv1126_i2s0_rx_fracmux),
+ GATE(MCLK_I2S0_RX, "mclk_i2s0_rx", "mclk_i2s0_rx_mux", 0,
+ RV1126_CLKGATE_CON(9), 10, GFLAGS),
+ COMPOSITE_NODIV(MCLK_I2S0_TX_OUT2IO, "mclk_i2s0_tx_out2io", mux_i2s0_tx_out2io_p, 0,
+ RV1126_CLKSEL_CON(30), 6, 1, MFLAGS,
+ RV1126_CLKGATE_CON(9), 13, GFLAGS),
+ COMPOSITE_NODIV(MCLK_I2S0_RX_OUT2IO, "mclk_i2s0_rx_out2io", mux_i2s0_rx_out2io_p, 0,
+ RV1126_CLKSEL_CON(30), 8, 1, MFLAGS,
+ RV1126_CLKGATE_CON(9), 14, GFLAGS),
+
+ GATE(HCLK_I2S1, "hclk_i2s1", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE(MCLK_I2S1_DIV, "mclk_i2s1_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(10), 1, GFLAGS),
+ COMPOSITE_FRACMUX(MCLK_I2S1_FRACDIV, "mclk_i2s1_fracdiv", "mclk_i2s1_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(32), 0,
+ RV1126_CLKGATE_CON(10), 2, GFLAGS,
+ &rv1126_i2s1_fracmux),
+ GATE(MCLK_I2S1, "mclk_i2s1", "mclk_i2s1_mux", 0,
+ RV1126_CLKGATE_CON(10), 3, GFLAGS),
+ COMPOSITE_NODIV(MCLK_I2S1_OUT2IO, "mclk_i2s1_out2io", mux_i2s1_out2io_p, 0,
+ RV1126_CLKSEL_CON(31), 12, 1, MFLAGS,
+ RV1126_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(HCLK_I2S2, "hclk_i2s2", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(10), 5, GFLAGS),
+ COMPOSITE(MCLK_I2S2_DIV, "mclk_i2s2_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(33), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE_FRACMUX(MCLK_I2S2_FRACDIV, "mclk_i2s2_fracdiv", "mclk_i2s2_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(34), 0,
+ RV1126_CLKGATE_CON(10), 7, GFLAGS,
+ &rv1126_i2s2_fracmux),
+ GATE(MCLK_I2S2, "mclk_i2s2", "mclk_i2s2_mux", 0,
+ RV1126_CLKGATE_CON(10), 8, GFLAGS),
+ COMPOSITE_NODIV(MCLK_I2S2_OUT2IO, "mclk_i2s2_out2io", mux_i2s2_out2io_p, 0,
+ RV1126_CLKSEL_CON(33), 10, 1, MFLAGS,
+ RV1126_CLKGATE_CON(10), 9, GFLAGS),
+
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(10), 10, GFLAGS),
+ COMPOSITE(MCLK_PDM, "mclk_pdm", mux_gpll_cpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(35), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(10), 11, GFLAGS),
+
+ GATE(HCLK_AUDPWM, "hclk_audpwm", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(10), 12, GFLAGS),
+ COMPOSITE(SCLK_ADUPWM_DIV, "sclk_audpwm_div", mux_gpll_cpll_p, 0,
+ RV1126_CLKSEL_CON(36), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(10), 13, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_AUDPWM_FRACDIV, "sclk_audpwm_fracdiv", "sclk_audpwm_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(37), 0,
+ RV1126_CLKGATE_CON(10), 14, GFLAGS,
+ &rv1126_audpwm_fracmux),
+ GATE(SCLK_AUDPWM, "sclk_audpwm", "mclk_audpwm_mux", 0,
+ RV1126_CLKGATE_CON(10), 15, GFLAGS),
+
+ GATE(PCLK_ACDCDIG, "pclk_acdcdig", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(11), 0, GFLAGS),
+ GATE(CLK_ACDCDIG_ADC, "clk_acdcdig_adc", "mclk_i2s0_rx", 0,
+ RV1126_CLKGATE_CON(11), 2, GFLAGS),
+ GATE(CLK_ACDCDIG_DAC, "clk_acdcdig_dac", "mclk_i2s0_tx", 0,
+ RV1126_CLKGATE_CON(11), 3, GFLAGS),
+ COMPOSITE(CLK_ACDCDIG_I2C, "clk_acdcdig_i2c", mux_gpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(72), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(11), 1, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 12
+ */
+ /* PD_PHP */
+ COMPOSITE(ACLK_PDPHP, "aclk_pdphp", mux_gpll_cpll_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(53), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(17), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_PDPHP, "hclk_pdphp", "gpll", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(53), 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(17), 1, GFLAGS),
+ /* PD_SDCARD */
+ GATE(HCLK_PDSDMMC, "hclk_pdsdmmc", "hclk_pdphp", 0,
+ RV1126_CLKGATE_CON(17), 6, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_pdsdmmc", 0,
+ RV1126_CLKGATE_CON(18), 4, GFLAGS),
+ COMPOSITE(CLK_SDMMC, "clk_sdmmc", mux_gpll_cpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(55), 14, 2, MFLAGS, 0, 8,
+ DFLAGS, RV1126_CLKGATE_CON(18), 5, GFLAGS),
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RV1126_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", RV1126_SDMMC_CON1, 1),
+
+ /* PD_SDIO */
+ GATE(HCLK_PDSDIO, "hclk_pdsdio", "hclk_pdphp", 0,
+ RV1126_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_pdsdio", 0,
+ RV1126_CLKGATE_CON(18), 6, GFLAGS),
+ COMPOSITE(CLK_SDIO, "clk_sdio", mux_gpll_cpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(56), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 7, GFLAGS),
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RV1126_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", RV1126_SDIO_CON1, 1),
+
+ /* PD_NVM */
+ GATE(HCLK_PDNVM, "hclk_pdnvm", "hclk_pdphp", 0,
+ RV1126_CLKGATE_CON(18), 1, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_pdnvm", 0,
+ RV1126_CLKGATE_CON(18), 8, GFLAGS),
+ COMPOSITE(CLK_EMMC, "clk_emmc", mux_gpll_cpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(57), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 9, GFLAGS),
+ GATE(HCLK_NANDC, "hclk_nandc", "hclk_pdnvm", 0,
+ RV1126_CLKGATE_CON(18), 13, GFLAGS),
+ COMPOSITE(CLK_NANDC, "clk_nandc", mux_gpll_cpll_p, 0,
+ RV1126_CLKSEL_CON(59), 15, 1, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 14, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_pdnvm", 0,
+ RV1126_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(HCLK_SFCXIP, "hclk_sfcxip", "hclk_pdnvm", 0,
+ RV1126_CLKGATE_CON(18), 11, GFLAGS),
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(58), 15, 1, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 12, GFLAGS),
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc", RV1126_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc", RV1126_EMMC_CON1, 1),
+
+ /* PD_USB */
+ GATE(ACLK_PDUSB, "aclk_pdusb", "aclk_pdphp", 0,
+ RV1126_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(HCLK_PDUSB, "hclk_pdusb", "hclk_pdphp", 0,
+ RV1126_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(HCLK_USBHOST, "hclk_usbhost", "hclk_pdusb", 0,
+ RV1126_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(HCLK_USBHOST_ARB, "hclk_usbhost_arb", "hclk_pdusb", 0,
+ RV1126_CLKGATE_CON(19), 5, GFLAGS),
+ COMPOSITE(CLK_USBHOST_UTMI_OHCI, "clk_usbhost_utmi_ohci", mux_usb480m_gpll_p, 0,
+ RV1126_CLKSEL_CON(61), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(19), 6, GFLAGS),
+ GATE(ACLK_USBOTG, "aclk_usbotg", "aclk_pdusb", 0,
+ RV1126_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(CLK_USBOTG_REF, "clk_usbotg_ref", "xin24m", 0,
+ RV1126_CLKGATE_CON(19), 8, GFLAGS),
+ /* PD_GMAC */
+ GATE(ACLK_PDGMAC, "aclk_pdgmac", "aclk_pdphp", 0,
+ RV1126_CLKGATE_CON(20), 0, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_PDGMAC, "pclk_pdgmac", "aclk_pdgmac", 0,
+ RV1126_CLKSEL_CON(63), 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(20), 1, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_pdgmac", 0,
+ RV1126_CLKGATE_CON(20), 4, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_pdgmac", 0,
+ RV1126_CLKGATE_CON(20), 5, GFLAGS),
+
+ COMPOSITE(CLK_GMAC_DIV, "clk_gmac_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(63), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(20), 6, GFLAGS),
+ GATE(CLK_GMAC_RGMII_M0, "clk_gmac_rgmii_m0", "clk_gmac_rgmii_clkin_m0", 0,
+ RV1126_CLKGATE_CON(20), 12, GFLAGS),
+ MUX(CLK_GMAC_SRC_M0, "clk_gmac_src_m0", clk_gmac_src_m0_p, CLK_SET_RATE_PARENT,
+ RV1126_GMAC_CON, 0, 1, MFLAGS),
+ GATE(CLK_GMAC_RGMII_M1, "clk_gmac_rgmii_m1", "clk_gmac_rgmii_clkin_m1", 0,
+ RV1126_CLKGATE_CON(20), 13, GFLAGS),
+ MUX(CLK_GMAC_SRC_M1, "clk_gmac_src_m1", clk_gmac_src_m1_p, CLK_SET_RATE_PARENT,
+ RV1126_GMAC_CON, 5, 1, MFLAGS),
+ MUXGRF(CLK_GMAC_SRC, "clk_gmac_src", mux_clk_gmac_src_p, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS),
+
+ GATE(CLK_GMAC_REF, "clk_gmac_ref", "clk_gmac_src", 0,
+ RV1126_CLKGATE_CON(20), 7, GFLAGS),
+
+ GATE(CLK_GMAC_TX_SRC, "clk_gmac_tx_src", "clk_gmac_src", 0,
+ RV1126_CLKGATE_CON(20), 9, GFLAGS),
+ FACTOR(CLK_GMAC_TX_DIV5, "clk_gmac_tx_div5", "clk_gmac_tx_src", 0, 1, 5),
+ FACTOR(CLK_GMAC_TX_DIV50, "clk_gmac_tx_div50", "clk_gmac_tx_src", 0, 1, 50),
+ MUXTBL(RGMII_MODE_CLK, "rgmii_mode_clk", mux_rgmii_clk_p, CLK_SET_RATE_PARENT,
+ RV1126_GMAC_CON, 2, 2, MFLAGS, rgmii_mux_idx),
+ GATE(CLK_GMAC_RX_SRC, "clk_gmac_rx_src", "clk_gmac_src", 0,
+ RV1126_CLKGATE_CON(20), 8, GFLAGS),
+ FACTOR(CLK_GMAC_RX_DIV2, "clk_gmac_rx_div2", "clk_gmac_rx_src", 0, 1, 2),
+ FACTOR(CLK_GMAC_RX_DIV20, "clk_gmac_rx_div20", "clk_gmac_rx_src", 0, 1, 20),
+ MUX(RMII_MODE_CLK, "rmii_mode_clk", mux_rmii_clk_p, CLK_SET_RATE_PARENT,
+ RV1126_GMAC_CON, 1, 1, MFLAGS),
+ MUX(CLK_GMAC_TX_RX, "clk_gmac_tx_rx", mux_gmac_tx_rx_p, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ RV1126_GMAC_CON, 4, 1, MFLAGS),
+
+ GATE(CLK_GMAC_PTPREF, "clk_gmac_ptpref", "xin24m", 0,
+ RV1126_CLKGATE_CON(20), 10, GFLAGS),
+ COMPOSITE(CLK_GMAC_ETHERNET_OUT, "clk_gmac_ethernet_out2io", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(61), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(20), 11, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 15
+ */
+ GATE(PCLK_PDTOP, "pclk_pdtop", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 8, GFLAGS),
+ GATE(PCLK_DSIPHY, "pclk_dsiphy", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(23), 4, GFLAGS),
+ GATE(PCLK_CSIPHY0, "pclk_csiphy0", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(23), 2, GFLAGS),
+ GATE(PCLK_CSIPHY1, "pclk_csiphy1", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(23), 3, GFLAGS),
+ GATE(PCLK_USBPHY_HOST, "pclk_usbphy_host", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(19), 13, GFLAGS),
+ GATE(PCLK_USBPHY_OTG, "pclk_usbphy_otg", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(19), 12, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+ /* PD_CORE */
+ COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(1), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RV1126_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(0, "pclk_dbg_daplite", "pclk_dbg", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(0, "clk_a7_jtag", "clk_jtag_ori", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(0, "pclk_dbg_niu", "pclk_dbg", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(0), 4, GFLAGS),
+ /*
+ * Clock-Architecture Diagram 4
+ */
+ /* PD_BUS */
+ GATE(0, "aclk_pdbus_hold_niu1", "aclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 10, GFLAGS),
+ GATE(0, "aclk_pdbus_niu1", "aclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 3, GFLAGS),
+ GATE(0, "hclk_pdbus_niu1", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(0, "pclk_pdbus_niu1", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(0, "aclk_pdbus_niu2", "aclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 6, GFLAGS),
+ GATE(0, "hclk_pdbus_niu2", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 7, GFLAGS),
+ GATE(0, "aclk_pdbus_niu3", "aclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 8, GFLAGS),
+ GATE(0, "hclk_pdbus_niu3", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 9, GFLAGS),
+ GATE(0, "pclk_grf", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(6), 15, GFLAGS),
+ GATE(0, "pclk_sgrf", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(8), 4, GFLAGS),
+ GATE(0, "aclk_sysram", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(3), 9, GFLAGS),
+ GATE(0, "pclk_intmux", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(7), 14, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+ /* PD_AUDIO */
+ GATE(0, "hclk_pdaudio_niu", "hclk_pdaudio", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(0, "pclk_pdaudio_niu", "hclk_pdaudio", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(9), 3, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 12
+ */
+ /* PD_PHP */
+ GATE(0, "aclk_pdphpmid", "aclk_pdphp", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 2, GFLAGS),
+ GATE(0, "hclk_pdphpmid", "hclk_pdphp", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(0, "aclk_pdphpmid_niu", "aclk_pdphpmid", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 4, GFLAGS),
+ GATE(0, "hclk_pdphpmid_niu", "hclk_pdphpmid", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 5, GFLAGS),
+
+ /* PD_SDCARD */
+ GATE(0, "hclk_pdsdmmc_niu", "hclk_pdsdmmc", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 7, GFLAGS),
+
+ /* PD_SDIO */
+ GATE(0, "hclk_pdsdio_niu", "hclk_pdsdio", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 9, GFLAGS),
+
+ /* PD_NVM */
+ GATE(0, "hclk_pdnvm_niu", "hclk_pdnvm", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(18), 3, GFLAGS),
+
+ /* PD_USB */
+ GATE(0, "aclk_pdusb_niu", "aclk_pdusb", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(19), 2, GFLAGS),
+ GATE(0, "hclk_pdusb_niu", "hclk_pdusb", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(19), 3, GFLAGS),
+
+ /* PD_GMAC */
+ GATE(0, "aclk_pdgmac_niu", "aclk_pdgmac", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(20), 2, GFLAGS),
+ GATE(0, "pclk_pdgmac_niu", "pclk_pdgmac", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(20), 3, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 13
+ */
+ /* PD_DDR */
+ COMPOSITE_NOMUX(0, "pclk_pdddr_pre", "gpll", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(64), 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(21), 0, GFLAGS),
+ GATE(PCLK_PDDDR, "pclk_pdddr", "pclk_pdddr_pre", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 15, GFLAGS),
+ GATE(0, "pclk_ddr_msch", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 6, GFLAGS),
+ COMPOSITE_NOGATE(SCLK_DDRCLK, "sclk_ddrc", mux_dpll_gpll_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(64), 15, 1, MFLAGS, 8, 5, DFLAGS |
+ CLK_DIVIDER_POWER_OF_TWO),
+ COMPOSITE(CLK_DDRPHY, "clk_ddrphy", mux_dpll_gpll_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(64), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(21), 8, GFLAGS),
+ GATE(0, "clk1x_phy", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 1, GFLAGS),
+ GATE(0, "clk_ddr_msch", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 10, GFLAGS),
+ GATE(0, "pclk_ddr_dfictl", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 2, GFLAGS),
+ GATE(0, "clk_ddr_dfictl", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 13, GFLAGS),
+ GATE(0, "pclk_ddr_standby", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 4, GFLAGS),
+ GATE(0, "clk_ddr_standby", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 14, GFLAGS),
+ GATE(0, "aclk_ddr_split", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 9, GFLAGS),
+ GATE(0, "pclk_ddr_grf", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 5, GFLAGS),
+ GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 3, GFLAGS),
+ GATE(CLK_DDR_MON, "clk_ddr_mon", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(20), 15, GFLAGS),
+ GATE(TMCLK_DDR_MON, "tmclk_ddr_mon", "xin24m", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 7, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 15
+ */
+ GATE(0, "pclk_topniu", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 9, GFLAGS),
+ GATE(PCLK_TOPCRU, "pclk_topcru", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 10, GFLAGS),
+ GATE(PCLK_TOPGRF, "pclk_topgrf", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 11, GFLAGS),
+ GATE(PCLK_CPUEMADET, "pclk_cpuemadet", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 12, GFLAGS),
+ GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 0, GFLAGS),
+};
+
+static const char *const rv1126_cru_critical_clocks[] __initconst = {
+ "gpll",
+ "cpll",
+ "hpll",
+ "armclk",
+ "pclk_dbg",
+ "pclk_pdpmu",
+ "aclk_pdbus",
+ "hclk_pdbus",
+ "pclk_pdbus",
+ "aclk_pdphp",
+ "hclk_pdphp",
+ "clk_ddrphy",
+ "pclk_pdddr",
+ "pclk_pdtop",
+ "clk_usbhost_utmi_ohci",
+ "aclk_pdjpeg_niu",
+ "hclk_pdjpeg_niu",
+ "aclk_pdvdec_niu",
+ "hclk_pdvdec_niu",
+};
+
+static void __init rv1126_pmu_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru pmu region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip pmu clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rv1126_pmu_pll_clks,
+ ARRAY_SIZE(rv1126_pmu_pll_clks),
+ RV1126_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_branches(ctx, rv1126_clk_pmu_branches,
+ ARRAY_SIZE(rv1126_clk_pmu_branches));
+
+ rockchip_register_softrst(np, 2, reg_base + RV1126_PMU_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+static void __init rv1126_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rv1126_pll_clks,
+ ARRAY_SIZE(rv1126_pll_clks),
+ RV1126_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
+ &rv1126_cpuclk_data, rv1126_cpuclk_rates,
+ ARRAY_SIZE(rv1126_cpuclk_rates));
+
+ rockchip_clk_register_branches(ctx, rv1126_clk_branches,
+ ARRAY_SIZE(rv1126_clk_branches));
+
+ rockchip_register_softrst(np, 15, reg_base + RV1126_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RV1126_GLB_SRST_FST, NULL);
+
+ rockchip_clk_protect_critical(rv1126_cru_critical_clocks,
+ ARRAY_SIZE(rv1126_cru_critical_clocks));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+struct clk_rv1126_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rv1126_inits clk_rv1126_pmucru_init = {
+ .inits = rv1126_pmu_clk_init,
+};
+
+static const struct clk_rv1126_inits clk_rv1126_cru_init = {
+ .inits = rv1126_clk_init,
+};
+
+static const struct of_device_id clk_rv1126_match_table[] = {
+ {
+ .compatible = "rockchip,rv1126-cru",
+ .data = &clk_rv1126_cru_init,
+ }, {
+ .compatible = "rockchip,rv1126-pmucru",
+ .data = &clk_rv1126_pmucru_init,
+ },
+ { }
+};
+
+static int __init clk_rv1126_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct clk_rv1126_inits *init_data;
+
+ init_data = (struct clk_rv1126_inits *)of_device_get_match_data(&pdev->dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(np);
+
+ return 0;
+}
+
+static struct platform_driver clk_rv1126_driver = {
+ .driver = {
+ .name = "clk-rv1126",
+ .of_match_table = clk_rv1126_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rv1126_driver, clk_rv1126_probe);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index bb8a844309bf..e63d4f20b479 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -40,6 +40,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *base,
int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
+ u32 *mux_table,
int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
struct clk_div_table *div_table, int gate_offset,
u8 gate_shift, u8 gate_flags, unsigned long flags,
@@ -62,6 +63,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
mux->shift = mux_shift;
mux->mask = BIT(mux_width) - 1;
mux->flags = mux_flags;
+ mux->table = mux_table;
mux->lock = lock;
mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
: &clk_mux_ops;
@@ -270,6 +272,8 @@ static struct clk *rockchip_clk_register_frac_branch(
frac_mux->shift = child->mux_shift;
frac_mux->mask = BIT(child->mux_width) - 1;
frac_mux->flags = child->mux_flags;
+ if (child->mux_table)
+ frac_mux->table = child->mux_table;
frac_mux->lock = lock;
frac_mux->hw.init = &init;
@@ -444,11 +448,21 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
/* catch simple muxes */
switch (list->branch_type) {
case branch_mux:
- clk = clk_register_mux(NULL, list->name,
- list->parent_names, list->num_parents,
- flags, ctx->reg_base + list->muxdiv_offset,
- list->mux_shift, list->mux_width,
- list->mux_flags, &ctx->lock);
+ if (list->mux_table)
+ clk = clk_register_mux_table(NULL, list->name,
+ list->parent_names, list->num_parents,
+ flags,
+ ctx->reg_base + list->muxdiv_offset,
+ list->mux_shift, list->mux_width,
+ list->mux_flags, list->mux_table,
+ &ctx->lock);
+ else
+ clk = clk_register_mux(NULL, list->name,
+ list->parent_names, list->num_parents,
+ flags,
+ ctx->reg_base + list->muxdiv_offset,
+ list->mux_shift, list->mux_width,
+ list->mux_flags, &ctx->lock);
break;
case branch_muxgrf:
clk = rockchip_clk_register_muxgrf(list->name,
@@ -506,7 +520,8 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
ctx->reg_base, list->muxdiv_offset,
list->mux_shift,
list->mux_width, list->mux_flags,
- list->div_offset, list->div_shift, list->div_width,
+ list->mux_table, list->div_offset,
+ list->div_shift, list->div_width,
list->div_flags, list->div_table,
list->gate_offset, list->gate_shift,
list->gate_flags, flags, &ctx->lock);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 7aa45cc70287..ee01739e4a7c 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -79,6 +79,25 @@ struct clk;
#define RV1108_EMMC_CON0 0x1e8
#define RV1108_EMMC_CON1 0x1ec
+#define RV1126_PMU_MODE 0x0
+#define RV1126_PMU_PLL_CON(x) ((x) * 0x4 + 0x10)
+#define RV1126_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RV1126_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180)
+#define RV1126_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200)
+#define RV1126_PLL_CON(x) ((x) * 0x4)
+#define RV1126_MODE_CON 0x90
+#define RV1126_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RV1126_CLKGATE_CON(x) ((x) * 0x4 + 0x280)
+#define RV1126_SOFTRST_CON(x) ((x) * 0x4 + 0x300)
+#define RV1126_GLB_SRST_FST 0x408
+#define RV1126_GLB_SRST_SND 0x40c
+#define RV1126_SDMMC_CON0 0x440
+#define RV1126_SDMMC_CON1 0x444
+#define RV1126_SDIO_CON0 0x448
+#define RV1126_SDIO_CON1 0x44c
+#define RV1126_EMMC_CON0 0x450
+#define RV1126_EMMC_CON1 0x454
+
#define RK2928_PLL_CON(x) ((x) * 0x4)
#define RK2928_MODE_CON 0x40
#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
@@ -448,6 +467,7 @@ struct rockchip_clk_branch {
u8 mux_shift;
u8 mux_width;
u8 mux_flags;
+ u32 *mux_table;
int div_offset;
u8 div_shift;
u8 div_width;
@@ -680,6 +700,22 @@ struct rockchip_clk_branch {
.gate_offset = -1, \
}
+#define MUXTBL(_id, cname, pnames, f, o, s, w, mf, mt) \
+ { \
+ .id = _id, \
+ .branch_type = branch_mux, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = o, \
+ .mux_shift = s, \
+ .mux_width = w, \
+ .mux_flags = mf, \
+ .gate_offset = -1, \
+ .mux_table = mt, \
+ }
+
#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \
{ \
.id = _id, \
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index e6d6cbf8c4e6..273f77d54dab 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -81,19 +81,17 @@ MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
{
const struct exynos_clkout_variant *variant;
- const struct of_device_id *match;
if (!dev->parent) {
dev_err(dev, "not instantiated from MFD\n");
return -EINVAL;
}
- match = of_match_device(exynos_clkout_ids, dev->parent);
- if (!match) {
+ variant = of_device_get_match_data(dev->parent);
+ if (!variant) {
dev_err(dev, "cannot match parent device\n");
return -EINVAL;
}
- variant = match->data;
*mux_mask = variant->mux_mask;
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
index a7b106302706..62ce6814f141 100644
--- a/drivers/clk/samsung/clk-exynos7885.c
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -27,6 +27,11 @@
#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
#define CLK_CON_MUX_MUX_CLKCMU_CORE_G3D 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_BUS 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_CARD 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_EMBD 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_SDIO 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_USB30DRD 0x1038
#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1058
#define CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0 0x105c
#define CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1 0x1060
@@ -39,6 +44,11 @@
#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x181c
#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1820
#define CLK_CON_DIV_CLKCMU_CORE_G3D 0x1824
+#define CLK_CON_DIV_CLKCMU_FSYS_BUS 0x1844
+#define CLK_CON_DIV_CLKCMU_FSYS_MMC_CARD 0x1848
+#define CLK_CON_DIV_CLKCMU_FSYS_MMC_EMBD 0x184c
+#define CLK_CON_DIV_CLKCMU_FSYS_MMC_SDIO 0x1850
+#define CLK_CON_DIV_CLKCMU_FSYS_USB30DRD 0x1854
#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x1874
#define CLK_CON_DIV_CLKCMU_PERI_SPI0 0x1878
#define CLK_CON_DIV_CLKCMU_PERI_SPI1 0x187c
@@ -59,6 +69,11 @@
#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
#define CLK_CON_GAT_GATE_CLKCMU_CORE_G3D 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_BUS 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_CARD 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_EMBD 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_SDIO 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_USB30DRD 0x2054
#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x207c
#define CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0 0x2080
#define CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1 0x2084
@@ -76,6 +91,11 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
CLK_CON_MUX_MUX_CLKCMU_CORE_G3D,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_SDIO,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_USB30DRD,
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0,
CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1,
@@ -88,6 +108,11 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_CORE_BUS,
CLK_CON_DIV_CLKCMU_CORE_CCI,
CLK_CON_DIV_CLKCMU_CORE_G3D,
+ CLK_CON_DIV_CLKCMU_FSYS_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_EMBD,
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_SDIO,
+ CLK_CON_DIV_CLKCMU_FSYS_USB30DRD,
CLK_CON_DIV_CLKCMU_PERI_BUS,
CLK_CON_DIV_CLKCMU_PERI_SPI0,
CLK_CON_DIV_CLKCMU_PERI_SPI1,
@@ -108,6 +133,11 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
CLK_CON_GAT_GATE_CLKCMU_CORE_G3D,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_SDIO,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_USB30DRD,
CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0,
CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1,
@@ -146,6 +176,13 @@ PNAME(mout_peri_usi0_p) = { "oscclk", "dout_shared0_div4" };
PNAME(mout_peri_usi1_p) = { "oscclk", "dout_shared0_div4" };
PNAME(mout_peri_usi2_p) = { "oscclk", "dout_shared0_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_FSYS */
+PNAME(mout_fsys_bus_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_fsys_mmc_card_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_fsys_mmc_embd_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_fsys_mmc_sdio_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_fsys_usb30drd_p) = { "dout_shared0_div4", "dout_shared1_div4" };
+
static const struct samsung_mux_clock top_mux_clks[] __initconst = {
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
@@ -174,6 +211,18 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_PERI_USI1, 0, 1),
MUX(CLK_MOUT_PERI_USI2, "mout_peri_usi2", mout_peri_usi2_p,
CLK_CON_MUX_MUX_CLKCMU_PERI_USI2, 0, 1),
+
+ /* FSYS */
+ MUX(CLK_MOUT_FSYS_BUS, "mout_fsys_bus", mout_fsys_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_BUS, 0, 1),
+ MUX(CLK_MOUT_FSYS_MMC_CARD, "mout_fsys_mmc_card", mout_fsys_mmc_card_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_CARD, 0, 1),
+ MUX(CLK_MOUT_FSYS_MMC_EMBD, "mout_fsys_mmc_embd", mout_fsys_mmc_embd_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_EMBD, 0, 1),
+ MUX(CLK_MOUT_FSYS_MMC_SDIO, "mout_fsys_mmc_sdio", mout_fsys_mmc_sdio_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_SDIO, 0, 1),
+ MUX(CLK_MOUT_FSYS_USB30DRD, "mout_fsys_usb30drd", mout_fsys_usb30drd_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_USB30DRD, 0, 1),
};
static const struct samsung_div_clock top_div_clks[] __initconst = {
@@ -220,6 +269,18 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_PERI_USI1, 0, 4),
DIV(CLK_DOUT_PERI_USI2, "dout_peri_usi2", "gout_peri_usi2",
CLK_CON_DIV_CLKCMU_PERI_USI2, 0, 4),
+
+ /* FSYS */
+ DIV(CLK_DOUT_FSYS_BUS, "dout_fsys_bus", "gout_fsys_bus",
+ CLK_CON_DIV_CLKCMU_FSYS_BUS, 0, 4),
+ DIV(CLK_DOUT_FSYS_MMC_CARD, "dout_fsys_mmc_card", "gout_fsys_mmc_card",
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_CARD, 0, 9),
+ DIV(CLK_DOUT_FSYS_MMC_EMBD, "dout_fsys_mmc_embd", "gout_fsys_mmc_embd",
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_EMBD, 0, 9),
+ DIV(CLK_DOUT_FSYS_MMC_SDIO, "dout_fsys_mmc_sdio", "gout_fsys_mmc_sdio",
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_SDIO, 0, 9),
+ DIV(CLK_DOUT_FSYS_USB30DRD, "dout_fsys_usb30drd", "gout_fsys_usb30drd",
+ CLK_CON_DIV_CLKCMU_FSYS_USB30DRD, 0, 4),
};
static const struct samsung_gate_clock top_gate_clks[] __initconst = {
@@ -250,6 +311,18 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_CON_GAT_GATE_CLKCMU_PERI_USI1, 21, 0, 0),
GATE(CLK_GOUT_PERI_USI2, "gout_peri_usi2", "mout_peri_usi2",
CLK_CON_GAT_GATE_CLKCMU_PERI_USI2, 21, 0, 0),
+
+ /* FSYS */
+ GATE(CLK_GOUT_FSYS_BUS, "gout_fsys_bus", "mout_fsys_bus",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS_MMC_CARD, "gout_fsys_mmc_card", "mout_fsys_mmc_card",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_CARD, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS_MMC_EMBD, "gout_fsys_mmc_embd", "mout_fsys_mmc_embd",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_EMBD, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS_MMC_SDIO, "gout_fsys_mmc_sdio", "mout_fsys_mmc_sdio",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_SDIO, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS_USB30DRD, "gout_fsys_usb30drd", "mout_fsys_usb30drd",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_USB30DRD, 21, 0, 0),
};
static const struct samsung_cmu_info top_cmu_info __initconst = {
@@ -498,13 +571,20 @@ CLK_OF_DECLARE(exynos7885_cmu_peri, "samsung,exynos7885-cmu-peri",
/* ---- CMU_CORE ------------------------------------------------------------ */
/* Register Offset definitions for CMU_CORE (0x12000000) */
-#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0100
-#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0120
-#define PLL_CON0_MUX_CLKCMU_CORE_G3D_USER 0x0140
-#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
-#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
-#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2054
-#define CLK_CON_GAT_GOUT_CORE_GIC400_CLK 0x2058
+#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_CORE_G3D_USER 0x0140
+#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
+#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2054
+#define CLK_CON_GAT_GOUT_CORE_GIC400_CLK 0x2058
+#define CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_ACLK 0x215c
+#define CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_GCLK 0x2160
+#define CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_PCLK 0x2164
+#define CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_ACLK_P_CORE 0x2168
+#define CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_CCLK_P_CORE 0x216c
+#define CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK 0x2170
+#define CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK_P_CORE 0x2174
static const unsigned long core_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
@@ -514,6 +594,13 @@ static const unsigned long core_clk_regs[] __initconst = {
CLK_CON_DIV_DIV_CLK_CORE_BUSP,
CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
CLK_CON_GAT_GOUT_CORE_GIC400_CLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_ACLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_GCLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_PCLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_ACLK_P_CORE,
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_CCLK_P_CORE,
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK_P_CORE,
};
/* List of parent clocks for Muxes in CMU_CORE */
@@ -545,6 +632,27 @@ static const struct samsung_gate_clock core_gate_clks[] __initconst = {
/* GIC (interrupt controller) clock must be always running */
GATE(CLK_GOUT_GIC400_CLK, "gout_gic400_clk", "mout_core_gic",
CLK_CON_GAT_GOUT_CORE_GIC400_CLK, 21, CLK_IS_CRITICAL, 0),
+ /*
+ * TREX D and P Core (seems to be related to "bus traffic shaper")
+ * clocks must always be running
+ */
+ GATE(CLK_GOUT_TREX_D_CORE_ACLK, "gout_trex_d_core_aclk", "mout_core_bus_user",
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_ACLK, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_D_CORE_GCLK, "gout_trex_d_core_gclk", "mout_core_g3d_user",
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_GCLK, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_D_CORE_PCLK, "gout_trex_d_core_pclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_PCLK, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_P_CORE_ACLK_P_CORE, "gout_trex_p_core_aclk_p_core",
+ "mout_core_bus_user", CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_ACLK_P_CORE, 21,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_P_CORE_CCLK_P_CORE, "gout_trex_p_core_cclk_p_core",
+ "mout_core_cci_user", CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_CCLK_P_CORE, 21,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_P_CORE_PCLK, "gout_trex_p_core_pclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_P_CORE_PCLK_P_CORE, "gout_trex_p_core_pclk_p_core",
+ "dout_core_busp", CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK_P_CORE, 21,
+ CLK_IS_CRITICAL, 0),
};
static const struct samsung_cmu_info core_cmu_info __initconst = {
@@ -560,6 +668,88 @@ static const struct samsung_cmu_info core_cmu_info __initconst = {
.clk_name = "dout_core_bus",
};
+/* ---- CMU_FSYS ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_FSYS (0x13400000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER 0x0140
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER 0x0160
+#define PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER 0x0180
+#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK 0x2030
+#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN 0x2034
+#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK 0x2038
+#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN 0x203c
+#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK 0x2040
+#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN 0x2044
+
+static const unsigned long fsys_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER,
+ CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK,
+ CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN,
+ CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK,
+ CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN,
+ CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK,
+ CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS */
+PNAME(mout_fsys_bus_user_p) = { "oscclk", "dout_fsys_bus" };
+PNAME(mout_fsys_mmc_card_user_p) = { "oscclk", "dout_fsys_mmc_card" };
+PNAME(mout_fsys_mmc_embd_user_p) = { "oscclk", "dout_fsys_mmc_embd" };
+PNAME(mout_fsys_mmc_sdio_user_p) = { "oscclk", "dout_fsys_mmc_sdio" };
+PNAME(mout_fsys_usb30drd_user_p) = { "oscclk", "dout_fsys_usb30drd" };
+
+static const struct samsung_mux_clock fsys_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user", mout_fsys_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER, 4, 1),
+ MUX_F(CLK_MOUT_FSYS_MMC_CARD_USER, "mout_fsys_mmc_card_user",
+ mout_fsys_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_FSYS_MMC_EMBD_USER, "mout_fsys_mmc_embd_user",
+ mout_fsys_mmc_embd_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_FSYS_MMC_SDIO_USER, "mout_fsys_mmc_sdio_user",
+ mout_fsys_mmc_sdio_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_FSYS_USB30DRD_USER, "mout_fsys_usb30drd_user",
+ mout_fsys_usb30drd_user_p, PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_fsys_bus_user",
+ CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
+ "mout_fsys_mmc_card_user", CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MMC_EMBD_ACLK, "gout_mmc_embd_aclk", "mout_fsys_bus_user",
+ CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_EMBD_SDCLKIN, "gout_mmc_embd_sdclkin",
+ "mout_fsys_mmc_embd_user", CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MMC_SDIO_ACLK, "gout_mmc_sdio_aclk", "mout_fsys_bus_user",
+ CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_SDIO_SDCLKIN, "gout_mmc_sdio_sdclkin",
+ "mout_fsys_mmc_sdio_user", CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info fsys_cmu_info __initconst = {
+ .mux_clks = fsys_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys_mux_clks),
+ .gate_clks = fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys_gate_clks),
+ .nr_clk_ids = FSYS_NR_CLK,
+ .clk_regs = fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys_clk_regs),
+ .clk_name = "dout_fsys_bus",
+};
+
/* ---- platform_driver ----------------------------------------------------- */
static int __init exynos7885_cmu_probe(struct platform_device *pdev)
@@ -578,6 +768,9 @@ static const struct of_device_id exynos7885_cmu_of_match[] = {
.compatible = "samsung,exynos7885-cmu-core",
.data = &core_cmu_info,
}, {
+ .compatible = "samsung,exynos7885-cmu-fsys",
+ .data = &fsys_cmu_info,
+ }, {
},
};
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index cd9725f1dbf7..541761e96aeb 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -30,6 +30,7 @@
#define PLL_CON0_PLL_SHARED1 0x0180
#define PLL_CON3_PLL_SHARED1 0x018c
#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_AUD 0x1004
#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
@@ -38,10 +39,19 @@
#define CLK_CON_MUX_MUX_CLKCMU_HSI_BUS 0x103c
#define CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD 0x1040
#define CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_IS_BUS 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_IS_GDC 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_IS_ITP 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_IS_VRA 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC 0x1064
#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1070
#define CLK_CON_MUX_MUX_CLKCMU_PERI_IP 0x1074
#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART 0x1078
#define CLK_CON_DIV_CLKCMU_APM_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_AUD 0x1810
#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1820
#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824
#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
@@ -50,6 +60,14 @@
#define CLK_CON_DIV_CLKCMU_HSI_BUS 0x1848
#define CLK_CON_DIV_CLKCMU_HSI_MMC_CARD 0x184c
#define CLK_CON_DIV_CLKCMU_HSI_USB20DRD 0x1850
+#define CLK_CON_DIV_CLKCMU_IS_BUS 0x1854
+#define CLK_CON_DIV_CLKCMU_IS_GDC 0x1858
+#define CLK_CON_DIV_CLKCMU_IS_ITP 0x185c
+#define CLK_CON_DIV_CLKCMU_IS_VRA 0x1860
+#define CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG 0x1864
+#define CLK_CON_DIV_CLKCMU_MFCMSCL_M2M 0x1868
+#define CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC 0x186c
+#define CLK_CON_DIV_CLKCMU_MFCMSCL_MFC 0x1870
#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x187c
#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1880
#define CLK_CON_DIV_CLKCMU_PERI_UART 0x1884
@@ -60,6 +78,7 @@
#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x189c
#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18a0
#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
+#define CLK_CON_GAT_GATE_CLKCMU_AUD 0x200c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
@@ -68,6 +87,14 @@
#define CLK_CON_GAT_GATE_CLKCMU_HSI_BUS 0x2044
#define CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD 0x2048
#define CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_IS_BUS 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_IS_GDC 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_IS_ITP 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_IS_VRA 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC 0x206c
#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x2080
#define CLK_CON_GAT_GATE_CLKCMU_PERI_IP 0x2084
#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART 0x2088
@@ -83,6 +110,7 @@ static const unsigned long top_clk_regs[] __initconst = {
PLL_CON0_PLL_SHARED1,
PLL_CON3_PLL_SHARED1,
CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_AUD,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
@@ -91,10 +119,19 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_HSI_BUS,
CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD,
CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD,
+ CLK_CON_MUX_MUX_CLKCMU_IS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_IS_GDC,
+ CLK_CON_MUX_MUX_CLKCMU_IS_ITP,
+ CLK_CON_MUX_MUX_CLKCMU_IS_VRA,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC,
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
CLK_CON_MUX_MUX_CLKCMU_PERI_IP,
CLK_CON_MUX_MUX_CLKCMU_PERI_UART,
CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_AUD,
CLK_CON_DIV_CLKCMU_CORE_BUS,
CLK_CON_DIV_CLKCMU_CORE_CCI,
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
@@ -103,6 +140,14 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_HSI_BUS,
CLK_CON_DIV_CLKCMU_HSI_MMC_CARD,
CLK_CON_DIV_CLKCMU_HSI_USB20DRD,
+ CLK_CON_DIV_CLKCMU_IS_BUS,
+ CLK_CON_DIV_CLKCMU_IS_GDC,
+ CLK_CON_DIV_CLKCMU_IS_ITP,
+ CLK_CON_DIV_CLKCMU_IS_VRA,
+ CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG,
+ CLK_CON_DIV_CLKCMU_MFCMSCL_M2M,
+ CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC,
+ CLK_CON_DIV_CLKCMU_MFCMSCL_MFC,
CLK_CON_DIV_CLKCMU_PERI_BUS,
CLK_CON_DIV_CLKCMU_PERI_IP,
CLK_CON_DIV_CLKCMU_PERI_UART,
@@ -113,6 +158,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_PLL_SHARED1_DIV3,
CLK_CON_DIV_PLL_SHARED1_DIV4,
CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_AUD,
CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
@@ -121,6 +167,14 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLKCMU_HSI_BUS,
CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD,
CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD,
+ CLK_CON_GAT_GATE_CLKCMU_IS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_IS_GDC,
+ CLK_CON_GAT_GATE_CLKCMU_IS_ITP,
+ CLK_CON_GAT_GATE_CLKCMU_IS_VRA,
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG,
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M,
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC,
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC,
CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
CLK_CON_GAT_GATE_CLKCMU_PERI_IP,
CLK_CON_GAT_GATE_CLKCMU_PERI_UART,
@@ -148,6 +202,9 @@ PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_APM */
PNAME(mout_clkcmu_apm_bus_p) = { "dout_shared0_div4", "pll_shared1_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_AUD */
+PNAME(mout_aud_p) = { "fout_shared1_pll", "dout_shared0_div2",
+ "dout_shared1_div2", "dout_shared0_div3" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
PNAME(mout_core_bus_p) = { "dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "dout_shared0_div4" };
@@ -167,13 +224,30 @@ PNAME(mout_hsi_mmc_card_p) = { "oscclk", "dout_shared0_div2",
"oscclk", "oscclk" };
PNAME(mout_hsi_usb20drd_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_IS */
+PNAME(mout_is_bus_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+PNAME(mout_is_itp_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+PNAME(mout_is_vra_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+PNAME(mout_is_gdc_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_MFCMSCL */
+PNAME(mout_mfcmscl_mfc_p) = { "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "dout_shared0_div4" };
+PNAME(mout_mfcmscl_m2m_p) = { "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "dout_shared0_div4" };
+PNAME(mout_mfcmscl_mcsc_p) = { "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "dout_shared0_div4" };
+PNAME(mout_mfcmscl_jpeg_p) = { "dout_shared0_div3", "dout_shared1_div3",
+ "dout_shared0_div4", "dout_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_PERI */
PNAME(mout_peri_bus_p) = { "dout_shared0_div4", "dout_shared1_div4" };
PNAME(mout_peri_uart_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
PNAME(mout_peri_ip_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
-
/* List of parent clocks for Muxes in CMU_TOP: for CMU_DPU */
PNAME(mout_dpu_p) = { "dout_shared0_div3", "dout_shared1_div3",
"dout_shared0_div4", "dout_shared1_div4" };
@@ -191,6 +265,10 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_CLKCMU_APM_BUS, "mout_clkcmu_apm_bus",
mout_clkcmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+ /* AUD */
+ MUX(CLK_MOUT_AUD, "mout_aud", mout_aud_p,
+ CLK_CON_MUX_MUX_CLKCMU_AUD, 0, 2),
+
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
@@ -213,6 +291,26 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_HSI_USB20DRD, "mout_hsi_usb20drd", mout_hsi_usb20drd_p,
CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD, 0, 2),
+ /* IS */
+ MUX(CLK_MOUT_IS_BUS, "mout_is_bus", mout_is_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_IS_BUS, 0, 2),
+ MUX(CLK_MOUT_IS_ITP, "mout_is_itp", mout_is_itp_p,
+ CLK_CON_MUX_MUX_CLKCMU_IS_ITP, 0, 2),
+ MUX(CLK_MOUT_IS_VRA, "mout_is_vra", mout_is_vra_p,
+ CLK_CON_MUX_MUX_CLKCMU_IS_VRA, 0, 2),
+ MUX(CLK_MOUT_IS_GDC, "mout_is_gdc", mout_is_gdc_p,
+ CLK_CON_MUX_MUX_CLKCMU_IS_GDC, 0, 2),
+
+ /* MFCMSCL */
+ MUX(CLK_MOUT_MFCMSCL_MFC, "mout_mfcmscl_mfc", mout_mfcmscl_mfc_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC, 0, 2),
+ MUX(CLK_MOUT_MFCMSCL_M2M, "mout_mfcmscl_m2m", mout_mfcmscl_m2m_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M, 0, 2),
+ MUX(CLK_MOUT_MFCMSCL_MCSC, "mout_mfcmscl_mcsc", mout_mfcmscl_mcsc_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC, 0, 2),
+ MUX(CLK_MOUT_MFCMSCL_JPEG, "mout_mfcmscl_jpeg", mout_mfcmscl_jpeg_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG, 0, 2),
+
/* PERI */
MUX(CLK_MOUT_PERI_BUS, "mout_peri_bus", mout_peri_bus_p,
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS, 0, 1),
@@ -241,6 +339,10 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(CLK_DOUT_CLKCMU_APM_BUS, "dout_clkcmu_apm_bus",
"gout_clkcmu_apm_bus", CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ /* AUD */
+ DIV(CLK_DOUT_AUD, "dout_aud", "gout_aud",
+ CLK_CON_DIV_CLKCMU_AUD, 0, 4),
+
/* CORE */
DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
@@ -263,6 +365,26 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(CLK_DOUT_HSI_USB20DRD, "dout_hsi_usb20drd", "gout_hsi_usb20drd",
CLK_CON_DIV_CLKCMU_HSI_USB20DRD, 0, 4),
+ /* IS */
+ DIV(CLK_DOUT_IS_BUS, "dout_is_bus", "gout_is_bus",
+ CLK_CON_DIV_CLKCMU_IS_BUS, 0, 4),
+ DIV(CLK_DOUT_IS_ITP, "dout_is_itp", "gout_is_itp",
+ CLK_CON_DIV_CLKCMU_IS_ITP, 0, 4),
+ DIV(CLK_DOUT_IS_VRA, "dout_is_vra", "gout_is_vra",
+ CLK_CON_DIV_CLKCMU_IS_VRA, 0, 4),
+ DIV(CLK_DOUT_IS_GDC, "dout_is_gdc", "gout_is_gdc",
+ CLK_CON_DIV_CLKCMU_IS_GDC, 0, 4),
+
+ /* MFCMSCL */
+ DIV(CLK_DOUT_MFCMSCL_MFC, "dout_mfcmscl_mfc", "gout_mfcmscl_mfc",
+ CLK_CON_DIV_CLKCMU_MFCMSCL_MFC, 0, 4),
+ DIV(CLK_DOUT_MFCMSCL_M2M, "dout_mfcmscl_m2m", "gout_mfcmscl_m2m",
+ CLK_CON_DIV_CLKCMU_MFCMSCL_M2M, 0, 4),
+ DIV(CLK_DOUT_MFCMSCL_MCSC, "dout_mfcmscl_mcsc", "gout_mfcmscl_mcsc",
+ CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC, 0, 4),
+ DIV(CLK_DOUT_MFCMSCL_JPEG, "dout_mfcmscl_jpeg", "gout_mfcmscl_jpeg",
+ CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG, 0, 4),
+
/* PERI */
DIV(CLK_DOUT_PERI_BUS, "dout_peri_bus", "gout_peri_bus",
CLK_CON_DIV_CLKCMU_PERI_BUS, 0, 4),
@@ -287,6 +409,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_GOUT_CLKCMU_APM_BUS, "gout_clkcmu_apm_bus",
"mout_clkcmu_apm_bus", CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, 0, 0),
+ /* AUD */
+ GATE(CLK_GOUT_AUD, "gout_aud", "mout_aud",
+ CLK_CON_GAT_GATE_CLKCMU_AUD, 21, 0, 0),
+
/* DPU */
GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
@@ -299,6 +425,28 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI_USB20DRD, "gout_hsi_usb20drd", "mout_hsi_usb20drd",
CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD, 21, 0, 0),
+ /* IS */
+ /* TODO: These clocks have to be always enabled to access CMU_IS regs */
+ GATE(CLK_GOUT_IS_BUS, "gout_is_bus", "mout_is_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IS_BUS, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IS_ITP, "gout_is_itp", "mout_is_itp",
+ CLK_CON_GAT_GATE_CLKCMU_IS_ITP, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IS_VRA, "gout_is_vra", "mout_is_vra",
+ CLK_CON_GAT_GATE_CLKCMU_IS_VRA, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IS_GDC, "gout_is_gdc", "mout_is_gdc",
+ CLK_CON_GAT_GATE_CLKCMU_IS_GDC, 21, CLK_IS_CRITICAL, 0),
+
+ /* MFCMSCL */
+ /* TODO: These have to be always enabled to access CMU_MFCMSCL regs */
+ GATE(CLK_GOUT_MFCMSCL_MFC, "gout_mfcmscl_mfc", "mout_mfcmscl_mfc",
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_MFCMSCL_M2M, "gout_mfcmscl_m2m", "mout_mfcmscl_m2m",
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_MFCMSCL_MCSC, "gout_mfcmscl_mcsc", "mout_mfcmscl_mcsc",
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_MFCMSCL_JPEG, "gout_mfcmscl_jpeg", "mout_mfcmscl_jpeg",
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG, 21, CLK_IS_CRITICAL, 0),
+
/* PERI */
GATE(CLK_GOUT_PERI_BUS, "gout_peri_bus", "mout_peri_bus",
CLK_CON_GAT_GATE_CLKCMU_PERI_BUS, 21, 0, 0),
@@ -463,6 +611,284 @@ static const struct samsung_cmu_info apm_cmu_info __initconst = {
.clk_name = "dout_clkcmu_apm_bus",
};
+/* ---- CMU_AUD ------------------------------------------------------------- */
+
+#define PLL_LOCKTIME_PLL_AUD 0x0000
+#define PLL_CON0_PLL_AUD 0x0100
+#define PLL_CON3_PLL_AUD 0x010c
+#define PLL_CON0_MUX_CLKCMU_AUD_CPU_USER 0x0600
+#define PLL_CON0_MUX_TICK_USB_USER 0x0610
+#define CLK_CON_MUX_MUX_CLK_AUD_CPU 0x1000
+#define CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH 0x1004
+#define CLK_CON_MUX_MUX_CLK_AUD_FM 0x1008
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF0 0x100c
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF1 0x1010
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF2 0x1014
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF3 0x1018
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF4 0x101c
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF5 0x1020
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF6 0x1024
+#define CLK_CON_DIV_DIV_CLK_AUD_MCLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_AUD_AUDIF 0x1804
+#define CLK_CON_DIV_DIV_CLK_AUD_BUSD 0x1808
+#define CLK_CON_DIV_DIV_CLK_AUD_BUSP 0x180c
+#define CLK_CON_DIV_DIV_CLK_AUD_CNT 0x1810
+#define CLK_CON_DIV_DIV_CLK_AUD_CPU 0x1814
+#define CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK 0x1818
+#define CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG 0x181c
+#define CLK_CON_DIV_DIV_CLK_AUD_FM 0x1820
+#define CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY 0x1824
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF0 0x1828
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF1 0x182c
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF2 0x1830
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF3 0x1834
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF4 0x1838
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF5 0x183c
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF6 0x1840
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT 0x2000
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0 0x2004
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1 0x2008
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2 0x200c
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3 0x2010
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4 0x2014
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5 0x2018
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6 0x201c
+#define CLK_CON_GAT_GOUT_AUD_ABOX_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY 0x204c
+#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB 0x2050
+#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32 0x2054
+#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP 0x2058
+#define CLK_CON_GAT_GOUT_AUD_CODEC_MCLK 0x206c
+#define CLK_CON_GAT_GOUT_AUD_TZPC_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_AUD_GPIO_PCLK 0x2074
+#define CLK_CON_GAT_GOUT_AUD_PPMU_ACLK 0x2088
+#define CLK_CON_GAT_GOUT_AUD_PPMU_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1 0x20b4
+#define CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK 0x20b8
+#define CLK_CON_GAT_GOUT_AUD_WDT_PCLK 0x20bc
+
+static const unsigned long aud_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_AUD,
+ PLL_CON0_PLL_AUD,
+ PLL_CON3_PLL_AUD,
+ PLL_CON0_MUX_CLKCMU_AUD_CPU_USER,
+ PLL_CON0_MUX_TICK_USB_USER,
+ CLK_CON_MUX_MUX_CLK_AUD_CPU,
+ CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH,
+ CLK_CON_MUX_MUX_CLK_AUD_FM,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF0,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF1,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF2,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF3,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF4,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF5,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF6,
+ CLK_CON_DIV_DIV_CLK_AUD_MCLK,
+ CLK_CON_DIV_DIV_CLK_AUD_AUDIF,
+ CLK_CON_DIV_DIV_CLK_AUD_BUSD,
+ CLK_CON_DIV_DIV_CLK_AUD_BUSP,
+ CLK_CON_DIV_DIV_CLK_AUD_CNT,
+ CLK_CON_DIV_DIV_CLK_AUD_CPU,
+ CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK,
+ CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_AUD_FM,
+ CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF0,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF1,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF2,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF3,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF4,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF5,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF6,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6,
+ CLK_CON_GAT_GOUT_AUD_ABOX_ACLK,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY,
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB,
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32,
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP,
+ CLK_CON_GAT_GOUT_AUD_CODEC_MCLK,
+ CLK_CON_GAT_GOUT_AUD_TZPC_PCLK,
+ CLK_CON_GAT_GOUT_AUD_GPIO_PCLK,
+ CLK_CON_GAT_GOUT_AUD_PPMU_ACLK,
+ CLK_CON_GAT_GOUT_AUD_PPMU_PCLK,
+ CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1,
+ CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK,
+ CLK_CON_GAT_GOUT_AUD_WDT_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_AUD */
+PNAME(mout_aud_pll_p) = { "oscclk", "fout_aud_pll" };
+PNAME(mout_aud_cpu_user_p) = { "oscclk", "dout_aud" };
+PNAME(mout_aud_cpu_p) = { "dout_aud_cpu", "mout_aud_cpu_user" };
+PNAME(mout_aud_cpu_hch_p) = { "mout_aud_cpu", "oscclk" };
+PNAME(mout_aud_uaif0_p) = { "dout_aud_uaif0", "ioclk_audiocdclk0" };
+PNAME(mout_aud_uaif1_p) = { "dout_aud_uaif1", "ioclk_audiocdclk1" };
+PNAME(mout_aud_uaif2_p) = { "dout_aud_uaif2", "ioclk_audiocdclk2" };
+PNAME(mout_aud_uaif3_p) = { "dout_aud_uaif3", "ioclk_audiocdclk3" };
+PNAME(mout_aud_uaif4_p) = { "dout_aud_uaif4", "ioclk_audiocdclk4" };
+PNAME(mout_aud_uaif5_p) = { "dout_aud_uaif5", "ioclk_audiocdclk5" };
+PNAME(mout_aud_uaif6_p) = { "dout_aud_uaif6", "ioclk_audiocdclk6" };
+PNAME(mout_aud_tick_usb_user_p) = { "oscclk", "tick_usb" };
+PNAME(mout_aud_fm_p) = { "oscclk", "dout_aud_fm_spdy" };
+
+/*
+ * Do not provide PLL table to PLL_AUD, as MANUAL_PLL_CTRL bit is not set
+ * for that PLL by default, so set_rate operation would fail.
+ */
+static const struct samsung_pll_clock aud_pll_clks[] __initconst = {
+ PLL(pll_0831x, CLK_FOUT_AUD_PLL, "fout_aud_pll", "oscclk",
+ PLL_LOCKTIME_PLL_AUD, PLL_CON3_PLL_AUD, NULL),
+};
+
+static const struct samsung_fixed_rate_clock aud_fixed_clks[] __initconst = {
+ FRATE(IOCLK_AUDIOCDCLK0, "ioclk_audiocdclk0", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK1, "ioclk_audiocdclk1", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK2, "ioclk_audiocdclk2", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK3, "ioclk_audiocdclk3", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK4, "ioclk_audiocdclk4", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK5, "ioclk_audiocdclk5", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK6, "ioclk_audiocdclk6", NULL, 0, 25000000),
+ FRATE(TICK_USB, "tick_usb", NULL, 0, 60000000),
+};
+
+static const struct samsung_mux_clock aud_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_AUD_PLL, "mout_aud_pll", mout_aud_pll_p,
+ PLL_CON0_PLL_AUD, 4, 1),
+ MUX(CLK_MOUT_AUD_CPU_USER, "mout_aud_cpu_user", mout_aud_cpu_user_p,
+ PLL_CON0_MUX_CLKCMU_AUD_CPU_USER, 4, 1),
+ MUX(CLK_MOUT_AUD_TICK_USB_USER, "mout_aud_tick_usb_user",
+ mout_aud_tick_usb_user_p,
+ PLL_CON0_MUX_TICK_USB_USER, 4, 1),
+ MUX(CLK_MOUT_AUD_CPU, "mout_aud_cpu", mout_aud_cpu_p,
+ CLK_CON_MUX_MUX_CLK_AUD_CPU, 0, 1),
+ MUX(CLK_MOUT_AUD_CPU_HCH, "mout_aud_cpu_hch", mout_aud_cpu_hch_p,
+ CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF0, "mout_aud_uaif0", mout_aud_uaif0_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF0, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF1, "mout_aud_uaif1", mout_aud_uaif1_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF1, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF2, "mout_aud_uaif2", mout_aud_uaif2_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF2, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF3, "mout_aud_uaif3", mout_aud_uaif3_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF3, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF4, "mout_aud_uaif4", mout_aud_uaif4_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF4, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF5, "mout_aud_uaif5", mout_aud_uaif5_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF5, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF6, "mout_aud_uaif6", mout_aud_uaif6_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF6, 0, 1),
+ MUX(CLK_MOUT_AUD_FM, "mout_aud_fm", mout_aud_fm_p,
+ CLK_CON_MUX_MUX_CLK_AUD_FM, 0, 1),
+};
+
+static const struct samsung_div_clock aud_div_clks[] __initconst = {
+ DIV(CLK_DOUT_AUD_CPU, "dout_aud_cpu", "mout_aud_pll",
+ CLK_CON_DIV_DIV_CLK_AUD_CPU, 0, 4),
+ DIV(CLK_DOUT_AUD_BUSD, "dout_aud_busd", "mout_aud_pll",
+ CLK_CON_DIV_DIV_CLK_AUD_BUSD, 0, 4),
+ DIV(CLK_DOUT_AUD_BUSP, "dout_aud_busp", "mout_aud_pll",
+ CLK_CON_DIV_DIV_CLK_AUD_BUSP, 0, 4),
+ DIV(CLK_DOUT_AUD_AUDIF, "dout_aud_audif", "mout_aud_pll",
+ CLK_CON_DIV_DIV_CLK_AUD_AUDIF, 0, 9),
+ DIV(CLK_DOUT_AUD_CPU_ACLK, "dout_aud_cpu_aclk", "mout_aud_cpu_hch",
+ CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK, 0, 3),
+ DIV(CLK_DOUT_AUD_CPU_PCLKDBG, "dout_aud_cpu_pclkdbg",
+ "mout_aud_cpu_hch",
+ CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG, 0, 3),
+ DIV(CLK_DOUT_AUD_MCLK, "dout_aud_mclk", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_MCLK, 0, 2),
+ DIV(CLK_DOUT_AUD_CNT, "dout_aud_cnt", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_CNT, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF0, "dout_aud_uaif0", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF0, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF1, "dout_aud_uaif1", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF1, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF2, "dout_aud_uaif2", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF2, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF3, "dout_aud_uaif3", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF3, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF4, "dout_aud_uaif4", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF4, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF5, "dout_aud_uaif5", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF5, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF6, "dout_aud_uaif6", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF6, 0, 10),
+ DIV(CLK_DOUT_AUD_FM_SPDY, "dout_aud_fm_spdy", "mout_aud_tick_usb_user",
+ CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY, 0, 1),
+ DIV(CLK_DOUT_AUD_FM, "dout_aud_fm", "mout_aud_fm",
+ CLK_CON_DIV_DIV_CLK_AUD_FM, 0, 10),
+};
+
+static const struct samsung_gate_clock aud_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_AUD_CA32_CCLK, "gout_aud_ca32_cclk", "mout_aud_cpu_hch",
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_ASB_CCLK, "gout_aud_asb_cclk", "dout_aud_cpu_aclk",
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_DAP_CCLK, "gout_aud_dap_cclk", "dout_aud_cpu_pclkdbg",
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP, 21, 0, 0),
+ /* TODO: Should be enabled in ABOX driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_AUD_ABOX_ACLK, "gout_aud_abox_aclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_ABOX_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_AUD_GPIO_PCLK, "gout_aud_gpio_pclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_GPIO_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_PPMU_ACLK, "gout_aud_ppmu_aclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_PPMU_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_PPMU_PCLK, "gout_aud_ppmu_pclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_PPMU_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_SYSMMU_CLK, "gout_aud_sysmmu_clk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_SYSREG_PCLK, "gout_aud_sysreg_pclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_WDT_PCLK, "gout_aud_wdt_pclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_WDT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_TZPC_PCLK, "gout_aud_tzpc_pclk", "dout_aud_busp",
+ CLK_CON_GAT_GOUT_AUD_TZPC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_CODEC_MCLK, "gout_aud_codec_mclk", "dout_aud_mclk",
+ CLK_CON_GAT_GOUT_AUD_CODEC_MCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_CNT_BCLK, "gout_aud_cnt_bclk", "dout_aud_cnt",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF0_BCLK, "gout_aud_uaif0_bclk", "mout_aud_uaif0",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF1_BCLK, "gout_aud_uaif1_bclk", "mout_aud_uaif1",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF2_BCLK, "gout_aud_uaif2_bclk", "mout_aud_uaif2",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF3_BCLK, "gout_aud_uaif3_bclk", "mout_aud_uaif3",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF4_BCLK, "gout_aud_uaif4_bclk", "mout_aud_uaif4",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF5_BCLK, "gout_aud_uaif5_bclk", "mout_aud_uaif5",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF6_BCLK, "gout_aud_uaif6_bclk", "mout_aud_uaif6",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_SPDY_BCLK, "gout_aud_spdy_bclk", "dout_aud_fm",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info aud_cmu_info __initconst = {
+ .pll_clks = aud_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(aud_pll_clks),
+ .mux_clks = aud_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
+ .div_clks = aud_div_clks,
+ .nr_div_clks = ARRAY_SIZE(aud_div_clks),
+ .gate_clks = aud_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(aud_gate_clks),
+ .fixed_clks = aud_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(aud_fixed_clks),
+ .nr_clk_ids = AUD_NR_CLK,
+ .clk_regs = aud_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
+ .clk_name = "dout_aud",
+};
+
/* ---- CMU_CMGP ------------------------------------------------------------ */
/* Register Offset definitions for CMU_CMGP (0x11c00000) */
@@ -599,7 +1025,7 @@ static const unsigned long hsi_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY,
};
-/* List of parent clocks for Muxes in CMU_PERI */
+/* List of parent clocks for Muxes in CMU_HSI */
PNAME(mout_hsi_bus_user_p) = { "oscclk", "dout_hsi_bus" };
PNAME(mout_hsi_mmc_card_user_p) = { "oscclk", "dout_hsi_mmc_card" };
PNAME(mout_hsi_usb20drd_user_p) = { "oscclk", "dout_hsi_usb20drd" };
@@ -654,6 +1080,247 @@ static const struct samsung_cmu_info hsi_cmu_info __initconst = {
.clk_name = "dout_hsi_bus",
};
+/* ---- CMU_IS -------------------------------------------------------------- */
+
+#define PLL_CON0_MUX_CLKCMU_IS_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_IS_GDC_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_IS_ITP_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_IS_VRA_USER 0x0630
+#define CLK_CON_DIV_DIV_CLK_IS_BUSP 0x1800
+#define CLK_CON_GAT_CLK_IS_CMU_IS_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_IS_CSIS0_ACLK 0x2040
+#define CLK_CON_GAT_GOUT_IS_CSIS1_ACLK 0x2044
+#define CLK_CON_GAT_GOUT_IS_CSIS2_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_IS_TZPC_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA 0x2050
+#define CLK_CON_GAT_GOUT_IS_CLK_GDC 0x2054
+#define CLK_CON_GAT_GOUT_IS_CLK_IPP 0x2058
+#define CLK_CON_GAT_GOUT_IS_CLK_ITP 0x205c
+#define CLK_CON_GAT_GOUT_IS_CLK_MCSC 0x2060
+#define CLK_CON_GAT_GOUT_IS_CLK_VRA 0x2064
+#define CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK 0x2074
+#define CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK 0x207c
+#define CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1 0x2098
+#define CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1 0x209c
+#define CLK_CON_GAT_GOUT_IS_SYSREG_PCLK 0x20a0
+
+static const unsigned long is_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_IS_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_IS_GDC_USER,
+ PLL_CON0_MUX_CLKCMU_IS_ITP_USER,
+ PLL_CON0_MUX_CLKCMU_IS_VRA_USER,
+ CLK_CON_DIV_DIV_CLK_IS_BUSP,
+ CLK_CON_GAT_CLK_IS_CMU_IS_PCLK,
+ CLK_CON_GAT_GOUT_IS_CSIS0_ACLK,
+ CLK_CON_GAT_GOUT_IS_CSIS1_ACLK,
+ CLK_CON_GAT_GOUT_IS_CSIS2_ACLK,
+ CLK_CON_GAT_GOUT_IS_TZPC_PCLK,
+ CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA,
+ CLK_CON_GAT_GOUT_IS_CLK_GDC,
+ CLK_CON_GAT_GOUT_IS_CLK_IPP,
+ CLK_CON_GAT_GOUT_IS_CLK_ITP,
+ CLK_CON_GAT_GOUT_IS_CLK_MCSC,
+ CLK_CON_GAT_GOUT_IS_CLK_VRA,
+ CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK,
+ CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK,
+ CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK,
+ CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK,
+ CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1,
+ CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1,
+ CLK_CON_GAT_GOUT_IS_SYSREG_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_IS */
+PNAME(mout_is_bus_user_p) = { "oscclk", "dout_is_bus" };
+PNAME(mout_is_itp_user_p) = { "oscclk", "dout_is_itp" };
+PNAME(mout_is_vra_user_p) = { "oscclk", "dout_is_vra" };
+PNAME(mout_is_gdc_user_p) = { "oscclk", "dout_is_gdc" };
+
+static const struct samsung_mux_clock is_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_IS_BUS_USER, "mout_is_bus_user", mout_is_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_IS_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_IS_ITP_USER, "mout_is_itp_user", mout_is_itp_user_p,
+ PLL_CON0_MUX_CLKCMU_IS_ITP_USER, 4, 1),
+ MUX(CLK_MOUT_IS_VRA_USER, "mout_is_vra_user", mout_is_vra_user_p,
+ PLL_CON0_MUX_CLKCMU_IS_VRA_USER, 4, 1),
+ MUX(CLK_MOUT_IS_GDC_USER, "mout_is_gdc_user", mout_is_gdc_user_p,
+ PLL_CON0_MUX_CLKCMU_IS_GDC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock is_div_clks[] __initconst = {
+ DIV(CLK_DOUT_IS_BUSP, "dout_is_busp", "mout_is_bus_user",
+ CLK_CON_DIV_DIV_CLK_IS_BUSP, 0, 2),
+};
+
+static const struct samsung_gate_clock is_gate_clks[] __initconst = {
+ /* TODO: Should be enabled in IS driver */
+ GATE(CLK_GOUT_IS_CMU_IS_PCLK, "gout_is_cmu_is_pclk", "dout_is_busp",
+ CLK_CON_GAT_CLK_IS_CMU_IS_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_IS_CSIS0_ACLK, "gout_is_csis0_aclk", "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CSIS0_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_CSIS1_ACLK, "gout_is_csis1_aclk", "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CSIS1_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_CSIS2_ACLK, "gout_is_csis2_aclk", "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CSIS2_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_TZPC_PCLK, "gout_is_tzpc_pclk", "dout_is_busp",
+ CLK_CON_GAT_GOUT_IS_TZPC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_CSIS_DMA_CLK, "gout_is_csis_dma_clk",
+ "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA, 21, 0, 0),
+ GATE(CLK_GOUT_IS_GDC_CLK, "gout_is_gdc_clk", "mout_is_gdc_user",
+ CLK_CON_GAT_GOUT_IS_CLK_GDC, 21, 0, 0),
+ GATE(CLK_GOUT_IS_IPP_CLK, "gout_is_ipp_clk", "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CLK_IPP, 21, 0, 0),
+ GATE(CLK_GOUT_IS_ITP_CLK, "gout_is_itp_clk", "mout_is_itp_user",
+ CLK_CON_GAT_GOUT_IS_CLK_ITP, 21, 0, 0),
+ GATE(CLK_GOUT_IS_MCSC_CLK, "gout_is_mcsc_clk", "mout_is_itp_user",
+ CLK_CON_GAT_GOUT_IS_CLK_MCSC, 21, 0, 0),
+ GATE(CLK_GOUT_IS_VRA_CLK, "gout_is_vra_clk", "mout_is_vra_user",
+ CLK_CON_GAT_GOUT_IS_CLK_VRA, 21, 0, 0),
+ GATE(CLK_GOUT_IS_PPMU_IS0_ACLK, "gout_is_ppmu_is0_aclk",
+ "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_PPMU_IS0_PCLK, "gout_is_ppmu_is0_pclk", "dout_is_busp",
+ CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_PPMU_IS1_ACLK, "gout_is_ppmu_is1_aclk",
+ "mout_is_itp_user",
+ CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_PPMU_IS1_PCLK, "gout_is_ppmu_is1_pclk", "dout_is_busp",
+ CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_SYSMMU_IS0_CLK, "gout_is_sysmmu_is0_clk",
+ "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1, 21, 0, 0),
+ GATE(CLK_GOUT_IS_SYSMMU_IS1_CLK, "gout_is_sysmmu_is1_clk",
+ "mout_is_itp_user",
+ CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1, 21, 0, 0),
+ GATE(CLK_GOUT_IS_SYSREG_PCLK, "gout_is_sysreg_pclk", "dout_is_busp",
+ CLK_CON_GAT_GOUT_IS_SYSREG_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info is_cmu_info __initconst = {
+ .mux_clks = is_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(is_mux_clks),
+ .div_clks = is_div_clks,
+ .nr_div_clks = ARRAY_SIZE(is_div_clks),
+ .gate_clks = is_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(is_gate_clks),
+ .nr_clk_ids = IS_NR_CLK,
+ .clk_regs = is_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(is_clk_regs),
+ .clk_name = "dout_is_bus",
+};
+
+/* ---- CMU_MFCMSCL --------------------------------------------------------- */
+
+#define PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER 0x0630
+#define CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP 0x1800
+#define CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK 0x203c
+#define CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK 0x204c
+#define CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK 0x2050
+#define CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK 0x2054
+#define CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1 0x2074
+#define CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK 0x2078
+
+static const unsigned long mfcmscl_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER,
+ CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP,
+ CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1,
+ CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_MFCMSCL */
+PNAME(mout_mfcmscl_mfc_user_p) = { "oscclk", "dout_mfcmscl_mfc" };
+PNAME(mout_mfcmscl_m2m_user_p) = { "oscclk", "dout_mfcmscl_m2m" };
+PNAME(mout_mfcmscl_mcsc_user_p) = { "oscclk", "dout_mfcmscl_mcsc" };
+PNAME(mout_mfcmscl_jpeg_user_p) = { "oscclk", "dout_mfcmscl_jpeg" };
+
+static const struct samsung_mux_clock mfcmscl_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MFCMSCL_MFC_USER, "mout_mfcmscl_mfc_user",
+ mout_mfcmscl_mfc_user_p,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER, 4, 1),
+ MUX(CLK_MOUT_MFCMSCL_M2M_USER, "mout_mfcmscl_m2m_user",
+ mout_mfcmscl_m2m_user_p,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER, 4, 1),
+ MUX(CLK_MOUT_MFCMSCL_MCSC_USER, "mout_mfcmscl_mcsc_user",
+ mout_mfcmscl_mcsc_user_p,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER, 4, 1),
+ MUX(CLK_MOUT_MFCMSCL_JPEG_USER, "mout_mfcmscl_jpeg_user",
+ mout_mfcmscl_jpeg_user_p,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER, 4, 1),
+};
+
+static const struct samsung_div_clock mfcmscl_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MFCMSCL_BUSP, "dout_mfcmscl_busp", "mout_mfcmscl_mfc_user",
+ CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock mfcmscl_gate_clks[] __initconst = {
+ /* TODO: Should be enabled in MFC driver */
+ GATE(CLK_GOUT_MFCMSCL_CMU_MFCMSCL_PCLK, "gout_mfcmscl_cmu_mfcmscl_pclk",
+ "dout_mfcmscl_busp", CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_MFCMSCL_TZPC_PCLK, "gout_mfcmscl_tzpc_pclk",
+ "dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_JPEG_ACLK, "gout_mfcmscl_jpeg_aclk",
+ "mout_mfcmscl_jpeg_user", CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_M2M_ACLK, "gout_mfcmscl_m2m_aclk",
+ "mout_mfcmscl_m2m_user", CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_MCSC_CLK, "gout_mfcmscl_mcsc_clk",
+ "mout_mfcmscl_mcsc_user", CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_MFC_ACLK, "gout_mfcmscl_mfc_aclk",
+ "mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_PPMU_ACLK, "gout_mfcmscl_ppmu_aclk",
+ "mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_PPMU_PCLK, "gout_mfcmscl_ppmu_pclk",
+ "dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_SYSMMU_CLK, "gout_mfcmscl_sysmmu_clk",
+ "mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_SYSREG_PCLK, "gout_mfcmscl_sysreg_pclk",
+ "dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info mfcmscl_cmu_info __initconst = {
+ .mux_clks = mfcmscl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfcmscl_mux_clks),
+ .div_clks = mfcmscl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfcmscl_div_clks),
+ .gate_clks = mfcmscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mfcmscl_gate_clks),
+ .nr_clk_ids = MFCMSCL_NR_CLK,
+ .clk_regs = mfcmscl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfcmscl_clk_regs),
+ .clk_name = "dout_mfcmscl_mfc",
+};
+
/* ---- CMU_PERI ------------------------------------------------------------ */
/* Register Offset definitions for CMU_PERI (0x10030000) */
@@ -963,7 +1630,7 @@ static const unsigned long dpu_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK,
};
-/* List of parent clocks for Muxes in CMU_CORE */
+/* List of parent clocks for Muxes in CMU_DPU */
PNAME(mout_dpu_user_p) = { "oscclk", "dout_dpu" };
static const struct samsung_mux_clock dpu_mux_clks[] __initconst = {
@@ -1028,12 +1695,21 @@ static const struct of_device_id exynos850_cmu_of_match[] = {
.compatible = "samsung,exynos850-cmu-apm",
.data = &apm_cmu_info,
}, {
+ .compatible = "samsung,exynos850-cmu-aud",
+ .data = &aud_cmu_info,
+ }, {
.compatible = "samsung,exynos850-cmu-cmgp",
.data = &cmgp_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-hsi",
.data = &hsi_cmu_info,
}, {
+ .compatible = "samsung,exynos850-cmu-is",
+ .data = &is_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-mfcmscl",
+ .data = &mfcmscl_cmu_info,
+ }, {
.compatible = "samsung,exynos850-cmu-core",
.data = &core_cmu_info,
}, {
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
index d9e1f8e4a7b4..7b16320bba66 100644
--- a/drivers/clk/samsung/clk-exynosautov9.c
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -1067,6 +1067,373 @@ static const struct samsung_cmu_info core_cmu_info __initconst = {
.clk_name = "dout_clkcmu_core_bus",
};
+/* ---- CMU_FSYS0 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS0 (0x17700000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_FSYS0_PCIE_USER 0x0610
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK 0x2000
+
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_PHY_REFCLK_IN 0x2004
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PHY_REFCLK_IN 0x2008
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_PHY_REFCLK_IN 0x200c
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PHY_REFCLK_IN 0x2010
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_PHY_REFCLK_IN 0x2014
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PHY_REFCLK_IN 0x2018
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_DBI_ACLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_MSTR_ACLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_SLV_ACLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_DBI_ACLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_MSTR_ACLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_SLV_ACLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PIPE_CLK 0x207c
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_DBI_ACLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_MSTR_ACLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_SLV_ACLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_DBI_ACLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_MSTR_ACLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_SLV_ACLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PIPE_CLK 0x20a4
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_DBI_ACLK 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_MSTR_ACLK 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_SLV_ACLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_DBI_ACLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_MSTR_ACLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_SLV_ACLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PIPE_CLK 0x20cc
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L0_CLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L1_CLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_4L_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L0_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L1_CLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_4L_CLK 0x20e8
+
+
+static const unsigned long fsys0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_PCIE_USER,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PHY_REFCLK_IN,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PIPE_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PIPE_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PIPE_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L0_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L1_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_4L_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L0_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L1_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_4L_CLK,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS0 */
+PNAME(mout_fsys0_bus_user_p) = { "oscclk", "dout_clkcmu_fsys0_bus" };
+PNAME(mout_fsys0_pcie_user_p) = { "oscclk", "dout_clkcmu_fsys0_pcie" };
+
+static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS0_BUS_USER, "mout_fsys0_bus_user",
+ mout_fsys0_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS0_PCIE_USER, "mout_fsys0_pcie_user",
+ mout_fsys0_pcie_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_PCIE_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS0_BUS_PCLK, "gout_fsys0_bus_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+
+ /* Gen3 2L0 */
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_REFCLK,
+ "gout_fsys0_pcie_gen3_2l0_x1_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_REFCLK,
+ "gout_fsys0_pcie_gen3_2l0_x2_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x1_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x1_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x1_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x2_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x2_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x2_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3A_2L0_CLK,
+ "gout_fsys0_pcie_gen3a_2l0_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L0_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3B_2L0_CLK,
+ "gout_fsys0_pcie_gen3b_2l0_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L0_CLK,
+ 21, 0, 0),
+
+ /* Gen3 2L1 */
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_REFCLK,
+ "gout_fsys0_pcie_gen3_2l1_x1_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_REFCLK,
+ "gout_fsys0_pcie_gen3_2l1_x2_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x1_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x1_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x1_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x2_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x2_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x2_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3A_2L1_CLK,
+ "gout_fsys0_pcie_gen3a_2l1_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L1_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3B_2L1_CLK,
+ "gout_fsys0_pcie_gen3b_2l1_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L1_CLK,
+ 21, 0, 0),
+
+ /* Gen3 4L */
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_REFCLK,
+ "gout_fsys0_pcie_gen3_4l_x2_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_REFCLK,
+ "gout_fsys0_pcie_gen3_4l_x4_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x2_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x2_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x2_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x4_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x4_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x4_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3A_4L_CLK,
+ "gout_fsys0_pcie_gen3a_4l_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_4L_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK,
+ "gout_fsys0_pcie_gen3b_4l_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_4L_CLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info fsys0_cmu_info __initconst = {
+ .mux_clks = fsys0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
+ .gate_clks = fsys0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
+ .nr_clk_ids = FSYS0_NR_CLK,
+ .clk_regs = fsys0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
+ .clk_name = "dout_clkcmu_fsys0_bus",
+};
+
+/* ---- CMU_FSYS1 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS1 (0x17040000) */
+#define PLL_LOCKTIME_PLL_MMC 0x0000
+#define PLL_CON0_PLL_MMC 0x0100
+#define PLL_CON3_PLL_MMC 0x010c
+#define PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_FSYS1_USBDRD_USER 0x0620
+
+#define CLK_CON_MUX_MUX_CLK_FSYS1_MMC_CARD 0x1000
+#define CLK_CON_DIV_DIV_CLK_FSYS1_MMC_CARD 0x1800
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x202c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2028
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB20DRD_0_REF_CLK_40 0x204c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB20DRD_1_REF_CLK_40 0x2058
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB30DRD_0_REF_CLK_40 0x2064
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB30DRD_1_REF_CLK_40 0x2070
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB2_0_IPCLKPORT_ACLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB2_1_IPCLKPORT_ACLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB3_0_IPCLKPORT_ACLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB3_1_IPCLKPORT_ACLK 0x2080
+
+static const unsigned long fsys1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER,
+};
+
+static const struct samsung_pll_clock fsys1_pll_clks[] __initconst = {
+ PLL(pll_0831x, FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS1 */
+PNAME(mout_fsys1_bus_user_p) = { "oscclk", "dout_clkcmu_fsys1_bus" };
+PNAME(mout_fsys1_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
+PNAME(mout_fsys1_mmc_card_user_p) = { "oscclk", "gout_clkcmu_fsys1_mmc_card" };
+PNAME(mout_fsys1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_fsys1_usbdrd" };
+PNAME(mout_fsys1_mmc_card_p) = { "mout_fsys1_mmc_card_user",
+ "mout_fsys1_mmc_pll" };
+
+static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS1_BUS_USER, "mout_fsys1_bus_user",
+ mout_fsys1_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS1_MMC_PLL, "mout_fsys1_mmc_pll", mout_fsys1_mmc_pll_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+ MUX(CLK_MOUT_FSYS1_MMC_CARD_USER, "mout_fsys1_mmc_card_user",
+ mout_fsys1_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_FSYS1_USBDRD_USER, "mout_fsys1_usbdrd_user",
+ mout_fsys1_usbdrd_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_USBDRD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_FSYS1_MMC_CARD, "mout_fsys1_mmc_card",
+ mout_fsys1_mmc_card_p, CLK_CON_MUX_MUX_CLK_FSYS1_MMC_CARD,
+ 0, 1),
+};
+
+static const struct samsung_div_clock fsys1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_FSYS1_MMC_CARD, "dout_fsys1_mmc_card",
+ "mout_fsys1_mmc_card",
+ CLK_CON_DIV_DIV_CLK_FSYS1_MMC_CARD, 0, 9),
+};
+
+static const struct samsung_gate_clock fsys1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS1_PCLK, "gout_fsys1_pclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN, "gout_fsys1_mmc_card_sdclkin",
+ "dout_fsys1_mmc_card",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS1_MMC_CARD_ACLK, "gout_fsys1_mmc_card_aclk",
+ "dout_fsys1_mmc_card",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB20DRD_0_REFCLK, "gout_fsys1_usb20drd_0_refclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB20DRD_0_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB20DRD_1_REFCLK, "gout_fsys1_usb20drd_1_refclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB20DRD_1_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB30DRD_0_REFCLK, "gout_fsys1_usb30drd_0_refclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB30DRD_0_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB30DRD_1_REFCLK, "gout_fsys1_usb30drd_1_refclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB30DRD_1_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB20_0_ACLK, "gout_fsys1_usb20_0_aclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB2_0_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB20_1_ACLK, "gout_fsys1_usb20_1_aclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB2_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB30_0_ACLK, "gout_fsys1_usb30_0_aclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB3_0_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB30_1_ACLK, "gout_fsys1_usb30_1_aclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB3_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info fsys1_cmu_info __initconst = {
+ .pll_clks = fsys1_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(fsys1_pll_clks),
+ .mux_clks = fsys1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
+ .div_clks = fsys1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(fsys1_div_clks),
+ .gate_clks = fsys1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
+ .nr_clk_ids = FSYS1_NR_CLK,
+ .clk_regs = fsys1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
+ .clk_name = "dout_clkcmu_fsys1_bus",
+};
+
/* ---- CMU_FSYS2 ---------------------------------------------------------- */
/* Register Offset definitions for CMU_FSYS2 (0x17c00000) */
@@ -1170,9 +1537,9 @@ static const struct samsung_cmu_info fsys2_cmu_info __initconst = {
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2 0x2058
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3 0x205c
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2060
-#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2064
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2070
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2074
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x204c
@@ -1330,6 +1697,10 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
"mout_peric0_bus_user",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0,
21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_1, "gout_peric0_pclk_1",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
GATE(CLK_GOUT_PERIC0_PCLK_2, "gout_peric0_pclk_2",
"mout_peric0_bus_user",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2,
@@ -1418,14 +1789,14 @@ static const struct samsung_cmu_info peric0_cmu_info __initconst = {
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11 0x2020
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0 0x2044
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1 0x2048
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2 0x2058
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3 0x205c
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x2060
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7 0x206c
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5 0x2064
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6 0x2068
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8 0x2070
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9 0x2070
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10 0x204c
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11 0x2050
@@ -1463,9 +1834,9 @@ static const unsigned long peric1_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
- CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
@@ -1581,6 +1952,10 @@ static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
"mout_peric1_bus_user",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0,
21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_1, "gout_peric1_pclk_1",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
GATE(CLK_GOUT_PERIC1_PCLK_2, "gout_peric1_pclk_2",
"mout_peric1_bus_user",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2,
@@ -1702,6 +2077,12 @@ static const struct of_device_id exynosautov9_cmu_of_match[] = {
.compatible = "samsung,exynosautov9-cmu-core",
.data = &core_cmu_info,
}, {
+ .compatible = "samsung,exynosautov9-cmu-fsys0",
+ .data = &fsys0_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov9-cmu-fsys1",
+ .data = &fsys1_cmu_info,
+ }, {
.compatible = "samsung,exynosautov9-cmu-fsys2",
.data = &fsys2_cmu_info,
}, {
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 41717ff707f6..ba8791303156 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/clk/spear.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 490701ac9e93..c192a9141b86 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -7,6 +7,7 @@
*/
#include <linux/clkdev.h>
+#include <linux/clk/spear.h>
#include <linux/io.h>
#include <linux/spinlock_types.h>
#include "clk.h"
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index e18c80fbe804..c744bd9d2f96 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -21,4 +21,10 @@ config SPRD_SC9863A_CLK
help
Support for the global clock controller on sc9863a devices.
Say Y if you want to use peripheral devices on sc9863a SoC.
+
+config SPRD_UMS512_CLK
+ tristate "Support for the Spreadtrum UMS512 clocks"
+ help
+ Support for the global clock controller on ums512 devices.
+ Say Y if you want to use peripheral devices on ums512 SoC.
endif
diff --git a/drivers/clk/sprd/Makefile b/drivers/clk/sprd/Makefile
index 41d90e0d7863..f25b2c3904fb 100644
--- a/drivers/clk/sprd/Makefile
+++ b/drivers/clk/sprd/Makefile
@@ -11,3 +11,4 @@ clk-sprd-y += pll.o
## SoC support
obj-$(CONFIG_SPRD_SC9860_CLK) += sc9860-clk.o
obj-$(CONFIG_SPRD_SC9863A_CLK) += sc9863a-clk.o
+obj-$(CONFIG_SPRD_UMS512_CLK) += ums512-clk.o
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index d620bbbcdfc8..ce81e4087a8f 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -41,7 +41,7 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
{
void __iomem *base;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
+ struct device_node *node = dev->of_node, *np;
struct regmap *regmap;
if (of_find_property(node, "sprd,syscon", NULL)) {
@@ -50,9 +50,10 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
pr_err("%s: failed to get syscon regmap\n", __func__);
return PTR_ERR(regmap);
}
- } else if (of_device_is_compatible(of_get_parent(dev->of_node),
- "syscon")) {
- regmap = device_node_to_regmap(of_get_parent(dev->of_node));
+ } else if (of_device_is_compatible(np = of_get_parent(node), "syscon") ||
+ (of_node_put(np), 0)) {
+ regmap = device_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap from its parent.\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
new file mode 100644
index 000000000000..fc25bdd85e4e
--- /dev/null
+++ b/drivers/clk/sprd/ums512-clk.c
@@ -0,0 +1,2202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Unisoc UMS512 clock driver
+ *
+ * Copyright (C) 2022 Unisoc, Inc.
+ * Author: Xiaolong Zhang <xiaolong.zhang@unisoc.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/sprd,ums512-clk.h>
+
+#include "common.h"
+#include "composite.h"
+#include "div.h"
+#include "gate.h"
+#include "mux.h"
+#include "pll.h"
+
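+/* All muxes use CLK_GET_RATE_NOCACHE so rates are always read back from the
+ * hardware, and CLK_SET_RATE_NO_REPARENT so a rate change never switches the
+ * parent automatically.
+ */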
+#define UMS512_MUX_FLAG \
+ (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT)
+
+/* pll gate clock */
+/* Some pll clocks are configured with CLK_IGNORE_UNUSED because hw dvfs does
+ * not go through the clock interface and therefore cannot gate these pll
+ * clocks itself.
+ */
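+/* Fixed-factor clocks derived from the external 26 MHz crystal and the
+ * internal 100 MHz RC oscillator.
+ */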
+static CLK_FIXED_FACTOR_FW_NAME(clk_26m_aud, "clk-26m-aud", "ext-26m", 1, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_13m, "clk-13m", "ext-26m", 2, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_6m5, "clk-6m5", "ext-26m", 4, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_4m3, "clk-4m3", "ext-26m", 6, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_2m, "clk-2m", "ext-26m", 13, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_1m, "clk-1m", "ext-26m", 26, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_250k, "clk-250k", "ext-26m", 104, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_25m, "rco-25m", "rco-100m", 4, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_4m, "rco-4m", "rco-100m", 25, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_2m, "rco-2m", "rco-100m", 50, 1, 0);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", 0x8c,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x9c,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0xa0,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(twpll_gate, "twpll-gate", "ext-26m", 0xa4,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(rpll_gate, "rpll-gate", "ext-26m", 0xac,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(cppll_gate, "cppll-gate", "ext-26m", 0xe4,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x190,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x194,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x198,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+
+static struct sprd_clk_common *ums512_pmu_gate_clks[] = {
+ /* address base is 0x327e0000 */
+ &isppll_gate.common,
+ &dpll0_gate.common,
+ &dpll1_gate.common,
+ &lpll_gate.common,
+ &twpll_gate.common,
+ &gpll_gate.common,
+ &rpll_gate.common,
+ &cppll_gate.common,
+ &mpll0_gate.common,
+ &mpll1_gate.common,
+ &mpll2_gate.common,
+};
+
+static struct clk_hw_onecell_data ums512_pmu_gate_hws = {
+ .hws = {
+ [CLK_26M_AUD] = &clk_26m_aud.hw,
+ [CLK_13M] = &clk_13m.hw,
+ [CLK_6M5] = &clk_6m5.hw,
+ [CLK_4M3] = &clk_4m3.hw,
+ [CLK_2M] = &clk_2m.hw,
+ [CLK_1M] = &clk_1m.hw,
+ [CLK_250K] = &clk_250k.hw,
+ [CLK_RCO_25M] = &rco_25m.hw,
+ [CLK_RCO_4M] = &rco_4m.hw,
+ [CLK_RCO_2M] = &rco_2m.hw,
+ [CLK_ISPPLL_GATE] = &isppll_gate.common.hw,
+ [CLK_DPLL0_GATE] = &dpll0_gate.common.hw,
+ [CLK_DPLL1_GATE] = &dpll1_gate.common.hw,
+ [CLK_LPLL_GATE] = &lpll_gate.common.hw,
+ [CLK_TWPLL_GATE] = &twpll_gate.common.hw,
+ [CLK_GPLL_GATE] = &gpll_gate.common.hw,
+ [CLK_RPLL_GATE] = &rpll_gate.common.hw,
+ [CLK_CPPLL_GATE] = &cppll_gate.common.hw,
+ [CLK_MPLL0_GATE] = &mpll0_gate.common.hw,
+ [CLK_MPLL1_GATE] = &mpll1_gate.common.hw,
+ [CLK_MPLL2_GATE] = &mpll2_gate.common.hw,
+ },
+ .num = CLK_PMU_GATE_NUM,
+};
+
+static struct sprd_clk_desc ums512_pmu_gate_desc = {
+ .clk_clks = ums512_pmu_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_pmu_gate_clks),
+ .hw_clks = &ums512_pmu_gate_hws,
+};
+
+/* pll clock at g0 */
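+/* In the ibias tables below the first entry gives the number of frequency
+ * values that follow.
+ */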
+static const u64 itable_dpll0[7] = { 6, 0, 0,
+ 1173000000ULL, 1475000000ULL,
+ 1855000000ULL, 1866000000ULL };
+
+static struct clk_bit_field f_dpll0[PLL_FACT_MAX] = {
+ { .shift = 18, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 67, .width = 1 }, /* mod_en */
+ { .shift = 1, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 4, .width = 3 }, /* icp */
+ { .shift = 7, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 0, .width = 0 }, /* postdiv */
+};
+static SPRD_PLL_HW(dpll0, "dpll0", &dpll0_gate.common.hw, 0x4, 3,
+ itable_dpll0, f_dpll0, 240, 1000, 1000, 0, 0);
+static CLK_FIXED_FACTOR_HW(dpll0_58m31, "dpll0-58m31", &dpll0.common.hw,
+ 32, 1, 0);
+
+static struct sprd_clk_common *ums512_g0_pll_clks[] = {
+ /* address base is 0x32390000 */
+ &dpll0.common,
+};
+
+static struct clk_hw_onecell_data ums512_g0_pll_hws = {
+ .hws = {
+ [CLK_DPLL0] = &dpll0.common.hw,
+ [CLK_DPLL0_58M31] = &dpll0_58m31.hw,
+ },
+ .num = CLK_ANLG_PHY_G0_NUM,
+};
+
+static struct sprd_clk_desc ums512_g0_pll_desc = {
+ .clk_clks = ums512_g0_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_g0_pll_clks),
+ .hw_clks = &ums512_g0_pll_hws,
+};
+
+/* pll clock at g2 */
+static const u64 itable_mpll[8] = { 7, 0,
+ 1400000000ULL, 1600000000ULL,
+ 1800000000ULL, 2000000000ULL,
+ 2200000000ULL, 2500000000ULL };
+
+static struct clk_bit_field f_mpll[PLL_FACT_MAX] = {
+ { .shift = 17, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 67, .width = 1 }, /* mod_en */
+ { .shift = 1, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 2, .width = 3 }, /* icp */
+ { .shift = 5, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 77, .width = 1 }, /* postdiv */
+};
+static SPRD_PLL_HW(mpll1, "mpll1", &mpll1_gate.common.hw, 0x0, 3,
+ itable_mpll, f_mpll, 240, 1000, 1000, 1, 1200000000);
+static CLK_FIXED_FACTOR_HW(mpll1_63m38, "mpll1-63m38", &mpll1.common.hw,
+ 32, 1, 0);
+
+static struct sprd_clk_common *ums512_g2_pll_clks[] = {
+ /* address base is 0x323B0000 */
+ &mpll1.common,
+};
+
+static struct clk_hw_onecell_data ums512_g2_pll_hws = {
+ .hws = {
+ [CLK_MPLL1] = &mpll1.common.hw,
+ [CLK_MPLL1_63M38] = &mpll1_63m38.hw,
+ },
+ .num = CLK_ANLG_PHY_G2_NUM,
+};
+
+static struct sprd_clk_desc ums512_g2_pll_desc = {
+ .clk_clks = ums512_g2_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_g2_pll_clks),
+ .hw_clks = &ums512_g2_pll_hws,
+};
+
+/* pll clock at g3 */
+static const u64 itable[8] = { 7, 0, 0,
+ 900000000ULL, 1100000000ULL,
+ 1300000000ULL, 1500000000ULL,
+ 1600000000ULL };
+
+static struct clk_bit_field f_pll[PLL_FACT_MAX] = {
+ { .shift = 18, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 67, .width = 1 }, /* mod_en */
+ { .shift = 1, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 2, .width = 3 }, /* icp */
+ { .shift = 5, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 77, .width = 1 }, /* postdiv */
+};
+
+static SPRD_PLL_FW_NAME(rpll, "rpll", "ext-26m", 0x0, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+
+static SPRD_SC_GATE_CLK_FW_NAME(audio_gate, "audio-gate", "ext-26m", 0x24,
+ 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+
+static struct clk_bit_field f_mpll2[PLL_FACT_MAX] = {
+ { .shift = 16, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 67, .width = 1 }, /* mod_en */
+ { .shift = 1, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 2, .width = 3 }, /* icp */
+ { .shift = 5, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 77, .width = 1 }, /* postdiv */
+};
+static SPRD_PLL_HW(mpll0, "mpll0", &mpll0_gate.common.hw, 0x54, 3,
+ itable_mpll, f_mpll, 240, 1000, 1000, 1, 1200000000);
+static CLK_FIXED_FACTOR_HW(mpll0_56m88, "mpll0-56m88", &mpll0.common.hw,
+ 32, 1, 0);
+
+static const u64 itable_mpll2[6] = { 5,
+ 1200000000ULL, 1400000000ULL,
+ 1600000000ULL, 1800000000ULL,
+ 2000000000ULL };
+
+static SPRD_PLL_HW(mpll2, "mpll2", &mpll2_gate.common.hw, 0x9c, 3,
+ itable_mpll2, f_mpll2, 240, 1000, 1000, 1, 1000000000);
+static CLK_FIXED_FACTOR_HW(mpll2_47m13, "mpll2-47m13", &mpll2.common.hw,
+ 32, 1, 0);
+
+static struct sprd_clk_common *ums512_g3_pll_clks[] = {
+ /* address base is 0x323c0000 */
+ &rpll.common,
+ &audio_gate.common,
+ &mpll0.common,
+ &mpll2.common,
+};
+
+static struct clk_hw_onecell_data ums512_g3_pll_hws = {
+ .hws = {
+ [CLK_RPLL] = &rpll.common.hw,
+ [CLK_AUDIO_GATE] = &audio_gate.common.hw,
+ [CLK_MPLL0] = &mpll0.common.hw,
+ [CLK_MPLL0_56M88] = &mpll0_56m88.hw,
+ [CLK_MPLL2] = &mpll2.common.hw,
+ [CLK_MPLL2_47M13] = &mpll2_47m13.hw,
+ },
+ .num = CLK_ANLG_PHY_G3_NUM,
+};
+
+static struct sprd_clk_desc ums512_g3_pll_desc = {
+ .clk_clks = ums512_g3_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_g3_pll_clks),
+ .hw_clks = &ums512_g3_pll_hws,
+};
+
+/* pll clock at gc */
+static SPRD_PLL_FW_NAME(twpll, "twpll", "ext-26m", 0x0, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(twpll_768m, "twpll-768m", &twpll.common.hw,
+ 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_384m, "twpll-384m", &twpll.common.hw,
+ 4, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_192m, "twpll-192m", &twpll.common.hw,
+ 8, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_96m, "twpll-96m", &twpll.common.hw,
+ 16, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_48m, "twpll-48m", &twpll.common.hw,
+ 32, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_24m, "twpll-24m", &twpll.common.hw,
+ 64, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_12m, "twpll-12m", &twpll.common.hw,
+ 128, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_512m, "twpll-512m", &twpll.common.hw,
+ 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_256m, "twpll-256m", &twpll.common.hw,
+ 6, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_128m, "twpll-128m", &twpll.common.hw,
+ 12, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_64m, "twpll-64m", &twpll.common.hw,
+ 24, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_307m2, "twpll-307m2", &twpll.common.hw,
+ 5, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_219m4, "twpll-219m4", &twpll.common.hw,
+ 7, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_170m6, "twpll-170m6", &twpll.common.hw,
+ 9, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_153m6, "twpll-153m6", &twpll.common.hw,
+ 10, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_76m8, "twpll-76m8", &twpll.common.hw,
+ 20, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_51m2, "twpll-51m2", &twpll.common.hw,
+ 30, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_38m4, "twpll-38m4", &twpll.common.hw,
+ 40, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_19m2, "twpll-19m2", &twpll.common.hw,
+ 80, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_12m29, "twpll-12m29", &twpll.common.hw,
+ 125, 1, 0);
+
+static SPRD_PLL_FW_NAME(lpll, "lpll", "ext-26m", 0x18, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(lpll_614m4, "lpll-614m4", &lpll.common.hw,
+ 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(lpll_409m6, "lpll-409m6", &lpll.common.hw,
+ 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(lpll_245m76, "lpll-245m76", &lpll.common.hw,
+ 5, 1, 0);
+static CLK_FIXED_FACTOR_HW(lpll_30m72, "lpll-30m72", &lpll.common.hw,
+ 40, 1, 0);
+
+static SPRD_PLL_FW_NAME(isppll, "isppll", "ext-26m", 0x30, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(isppll_468m, "isppll-468m", &isppll.common.hw,
+ 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(isppll_78m, "isppll-78m", &isppll.common.hw,
+ 12, 1, 0);
+
+static SPRD_PLL_HW(gpll, "gpll", &gpll_gate.common.hw, 0x48, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(gpll_40m, "gpll-40m", &gpll.common.hw,
+ 20, 1, 0);
+
+static SPRD_PLL_HW(cppll, "cppll", &cppll_gate.common.hw, 0x60, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(cppll_39m32, "cppll-39m32", &cppll.common.hw,
+ 26, 1, 0);
+
+static struct sprd_clk_common *ums512_gc_pll_clks[] = {
+ /* address base is 0x323e0000 */
+ &twpll.common,
+ &lpll.common,
+ &isppll.common,
+ &gpll.common,
+ &cppll.common,
+};
+
+static struct clk_hw_onecell_data ums512_gc_pll_hws = {
+ .hws = {
+ [CLK_TWPLL] = &twpll.common.hw,
+ [CLK_TWPLL_768M] = &twpll_768m.hw,
+ [CLK_TWPLL_384M] = &twpll_384m.hw,
+ [CLK_TWPLL_192M] = &twpll_192m.hw,
+ [CLK_TWPLL_96M] = &twpll_96m.hw,
+ [CLK_TWPLL_48M] = &twpll_48m.hw,
+ [CLK_TWPLL_24M] = &twpll_24m.hw,
+ [CLK_TWPLL_12M] = &twpll_12m.hw,
+ [CLK_TWPLL_512M] = &twpll_512m.hw,
+ [CLK_TWPLL_256M] = &twpll_256m.hw,
+ [CLK_TWPLL_128M] = &twpll_128m.hw,
+ [CLK_TWPLL_64M] = &twpll_64m.hw,
+ [CLK_TWPLL_307M2] = &twpll_307m2.hw,
+ [CLK_TWPLL_219M4] = &twpll_219m4.hw,
+ [CLK_TWPLL_170M6] = &twpll_170m6.hw,
+ [CLK_TWPLL_153M6] = &twpll_153m6.hw,
+ [CLK_TWPLL_76M8] = &twpll_76m8.hw,
+ [CLK_TWPLL_51M2] = &twpll_51m2.hw,
+ [CLK_TWPLL_38M4] = &twpll_38m4.hw,
+ [CLK_TWPLL_19M2] = &twpll_19m2.hw,
+ [CLK_TWPLL_12M29] = &twpll_12m29.hw,
+ [CLK_LPLL] = &lpll.common.hw,
+ [CLK_LPLL_614M4] = &lpll_614m4.hw,
+ [CLK_LPLL_409M6] = &lpll_409m6.hw,
+ [CLK_LPLL_245M76] = &lpll_245m76.hw,
+ [CLK_LPLL_30M72] = &lpll_30m72.hw,
+ [CLK_ISPPLL] = &isppll.common.hw,
+ [CLK_ISPPLL_468M] = &isppll_468m.hw,
+ [CLK_ISPPLL_78M] = &isppll_78m.hw,
+ [CLK_GPLL] = &gpll.common.hw,
+ [CLK_GPLL_40M] = &gpll_40m.hw,
+ [CLK_CPPLL] = &cppll.common.hw,
+ [CLK_CPPLL_39M32] = &cppll_39m32.hw,
+ },
+ .num = CLK_ANLG_PHY_GC_NUM,
+};
+
+static struct sprd_clk_desc ums512_gc_pll_desc = {
+ .clk_clks = ums512_gc_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_gc_pll_clks),
+ .hw_clks = &ums512_gc_pll_hws,
+};
+
+/* ap ahb gates */
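+/* Enable (EB) gates for peripherals on the AP AHB bus; all of them are
+ * parented to the external 26 MHz clock.
+ */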
+static SPRD_SC_GATE_CLK_FW_NAME(dsi_eb, "dsi-eb", "ext-26m",
+ 0x0, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dispc_eb, "dispc-eb", "ext-26m",
+ 0x0, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(vsp_eb, "vsp-eb", "ext-26m",
+ 0x0, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(vdma_eb, "vdma-eb", "ext-26m",
+ 0x0, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dma_pub_eb, "dma-pub-eb", "ext-26m",
+ 0x0, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dma_sec_eb, "dma-sec-eb", "ext-26m",
+ 0x0, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ipi_eb, "ipi-eb", "ext-26m",
+ 0x0, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ahb_ckg_eb, "ahb-ckg-eb", "ext-26m",
+ 0x0, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(bm_clk_eb, "bm-clk-eb", "ext-26m",
+ 0x0, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+
+static struct sprd_clk_common *ums512_apahb_gate[] = {
+ /* address base is 0x20100000 */
+ &dsi_eb.common,
+ &dispc_eb.common,
+ &vsp_eb.common,
+ &vdma_eb.common,
+ &dma_pub_eb.common,
+ &dma_sec_eb.common,
+ &ipi_eb.common,
+ &ahb_ckg_eb.common,
+ &bm_clk_eb.common,
+};
+
+static struct clk_hw_onecell_data ums512_apahb_gate_hws = {
+ .hws = {
+ [CLK_DSI_EB] = &dsi_eb.common.hw,
+ [CLK_DISPC_EB] = &dispc_eb.common.hw,
+ [CLK_VSP_EB] = &vsp_eb.common.hw,
+ [CLK_VDMA_EB] = &vdma_eb.common.hw,
+ [CLK_DMA_PUB_EB] = &dma_pub_eb.common.hw,
+ [CLK_DMA_SEC_EB] = &dma_sec_eb.common.hw,
+ [CLK_IPI_EB] = &ipi_eb.common.hw,
+ [CLK_AHB_CKG_EB] = &ahb_ckg_eb.common.hw,
+ [CLK_BM_CLK_EB] = &bm_clk_eb.common.hw,
+ },
+ .num = CLK_AP_AHB_GATE_NUM,
+};
+
+static struct sprd_clk_desc ums512_apahb_gate_desc = {
+ .clk_clks = ums512_apahb_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_apahb_gate),
+ .hw_clks = &ums512_apahb_gate_hws,
+};
+
+/* ap clks */
+static const struct clk_parent_data ap_apb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_apb_clk, "ap-apb-clk", ap_apb_parents,
+ 0x20, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data ipi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(ipi_clk, "ipi-clk", ipi_parents,
+ 0x24, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data ap_uart_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_uart0_clk, "ap-uart0-clk", ap_uart_parents,
+ 0x28, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart1_clk, "ap-uart1-clk", ap_uart_parents,
+ 0x2c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart2_clk, "ap-uart2-clk", ap_uart_parents,
+ 0x30, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data i2c_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_i2c0_clk, "ap-i2c0-clk", i2c_parents,
+ 0x34, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c1_clk, "ap-i2c1-clk", i2c_parents,
+ 0x38, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c2_clk, "ap-i2c2-clk", i2c_parents,
+ 0x3c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c3_clk, "ap-i2c3-clk", i2c_parents,
+ 0x40, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c4_clk, "ap-i2c4-clk", i2c_parents,
+ 0x44, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data spi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_spi0_clk, "ap-spi0-clk", spi_parents,
+ 0x48, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi1_clk, "ap-spi1-clk", spi_parents,
+ 0x4c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi2_clk, "ap-spi2-clk", spi_parents,
+ 0x50, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi3_clk, "ap-spi3-clk", spi_parents,
+ 0x54, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data iis_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_iis0_clk, "ap-iis0-clk", iis_parents,
+ 0x58, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_iis1_clk, "ap-iis1-clk", iis_parents,
+ 0x5c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_iis2_clk, "ap-iis2-clk", iis_parents,
+ 0x60, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data sim_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_sim_clk, "ap-sim-clk", sim_parents,
+ 0x64, 0, 3, 8, 3, 0);
+
+static const struct clk_parent_data ap_ce_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_ce_clk, "ap-ce-clk", ap_ce_parents,
+ 0x68, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data sdio_parents[] = {
+ { .hw = &clk_1m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &rpll.common.hw },
+ { .hw = &lpll_409m6.hw },
+};
+static SPRD_MUX_CLK_DATA(sdio0_2x_clk, "sdio0-2x", sdio_parents,
+ 0x80, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio1_2x_clk, "sdio1-2x", sdio_parents,
+ 0x88, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(emmc_2x_clk, "emmc-2x", sdio_parents,
+ 0x90, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data vsp_parents[] = {
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(vsp_clk, "vsp-clk", vsp_parents,
+ 0x98, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data dispc0_parents[] = {
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(dispc0_clk, "dispc0-clk", dispc0_parents,
+ 0x9c, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data dispc0_dpi_parents[] = {
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_COMP_CLK_DATA(dispc0_dpi_clk, "dispc0-dpi-clk", dispc0_dpi_parents,
+ 0xa0, 0, 3, 8, 4, 0);
+
+static const struct clk_parent_data dsi_apb_parents[] = {
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_MUX_CLK_DATA(dsi_apb_clk, "dsi-apb-clk", dsi_apb_parents,
+ 0xa4, 0, 2, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(dsi_rxesc, "dsi-rxesc", "ext-26m",
+ 0xa8, BIT(16), 0, 0);
+
+static SPRD_GATE_CLK_FW_NAME(dsi_lanebyte, "dsi-lanebyte", "ext-26m",
+ 0xac, BIT(16), 0, 0);
+
+static const struct clk_parent_data vdsp_parents[] = {
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &lpll_614m4.hw },
+ { .hw = &twpll_768m.hw },
+ { .hw = &isppll.common.hw },
+};
+static SPRD_MUX_CLK_DATA(vdsp_clk, "vdsp-clk", vdsp_parents,
+ 0xb0, 0, 3, UMS512_MUX_FLAG);
+static SPRD_DIV_CLK_HW(vdsp_m_clk, "vdsp-m-clk", &vdsp_clk.common.hw,
+ 0xb4, 8, 2, 0);
+
+static struct sprd_clk_common *ums512_ap_clks[] = {
+ /* address base is 0x20200000 */
+ &ap_apb_clk.common,
+ &ipi_clk.common,
+ &ap_uart0_clk.common,
+ &ap_uart1_clk.common,
+ &ap_uart2_clk.common,
+ &ap_i2c0_clk.common,
+ &ap_i2c1_clk.common,
+ &ap_i2c2_clk.common,
+ &ap_i2c3_clk.common,
+ &ap_i2c4_clk.common,
+ &ap_spi0_clk.common,
+ &ap_spi1_clk.common,
+ &ap_spi2_clk.common,
+ &ap_spi3_clk.common,
+ &ap_iis0_clk.common,
+ &ap_iis1_clk.common,
+ &ap_iis2_clk.common,
+ &ap_sim_clk.common,
+ &ap_ce_clk.common,
+ &sdio0_2x_clk.common,
+ &sdio1_2x_clk.common,
+ &emmc_2x_clk.common,
+ &vsp_clk.common,
+ &dispc0_clk.common,
+ &dispc0_dpi_clk.common,
+ &dsi_apb_clk.common,
+ &dsi_rxesc.common,
+ &dsi_lanebyte.common,
+ &vdsp_clk.common,
+ &vdsp_m_clk.common,
+};
+
+static struct clk_hw_onecell_data ums512_ap_clk_hws = {
+ .hws = {
+ [CLK_AP_APB] = &ap_apb_clk.common.hw,
+ [CLK_IPI] = &ipi_clk.common.hw,
+ [CLK_AP_UART0] = &ap_uart0_clk.common.hw,
+ [CLK_AP_UART1] = &ap_uart1_clk.common.hw,
+ [CLK_AP_UART2] = &ap_uart2_clk.common.hw,
+ [CLK_AP_I2C0] = &ap_i2c0_clk.common.hw,
+ [CLK_AP_I2C1] = &ap_i2c1_clk.common.hw,
+ [CLK_AP_I2C2] = &ap_i2c2_clk.common.hw,
+ [CLK_AP_I2C3] = &ap_i2c3_clk.common.hw,
+ [CLK_AP_I2C4] = &ap_i2c4_clk.common.hw,
+ [CLK_AP_SPI0] = &ap_spi0_clk.common.hw,
+ [CLK_AP_SPI1] = &ap_spi1_clk.common.hw,
+ [CLK_AP_SPI2] = &ap_spi2_clk.common.hw,
+ [CLK_AP_SPI3] = &ap_spi3_clk.common.hw,
+ [CLK_AP_IIS0] = &ap_iis0_clk.common.hw,
+ [CLK_AP_IIS1] = &ap_iis1_clk.common.hw,
+ [CLK_AP_IIS2] = &ap_iis2_clk.common.hw,
+ [CLK_AP_SIM] = &ap_sim_clk.common.hw,
+ [CLK_AP_CE] = &ap_ce_clk.common.hw,
+ [CLK_SDIO0_2X] = &sdio0_2x_clk.common.hw,
+ [CLK_SDIO1_2X] = &sdio1_2x_clk.common.hw,
+ [CLK_EMMC_2X] = &emmc_2x_clk.common.hw,
+ [CLK_VSP] = &vsp_clk.common.hw,
+ [CLK_DISPC0] = &dispc0_clk.common.hw,
+ [CLK_DISPC0_DPI] = &dispc0_dpi_clk.common.hw,
+ [CLK_DSI_APB] = &dsi_apb_clk.common.hw,
+ [CLK_DSI_RXESC] = &dsi_rxesc.common.hw,
+ [CLK_DSI_LANEBYTE] = &dsi_lanebyte.common.hw,
+ [CLK_VDSP] = &vdsp_clk.common.hw,
+ [CLK_VDSP_M] = &vdsp_m_clk.common.hw,
+ },
+ .num = CLK_AP_CLK_NUM,
+};
+
+static struct sprd_clk_desc ums512_ap_clk_desc = {
+ .clk_clks = ums512_ap_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_ap_clks),
+ .hw_clks = &ums512_ap_clk_hws,
+};
+
+/* aon apb clks */
+static const struct clk_parent_data aon_apb_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-4m" },
+ { .hw = &clk_13m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(aon_apb_clk, "aon-apb-clk", aon_apb_parents,
+ 0x220, 0, 3, 8, 2, 0);
+
+static const struct clk_parent_data adi_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &rco_25m.hw },
+ { .hw = &twpll_38m4.hw },
+ { .hw = &twpll_51m2.hw },
+};
+static SPRD_MUX_CLK_DATA(adi_clk, "adi-clk", adi_parents,
+ 0x224, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data aux_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+ { .hw = &clk_26m_aud.hw },
+ { .hw = &rco_25m.hw },
+ { .hw = &cppll_39m32.hw },
+ { .hw = &mpll0_56m88.hw },
+ { .hw = &mpll1_63m38.hw },
+ { .hw = &mpll2_47m13.hw },
+ { .hw = &dpll0_58m31.hw },
+ { .hw = &gpll_40m.hw },
+ { .hw = &twpll_48m.hw },
+};
+static const struct clk_parent_data aux1_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+ { .hw = &clk_26m_aud.hw },
+ { .hw = &rco_25m.hw },
+ { .hw = &cppll_39m32.hw },
+ { .hw = &mpll0_56m88.hw },
+ { .hw = &mpll1_63m38.hw },
+ { .hw = &mpll2_47m13.hw },
+ { .hw = &dpll0_58m31.hw },
+ { .hw = &gpll_40m.hw },
+ { .hw = &twpll_19m2.hw },
+ { .hw = &lpll_30m72.hw },
+ { .hw = &rpll.common.hw },
+ { .hw = &twpll_12m29.hw },
+};
+static SPRD_COMP_CLK_DATA(aux0_clk, "aux0-clk", aux_parents,
+ 0x228, 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(aux1_clk, "aux1-clk", aux1_parents,
+ 0x22c, 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(aux2_clk, "aux2-clk", aux_parents,
+ 0x230, 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(probe_clk, "probe-clk", aux_parents,
+ 0x234, 0, 5, 8, 4, 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .hw = &twpll_48m.hw },
+};
+static SPRD_MUX_CLK_DATA(pwm0_clk, "pwm0-clk", pwm_parents,
+ 0x238, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm1_clk, "pwm1-clk", pwm_parents,
+ 0x23c, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm2_clk, "pwm2-clk", pwm_parents,
+ 0x240, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm3_clk, "pwm3-clk", pwm_parents,
+ 0x244, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data efuse_parents[] = {
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(efuse_clk, "efuse-clk", efuse_parents,
+ 0x248, 0, 1, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data uart_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(uart0_clk, "uart0-clk", uart_parents,
+ 0x24c, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(uart1_clk, "uart1-clk", uart_parents,
+ 0x250, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data thm_parents[] = {
+ { .fw_name = "ext-32m" },
+ { .hw = &clk_250k.hw },
+};
+static SPRD_MUX_CLK_DATA(thm0_clk, "thm0-clk", thm_parents,
+ 0x260, 0, 1, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(thm1_clk, "thm1-clk", thm_parents,
+ 0x264, 0, 1, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(thm2_clk, "thm2-clk", thm_parents,
+ 0x268, 0, 1, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(thm3_clk, "thm3-clk", thm_parents,
+ 0x26c, 0, 1, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data aon_i2c_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(aon_i2c_clk, "aon-i2c-clk", aon_i2c_parents,
+ 0x27c, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data aon_iis_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(aon_iis_clk, "aon-iis-clk", aon_iis_parents,
+ 0x280, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data scc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_MUX_CLK_DATA(scc_clk, "scc-clk", scc_parents,
+ 0x284, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data apcpu_dap_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &rco_4m.hw },
+ { .hw = &twpll_76m8.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(apcpu_dap_clk, "apcpu-dap-clk", apcpu_dap_parents,
+ 0x288, 0, 3, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(apcpu_dap_mtck, "apcpu-dap-mtck", "ext-26m",
+ 0x28c, BIT(16), 0, 0);
+
+static const struct clk_parent_data apcpu_ts_parents[] = {
+ { .fw_name = "ext-32m" },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(apcpu_ts_clk, "apcpu-ts-clk", apcpu_ts_parents,
+ 0x290, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data debug_ts_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_MUX_CLK_DATA(debug_ts_clk, "debug-ts-clk", debug_ts_parents,
+ 0x294, 0, 2, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(dsi_test_s, "dsi-test-s", "ext-26m",
+ 0x298, BIT(16), 0, 0);
+
+static const struct clk_parent_data djtag_tck_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(djtag_tck_clk, "djtag-tck-clk", djtag_tck_parents,
+ 0x2b4, 0, 1, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(djtag_tck_hw, "djtag-tck-hw", "ext-26m",
+ 0x2b8, BIT(16), 0, 0);
+
+static const struct clk_parent_data aon_tmr_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(aon_tmr_clk, "aon-tmr-clk", aon_tmr_parents,
+ 0x2c0, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data aon_pmu_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-4m" },
+};
+static SPRD_MUX_CLK_DATA(aon_pmu_clk, "aon-pmu-clk", aon_pmu_parents,
+ 0x2c8, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data debounce_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(debounce_clk, "debounce-clk", debounce_parents,
+ 0x2cc, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data apcpu_pmu_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(apcpu_pmu_clk, "apcpu-pmu-clk", apcpu_pmu_parents,
+ 0x2d0, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data top_dvfs_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(top_dvfs_clk, "top-dvfs-clk", top_dvfs_parents,
+ 0x2d8, 0, 2, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(otg_utmi, "otg-utmi", "ext-26m", 0x2dc,
+ BIT(16), 0, 0);
+
+static const struct clk_parent_data otg_ref_parents[] = {
+ { .hw = &twpll_12m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(otg_ref_clk, "otg-ref-clk", otg_ref_parents,
+ 0x2e0, 0, 1, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data cssys_parents[] = {
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_COMP_CLK_DATA(cssys_clk, "cssys-clk", cssys_parents,
+ 0x2e4, 0, 3, 8, 2, 0);
+static SPRD_DIV_CLK_HW(cssys_pub_clk, "cssys-pub-clk", &cssys_clk.common.hw,
+ 0x2e8, 8, 2, 0);
+static SPRD_DIV_CLK_HW(cssys_apb_clk, "cssys-apb-clk", &cssys_clk.common.hw,
+ 0x2ec, 8, 3, 0);
+
+static const struct clk_parent_data ap_axi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_axi_clk, "ap-axi-clk", ap_axi_parents,
+ 0x2f0, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data ap_mm_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_mm_clk, "ap-mm-clk", ap_mm_parents,
+ 0x2f4, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data sdio2_2x_parents[] = {
+ { .hw = &clk_1m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &rpll.common.hw },
+ { .hw = &lpll_409m6.hw },
+};
+static SPRD_MUX_CLK_DATA(sdio2_2x_clk, "sdio2-2x-clk", sdio2_2x_parents,
+ 0x2f8, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data analog_io_apb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+};
+static SPRD_COMP_CLK_DATA(analog_io_apb, "analog-io-apb", analog_io_apb_parents,
+ 0x300, 0, 1, 8, 2, 0);
+
+static const struct clk_parent_data dmc_ref_parents[] = {
+ { .hw = &clk_6m5.hw },
+ { .hw = &clk_13m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(dmc_ref_clk, "dmc-ref-clk", dmc_ref_parents,
+ 0x304, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data emc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &twpll_768m.hw },
+};
+static SPRD_MUX_CLK_DATA(emc_clk, "emc-clk", emc_parents,
+ 0x30c, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data usb_parents[] = {
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(usb_clk, "usb-clk", usb_parents,
+ 0x310, 0, 3, 8, 2, 0);
+
+static const struct clk_parent_data pmu_26m_parents[] = {
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(pmu_26m_clk, "26m-pmu-clk", pmu_26m_parents,
+ 0x318, 0, 1, UMS512_MUX_FLAG);
+
+static struct sprd_clk_common *ums512_aon_apb[] = {
+ /* address base is 0x32080200 */
+ &aon_apb_clk.common,
+ &adi_clk.common,
+ &aux0_clk.common,
+ &aux1_clk.common,
+ &aux2_clk.common,
+ &probe_clk.common,
+ &pwm0_clk.common,
+ &pwm1_clk.common,
+ &pwm2_clk.common,
+ &pwm3_clk.common,
+ &efuse_clk.common,
+ &uart0_clk.common,
+ &uart1_clk.common,
+ &thm0_clk.common,
+ &thm1_clk.common,
+ &thm2_clk.common,
+ &thm3_clk.common,
+ &aon_i2c_clk.common,
+ &aon_iis_clk.common,
+ &scc_clk.common,
+ &apcpu_dap_clk.common,
+ &apcpu_dap_mtck.common,
+ &apcpu_ts_clk.common,
+ &debug_ts_clk.common,
+ &dsi_test_s.common,
+ &djtag_tck_clk.common,
+ &djtag_tck_hw.common,
+ &aon_tmr_clk.common,
+ &aon_pmu_clk.common,
+ &debounce_clk.common,
+ &apcpu_pmu_clk.common,
+ &top_dvfs_clk.common,
+ &otg_utmi.common,
+ &otg_ref_clk.common,
+ &cssys_clk.common,
+ &cssys_pub_clk.common,
+ &cssys_apb_clk.common,
+ &ap_axi_clk.common,
+ &ap_mm_clk.common,
+ &sdio2_2x_clk.common,
+ &analog_io_apb.common,
+ &dmc_ref_clk.common,
+ &emc_clk.common,
+ &usb_clk.common,
+ &pmu_26m_clk.common,
+};
+
+static struct clk_hw_onecell_data ums512_aon_apb_hws = {
+ .hws = {
+ [CLK_AON_APB] = &aon_apb_clk.common.hw,
+ [CLK_ADI] = &adi_clk.common.hw,
+ [CLK_AUX0] = &aux0_clk.common.hw,
+ [CLK_AUX1] = &aux1_clk.common.hw,
+ [CLK_AUX2] = &aux2_clk.common.hw,
+ [CLK_PROBE] = &probe_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_EFUSE] = &efuse_clk.common.hw,
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART1] = &uart1_clk.common.hw,
+ [CLK_THM0] = &thm0_clk.common.hw,
+ [CLK_THM1] = &thm1_clk.common.hw,
+ [CLK_THM2] = &thm2_clk.common.hw,
+ [CLK_THM3] = &thm3_clk.common.hw,
+ [CLK_AON_I2C] = &aon_i2c_clk.common.hw,
+ [CLK_AON_IIS] = &aon_iis_clk.common.hw,
+ [CLK_SCC] = &scc_clk.common.hw,
+ [CLK_APCPU_DAP] = &apcpu_dap_clk.common.hw,
+ [CLK_APCPU_DAP_MTCK] = &apcpu_dap_mtck.common.hw,
+ [CLK_APCPU_TS] = &apcpu_ts_clk.common.hw,
+ [CLK_DEBUG_TS] = &debug_ts_clk.common.hw,
+ [CLK_DSI_TEST_S] = &dsi_test_s.common.hw,
+ [CLK_DJTAG_TCK] = &djtag_tck_clk.common.hw,
+ [CLK_DJTAG_TCK_HW] = &djtag_tck_hw.common.hw,
+ [CLK_AON_TMR] = &aon_tmr_clk.common.hw,
+ [CLK_AON_PMU] = &aon_pmu_clk.common.hw,
+ [CLK_DEBOUNCE] = &debounce_clk.common.hw,
+ [CLK_APCPU_PMU] = &apcpu_pmu_clk.common.hw,
+ [CLK_TOP_DVFS] = &top_dvfs_clk.common.hw,
+ [CLK_OTG_UTMI] = &otg_utmi.common.hw,
+ [CLK_OTG_REF] = &otg_ref_clk.common.hw,
+ [CLK_CSSYS] = &cssys_clk.common.hw,
+ [CLK_CSSYS_PUB] = &cssys_pub_clk.common.hw,
+ [CLK_CSSYS_APB] = &cssys_apb_clk.common.hw,
+ [CLK_AP_AXI] = &ap_axi_clk.common.hw,
+ [CLK_AP_MM] = &ap_mm_clk.common.hw,
+ [CLK_SDIO2_2X] = &sdio2_2x_clk.common.hw,
+ [CLK_ANALOG_IO_APB] = &analog_io_apb.common.hw,
+ [CLK_DMC_REF_CLK] = &dmc_ref_clk.common.hw,
+ [CLK_EMC] = &emc_clk.common.hw,
+ [CLK_USB] = &usb_clk.common.hw,
+ [CLK_26M_PMU] = &pmu_26m_clk.common.hw,
+ },
+ .num = CLK_AON_APB_NUM,
+};
+
+static struct sprd_clk_desc ums512_aon_apb_desc = {
+ .clk_clks = ums512_aon_apb,
+ .num_clk_clks = ARRAY_SIZE(ums512_aon_apb),
+ .hw_clks = &ums512_aon_apb_hws,
+};
+
+/* aon apb gates */
+static SPRD_SC_GATE_CLK_FW_NAME(rc100m_cal_eb, "rc100m-cal-eb", "ext-26m",
+ 0x0, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(djtag_tck_eb, "djtag-tck-eb", "ext-26m",
+ 0x0, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(djtag_eb, "djtag-eb", "ext-26m",
+ 0x0, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aux0_eb, "aux0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aux1_eb, "aux1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aux2_eb, "aux2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(probe_eb, "probe-eb", "ext-26m",
+ 0x0, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(mm_eb, "mm-eb", "ext-26m",
+ 0x0, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(gpu_eb, "gpu-eb", "ext-26m",
+ 0x0, 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(mspi_eb, "mspi-eb", "ext-26m",
+ 0x0, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apcpu_dap_eb, "apcpu-dap-eb", "ext-26m",
+ 0x0, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_cssys_eb, "aon-cssys-eb", "ext-26m",
+ 0x0, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(cssys_apb_eb, "cssys-apb-eb", "ext-26m",
+ 0x0, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(cssys_pub_eb, "cssys-pub-eb", "ext-26m",
+ 0x0, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdphy_cfg_eb, "sdphy-cfg-eb", "ext-26m",
+ 0x0, 0x1000, BIT(19), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdphy_ref_eb, "sdphy-ref-eb", "ext-26m",
+ 0x0, 0x1000, BIT(20), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(efuse_eb, "efuse-eb", "ext-26m",
+ 0x4, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(gpio_eb, "gpio-eb", "ext-26m",
+ 0x4, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(mbox_eb, "mbox-eb", "ext-26m",
+ 0x4, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(kpd_eb, "kpd-eb", "ext-26m",
+ 0x4, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_syst_eb, "aon-syst-eb", "ext-26m",
+ 0x4, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_syst_eb, "ap-syst-eb", "ext-26m",
+ 0x4, 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_tmr_eb, "aon-tmr-eb", "ext-26m",
+ 0x4, 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(otg_utmi_eb, "otg-utmi-eb", "ext-26m",
+ 0x4, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(otg_phy_eb, "otg-phy-eb", "ext-26m",
+ 0x4, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(splk_eb, "splk-eb", "ext-26m",
+ 0x4, 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pin_eb, "pin-eb", "ext-26m",
+ 0x4, 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ana_eb, "ana-eb", "ext-26m",
+ 0x4, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apcpu_ts0_eb, "apcpu-ts0-eb", "ext-26m",
+ 0x4, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apb_busmon_eb, "apb-busmon-eb", "ext-26m",
+ 0x4, 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_iis_eb, "aon-iis-eb", "ext-26m",
+ 0x4, 0x1000, BIT(19), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(scc_eb, "scc-eb", "ext-26m",
+ 0x4, 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(thm0_eb, "thm0-eb", "ext-26m",
+ 0x8, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(thm1_eb, "thm1-eb", "ext-26m",
+ 0x8, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(thm2_eb, "thm2-eb", "ext-26m",
+ 0x8, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(asim_top_eb, "asim-top", "ext-26m",
+ 0x8, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c_eb, "i2c-eb", "ext-26m",
+ 0x8, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pmu_eb, "pmu-eb", "ext-26m",
+ 0x8, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(adi_eb, "adi-eb", "ext-26m",
+ 0x8, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(eic_eb, "eic-eb", "ext-26m",
+ 0x8, 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc0_eb, "ap-intc0-eb", "ext-26m",
+ 0x8, 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc1_eb, "ap-intc1-eb", "ext-26m",
+ 0x8, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc2_eb, "ap-intc2-eb", "ext-26m",
+ 0x8, 0x1000, BIT(13), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc3_eb, "ap-intc3-eb", "ext-26m",
+ 0x8, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc4_eb, "ap-intc4-eb", "ext-26m",
+ 0x8, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc5_eb, "ap-intc5-eb", "ext-26m",
+ 0x8, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(audcp_intc_eb, "audcp-intc-eb", "ext-26m",
+ 0x8, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr0_eb, "ap-tmr0-eb", "ext-26m",
+ 0x8, 0x1000, BIT(22), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr1_eb, "ap-tmr1-eb", "ext-26m",
+ 0x8, 0x1000, BIT(23), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr2_eb, "ap-tmr2-eb", "ext-26m",
+ 0x8, 0x1000, BIT(24), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pwm0_eb, "pwm0-eb", "ext-26m",
+ 0x8, 0x1000, BIT(25), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pwm1_eb, "pwm1-eb", "ext-26m",
+ 0x8, 0x1000, BIT(26), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pwm2_eb, "pwm2-eb", "ext-26m",
+ 0x8, 0x1000, BIT(27), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pwm3_eb, "pwm3-eb", "ext-26m",
+ 0x8, 0x1000, BIT(28), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_wdg_eb, "ap-wdg-eb", "ext-26m",
+ 0x8, 0x1000, BIT(29), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apcpu_wdg_eb, "apcpu-wdg-eb", "ext-26m",
+ 0x8, 0x1000, BIT(30), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(serdes_eb, "serdes-eb", "ext-26m",
+ 0x8, 0x1000, BIT(31), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(arch_rtc_eb, "arch-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(kpd_rtc_eb, "kpd-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_syst_rtc_eb, "aon-syst-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_syst_rtc_eb, "ap-syst-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_tmr_rtc_eb, "aon-tmr-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(eic_rtc_eb, "eic-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(eic_rtcdv5_eb, "eic-rtcdv5-eb", "ext-26m",
+ 0x18, 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_wdg_rtc_eb, "ap-wdg-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ac_wdg_rtc_eb, "ac-wdg-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr0_rtc_eb, "ap-tmr0-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr1_rtc_eb, "ap-tmr1-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr2_rtc_eb, "ap-tmr2-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dcxo_lc_rtc_eb, "dcxo-lc-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(bb_cal_rtc_eb, "bb-cal-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(13), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_emmc_rtc_eb, "ap-emmc-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_sdio0_rtc_eb, "ap-sdio0-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_sdio1_rtc_eb, "ap-sdio1-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_sdio2_rtc_eb, "ap-sdio2-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dsi_csi_test_eb, "dsi-csi-test-eb", "ext-26m",
+ 0x138, 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(djtag_tck_en, "djtag-tck-en", "ext-26m",
+ 0x138, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dphy_ref_eb, "dphy-ref-eb", "ext-26m",
+ 0x138, 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dmc_ref_eb, "dmc-ref-eb", "ext-26m",
+ 0x138, 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(otg_ref_eb, "otg-ref-eb", "ext-26m",
+ 0x138, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(tsen_eb, "tsen-eb", "ext-26m",
+ 0x138, 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(tmr_eb, "tmr-eb", "ext-26m",
+ 0x138, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(rc100m_ref_eb, "rc100m-ref-eb", "ext-26m",
+ 0x138, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(rc100m_fdk_eb, "rc100m-fdk-eb", "ext-26m",
+ 0x138, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(debounce_eb, "debounce-eb", "ext-26m",
+ 0x138, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(det_32k_eb, "det-32k-eb", "ext-26m",
+ 0x138, 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(top_cssys_en, "top-cssys-en", "ext-26m",
+ 0x13c, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_axi_en, "ap-axi-en", "ext-26m",
+ 0x13c, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio0_2x_en, "sdio0-2x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio0_1x_en, "sdio0-1x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio1_2x_en, "sdio1-2x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio1_1x_en, "sdio1-1x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio2_2x_en, "sdio2-2x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio2_1x_en, "sdio2-1x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(emmc_2x_en, "emmc-2x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(emmc_1x_en, "emmc-1x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pll_test_en, "pll-test-en", "ext-26m",
+ 0x13c, 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(cphy_cfg_en, "cphy-cfg-en", "ext-26m",
+ 0x13c, 0x1000, BIT(15), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(debug_ts_en, "debug-ts-en", "ext-26m",
+ 0x13c, 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(access_aud_en, "access-aud-en",
+ "ext-26m", 0x14c, 0x1000, BIT(0), 0, 0);
+
+static struct sprd_clk_common *ums512_aon_gate[] = {
+ /* address base is 0x327d0000 */
+ &rc100m_cal_eb.common,
+ &djtag_tck_eb.common,
+ &djtag_eb.common,
+ &aux0_eb.common,
+ &aux1_eb.common,
+ &aux2_eb.common,
+ &probe_eb.common,
+ &mm_eb.common,
+ &gpu_eb.common,
+ &mspi_eb.common,
+ &apcpu_dap_eb.common,
+ &aon_cssys_eb.common,
+ &cssys_apb_eb.common,
+ &cssys_pub_eb.common,
+ &sdphy_cfg_eb.common,
+ &sdphy_ref_eb.common,
+ &efuse_eb.common,
+ &gpio_eb.common,
+ &mbox_eb.common,
+ &kpd_eb.common,
+ &aon_syst_eb.common,
+ &ap_syst_eb.common,
+ &aon_tmr_eb.common,
+ &otg_utmi_eb.common,
+ &otg_phy_eb.common,
+ &splk_eb.common,
+ &pin_eb.common,
+ &ana_eb.common,
+ &apcpu_ts0_eb.common,
+ &apb_busmon_eb.common,
+ &aon_iis_eb.common,
+ &scc_eb.common,
+ &thm0_eb.common,
+ &thm1_eb.common,
+ &thm2_eb.common,
+ &asim_top_eb.common,
+ &i2c_eb.common,
+ &pmu_eb.common,
+ &adi_eb.common,
+ &eic_eb.common,
+ &ap_intc0_eb.common,
+ &ap_intc1_eb.common,
+ &ap_intc2_eb.common,
+ &ap_intc3_eb.common,
+ &ap_intc4_eb.common,
+ &ap_intc5_eb.common,
+ &audcp_intc_eb.common,
+ &ap_tmr0_eb.common,
+ &ap_tmr1_eb.common,
+ &ap_tmr2_eb.common,
+ &pwm0_eb.common,
+ &pwm1_eb.common,
+ &pwm2_eb.common,
+ &pwm3_eb.common,
+ &ap_wdg_eb.common,
+ &apcpu_wdg_eb.common,
+ &serdes_eb.common,
+ &arch_rtc_eb.common,
+ &kpd_rtc_eb.common,
+ &aon_syst_rtc_eb.common,
+ &ap_syst_rtc_eb.common,
+ &aon_tmr_rtc_eb.common,
+ &eic_rtc_eb.common,
+ &eic_rtcdv5_eb.common,
+ &ap_wdg_rtc_eb.common,
+ &ac_wdg_rtc_eb.common,
+ &ap_tmr0_rtc_eb.common,
+ &ap_tmr1_rtc_eb.common,
+ &ap_tmr2_rtc_eb.common,
+ &dcxo_lc_rtc_eb.common,
+ &bb_cal_rtc_eb.common,
+ &ap_emmc_rtc_eb.common,
+ &ap_sdio0_rtc_eb.common,
+ &ap_sdio1_rtc_eb.common,
+ &ap_sdio2_rtc_eb.common,
+ &dsi_csi_test_eb.common,
+ &djtag_tck_en.common,
+ &dphy_ref_eb.common,
+ &dmc_ref_eb.common,
+ &otg_ref_eb.common,
+ &tsen_eb.common,
+ &tmr_eb.common,
+ &rc100m_ref_eb.common,
+ &rc100m_fdk_eb.common,
+ &debounce_eb.common,
+ &det_32k_eb.common,
+ &top_cssys_en.common,
+ &ap_axi_en.common,
+ &sdio0_2x_en.common,
+ &sdio0_1x_en.common,
+ &sdio1_2x_en.common,
+ &sdio1_1x_en.common,
+ &sdio2_2x_en.common,
+ &sdio2_1x_en.common,
+ &emmc_2x_en.common,
+ &emmc_1x_en.common,
+ &pll_test_en.common,
+ &cphy_cfg_en.common,
+ &debug_ts_en.common,
+ &access_aud_en.common,
+};
+
+static struct clk_hw_onecell_data ums512_aon_gate_hws = {
+ .hws = {
+ [CLK_RC100M_CAL_EB] = &rc100m_cal_eb.common.hw,
+ [CLK_DJTAG_TCK_EB] = &djtag_tck_eb.common.hw,
+ [CLK_DJTAG_EB] = &djtag_eb.common.hw,
+ [CLK_AUX0_EB] = &aux0_eb.common.hw,
+ [CLK_AUX1_EB] = &aux1_eb.common.hw,
+ [CLK_AUX2_EB] = &aux2_eb.common.hw,
+ [CLK_PROBE_EB] = &probe_eb.common.hw,
+ [CLK_MM_EB] = &mm_eb.common.hw,
+ [CLK_GPU_EB] = &gpu_eb.common.hw,
+ [CLK_MSPI_EB] = &mspi_eb.common.hw,
+ [CLK_APCPU_DAP_EB] = &apcpu_dap_eb.common.hw,
+ [CLK_AON_CSSYS_EB] = &aon_cssys_eb.common.hw,
+ [CLK_CSSYS_APB_EB] = &cssys_apb_eb.common.hw,
+ [CLK_CSSYS_PUB_EB] = &cssys_pub_eb.common.hw,
+ [CLK_SDPHY_CFG_EB] = &sdphy_cfg_eb.common.hw,
+ [CLK_SDPHY_REF_EB] = &sdphy_ref_eb.common.hw,
+ [CLK_EFUSE_EB] = &efuse_eb.common.hw,
+ [CLK_GPIO_EB] = &gpio_eb.common.hw,
+ [CLK_MBOX_EB] = &mbox_eb.common.hw,
+ [CLK_KPD_EB] = &kpd_eb.common.hw,
+ [CLK_AON_SYST_EB] = &aon_syst_eb.common.hw,
+ [CLK_AP_SYST_EB] = &ap_syst_eb.common.hw,
+ [CLK_AON_TMR_EB] = &aon_tmr_eb.common.hw,
+ [CLK_OTG_UTMI_EB] = &otg_utmi_eb.common.hw,
+ [CLK_OTG_PHY_EB] = &otg_phy_eb.common.hw,
+ [CLK_SPLK_EB] = &splk_eb.common.hw,
+ [CLK_PIN_EB] = &pin_eb.common.hw,
+ [CLK_ANA_EB] = &ana_eb.common.hw,
+ [CLK_APCPU_TS0_EB] = &apcpu_ts0_eb.common.hw,
+ [CLK_APB_BUSMON_EB] = &apb_busmon_eb.common.hw,
+ [CLK_AON_IIS_EB] = &aon_iis_eb.common.hw,
+ [CLK_SCC_EB] = &scc_eb.common.hw,
+ [CLK_THM0_EB] = &thm0_eb.common.hw,
+ [CLK_THM1_EB] = &thm1_eb.common.hw,
+ [CLK_THM2_EB] = &thm2_eb.common.hw,
+ [CLK_ASIM_TOP_EB] = &asim_top_eb.common.hw,
+ [CLK_I2C_EB] = &i2c_eb.common.hw,
+ [CLK_PMU_EB] = &pmu_eb.common.hw,
+ [CLK_ADI_EB] = &adi_eb.common.hw,
+ [CLK_EIC_EB] = &eic_eb.common.hw,
+ [CLK_AP_INTC0_EB] = &ap_intc0_eb.common.hw,
+ [CLK_AP_INTC1_EB] = &ap_intc1_eb.common.hw,
+ [CLK_AP_INTC2_EB] = &ap_intc2_eb.common.hw,
+ [CLK_AP_INTC3_EB] = &ap_intc3_eb.common.hw,
+ [CLK_AP_INTC4_EB] = &ap_intc4_eb.common.hw,
+ [CLK_AP_INTC5_EB] = &ap_intc5_eb.common.hw,
+ [CLK_AUDCP_INTC_EB] = &audcp_intc_eb.common.hw,
+ [CLK_AP_TMR0_EB] = &ap_tmr0_eb.common.hw,
+ [CLK_AP_TMR1_EB] = &ap_tmr1_eb.common.hw,
+ [CLK_AP_TMR2_EB] = &ap_tmr2_eb.common.hw,
+ [CLK_PWM0_EB] = &pwm0_eb.common.hw,
+ [CLK_PWM1_EB] = &pwm1_eb.common.hw,
+ [CLK_PWM2_EB] = &pwm2_eb.common.hw,
+ [CLK_PWM3_EB] = &pwm3_eb.common.hw,
+ [CLK_AP_WDG_EB] = &ap_wdg_eb.common.hw,
+ [CLK_APCPU_WDG_EB] = &apcpu_wdg_eb.common.hw,
+ [CLK_SERDES_EB] = &serdes_eb.common.hw,
+ [CLK_ARCH_RTC_EB] = &arch_rtc_eb.common.hw,
+ [CLK_KPD_RTC_EB] = &kpd_rtc_eb.common.hw,
+ [CLK_AON_SYST_RTC_EB] = &aon_syst_rtc_eb.common.hw,
+ [CLK_AP_SYST_RTC_EB] = &ap_syst_rtc_eb.common.hw,
+ [CLK_AON_TMR_RTC_EB] = &aon_tmr_rtc_eb.common.hw,
+ [CLK_EIC_RTC_EB] = &eic_rtc_eb.common.hw,
+ [CLK_EIC_RTCDV5_EB] = &eic_rtcdv5_eb.common.hw,
+ [CLK_AP_WDG_RTC_EB] = &ap_wdg_rtc_eb.common.hw,
+ [CLK_AC_WDG_RTC_EB] = &ac_wdg_rtc_eb.common.hw,
+ [CLK_AP_TMR0_RTC_EB] = &ap_tmr0_rtc_eb.common.hw,
+ [CLK_AP_TMR1_RTC_EB] = &ap_tmr1_rtc_eb.common.hw,
+ [CLK_AP_TMR2_RTC_EB] = &ap_tmr2_rtc_eb.common.hw,
+ [CLK_DCXO_LC_RTC_EB] = &dcxo_lc_rtc_eb.common.hw,
+ [CLK_BB_CAL_RTC_EB] = &bb_cal_rtc_eb.common.hw,
+ [CLK_AP_EMMC_RTC_EB] = &ap_emmc_rtc_eb.common.hw,
+ [CLK_AP_SDIO0_RTC_EB] = &ap_sdio0_rtc_eb.common.hw,
+ [CLK_AP_SDIO1_RTC_EB] = &ap_sdio1_rtc_eb.common.hw,
+ [CLK_AP_SDIO2_RTC_EB] = &ap_sdio2_rtc_eb.common.hw,
+ [CLK_DSI_CSI_TEST_EB] = &dsi_csi_test_eb.common.hw,
+ [CLK_DJTAG_TCK_EN] = &djtag_tck_en.common.hw,
+ [CLK_DPHY_REF_EB] = &dphy_ref_eb.common.hw,
+ [CLK_DMC_REF_EB] = &dmc_ref_eb.common.hw,
+ [CLK_OTG_REF_EB] = &otg_ref_eb.common.hw,
+ [CLK_TSEN_EB] = &tsen_eb.common.hw,
+ [CLK_TMR_EB] = &tmr_eb.common.hw,
+ [CLK_RC100M_REF_EB] = &rc100m_ref_eb.common.hw,
+ [CLK_RC100M_FDK_EB] = &rc100m_fdk_eb.common.hw,
+ [CLK_DEBOUNCE_EB] = &debounce_eb.common.hw,
+ [CLK_DET_32K_EB] = &det_32k_eb.common.hw,
+ [CLK_TOP_CSSYS_EB] = &top_cssys_en.common.hw,
+ [CLK_AP_AXI_EN] = &ap_axi_en.common.hw,
+ [CLK_SDIO0_2X_EN] = &sdio0_2x_en.common.hw,
+ [CLK_SDIO0_1X_EN] = &sdio0_1x_en.common.hw,
+ [CLK_SDIO1_2X_EN] = &sdio1_2x_en.common.hw,
+ [CLK_SDIO1_1X_EN] = &sdio1_1x_en.common.hw,
+ [CLK_SDIO2_2X_EN] = &sdio2_2x_en.common.hw,
+ [CLK_SDIO2_1X_EN] = &sdio2_1x_en.common.hw,
+ [CLK_EMMC_2X_EN] = &emmc_2x_en.common.hw,
+ [CLK_EMMC_1X_EN] = &emmc_1x_en.common.hw,
+ [CLK_PLL_TEST_EN] = &pll_test_en.common.hw,
+ [CLK_CPHY_CFG_EN] = &cphy_cfg_en.common.hw,
+ [CLK_DEBUG_TS_EN] = &debug_ts_en.common.hw,
+ [CLK_ACCESS_AUD_EN] = &access_aud_en.common.hw,
+ },
+ .num = CLK_AON_APB_GATE_NUM,
+};
+
+static struct sprd_clk_desc ums512_aon_gate_desc = {
+ .clk_clks = ums512_aon_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_aon_gate),
+ .hw_clks = &ums512_aon_gate_hws,
+};
+
+/* audcp apb gates */
+/* Audcp apb clocks are configured with CLK_IGNORE_UNUSED because these
+ * clocks may also be controlled by the audcp sys. Gating them from the
+ * kernel may cause an exception.
+ */
+static SPRD_SC_GATE_CLK_HW(audcp_wdg_eb, "audcp-wdg-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(1),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_rtc_wdg_eb, "audcp-rtc-wdg-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(2),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_tmr0_eb, "audcp-tmr0-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(5),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_tmr1_eb, "audcp-tmr1-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(6),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+
+static struct sprd_clk_common *ums512_audcpapb_gate[] = {
+ /* address base is 0x3350d000 */
+ &audcp_wdg_eb.common,
+ &audcp_rtc_wdg_eb.common,
+ &audcp_tmr0_eb.common,
+ &audcp_tmr1_eb.common,
+};
+
+static struct clk_hw_onecell_data ums512_audcpapb_gate_hws = {
+ .hws = {
+ [CLK_AUDCP_WDG_EB] = &audcp_wdg_eb.common.hw,
+ [CLK_AUDCP_RTC_WDG_EB] = &audcp_rtc_wdg_eb.common.hw,
+ [CLK_AUDCP_TMR0_EB] = &audcp_tmr0_eb.common.hw,
+ [CLK_AUDCP_TMR1_EB] = &audcp_tmr1_eb.common.hw,
+ },
+ .num = CLK_AUDCP_APB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc ums512_audcpapb_gate_desc = {
+ .clk_clks = ums512_audcpapb_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_audcpapb_gate),
+ .hw_clks = &ums512_audcpapb_gate_hws,
+};
+
+/* audcp ahb gates */
+/* Audcp ahb clocks are configured with CLK_IGNORE_UNUSED because these
+ * clocks may also be controlled by the audcp sys. Gating them from the
+ * kernel may cause an exception.
+ */
+static SPRD_SC_GATE_CLK_HW(audcp_iis0_eb, "audcp-iis0-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(0),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_iis1_eb, "audcp-iis1-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(1),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_iis2_eb, "audcp-iis2-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(2),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_uart_eb, "audcp-uart-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(4),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_dma_cp_eb, "audcp-dma-cp-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(5),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_dma_ap_eb, "audcp-dma-ap-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(6),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_src48k_eb, "audcp-src48k-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(10),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_mcdt_eb, "audcp-mcdt-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(12),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_vbcifd_eb, "audcp-vbcifd-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(13),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_vbc_eb, "audcp-vbc-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(14),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_splk_eb, "audcp-splk-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(15),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_icu_eb, "audcp-icu-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(16),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(dma_ap_ashb_eb, "dma-ap-ashb-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(17),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(dma_cp_ashb_eb, "dma-cp-ashb-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(18),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_aud_eb, "audcp-aud-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(19),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_vbc_24m_eb, "audcp-vbc-24m-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(21),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_tmr_26m_eb, "audcp-tmr-26m-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(22),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_dvfs_ashb_eb, "audcp-dvfs-ashb-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(23),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+
+static struct sprd_clk_common *ums512_audcpahb_gate[] = {
+ /* address base is 0x335e0000 */
+ &audcp_iis0_eb.common,
+ &audcp_iis1_eb.common,
+ &audcp_iis2_eb.common,
+ &audcp_uart_eb.common,
+ &audcp_dma_cp_eb.common,
+ &audcp_dma_ap_eb.common,
+ &audcp_src48k_eb.common,
+ &audcp_mcdt_eb.common,
+ &audcp_vbcifd_eb.common,
+ &audcp_vbc_eb.common,
+ &audcp_splk_eb.common,
+ &audcp_icu_eb.common,
+ &dma_ap_ashb_eb.common,
+ &dma_cp_ashb_eb.common,
+ &audcp_aud_eb.common,
+ &audcp_vbc_24m_eb.common,
+ &audcp_tmr_26m_eb.common,
+ &audcp_dvfs_ashb_eb.common,
+};
+
+static struct clk_hw_onecell_data ums512_audcpahb_gate_hws = {
+ .hws = {
+ [CLK_AUDCP_IIS0_EB] = &audcp_iis0_eb.common.hw,
+ [CLK_AUDCP_IIS1_EB] = &audcp_iis1_eb.common.hw,
+ [CLK_AUDCP_IIS2_EB] = &audcp_iis2_eb.common.hw,
+ [CLK_AUDCP_UART_EB] = &audcp_uart_eb.common.hw,
+ [CLK_AUDCP_DMA_CP_EB] = &audcp_dma_cp_eb.common.hw,
+ [CLK_AUDCP_DMA_AP_EB] = &audcp_dma_ap_eb.common.hw,
+ [CLK_AUDCP_SRC48K_EB] = &audcp_src48k_eb.common.hw,
+ [CLK_AUDCP_MCDT_EB] = &audcp_mcdt_eb.common.hw,
+ [CLK_AUDCP_VBCIFD_EB] = &audcp_vbcifd_eb.common.hw,
+ [CLK_AUDCP_VBC_EB] = &audcp_vbc_eb.common.hw,
+ [CLK_AUDCP_SPLK_EB] = &audcp_splk_eb.common.hw,
+ [CLK_AUDCP_ICU_EB] = &audcp_icu_eb.common.hw,
+ [CLK_AUDCP_DMA_AP_ASHB_EB] = &dma_ap_ashb_eb.common.hw,
+ [CLK_AUDCP_DMA_CP_ASHB_EB] = &dma_cp_ashb_eb.common.hw,
+ [CLK_AUDCP_AUD_EB] = &audcp_aud_eb.common.hw,
+ [CLK_AUDCP_VBC_24M_EB] = &audcp_vbc_24m_eb.common.hw,
+ [CLK_AUDCP_TMR_26M_EB] = &audcp_tmr_26m_eb.common.hw,
+ [CLK_AUDCP_DVFS_ASHB_EB] = &audcp_dvfs_ashb_eb.common.hw,
+ },
+ .num = CLK_AUDCP_AHB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc ums512_audcpahb_gate_desc = {
+ .clk_clks = ums512_audcpahb_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_audcpahb_gate),
+ .hw_clks = &ums512_audcpahb_gate_hws,
+};
+
+/* gpu clocks */
+static SPRD_GATE_CLK_HW(gpu_core_gate, "gpu-core-gate", &gpu_eb.common.hw,
+ 0x4, BIT(0), 0, 0);
+
+static const struct clk_parent_data gpu_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &lpll_614m4.hw },
+ { .hw = &twpll_768m.hw },
+ { .hw = &gpll.common.hw },
+};
+
+static SPRD_COMP_CLK_DATA(gpu_core_clk, "gpu-core-clk", gpu_parents,
+ 0x4, 4, 3, 8, 3, 0);
+
+static SPRD_GATE_CLK_HW(gpu_mem_gate, "gpu-mem-gate", &gpu_eb.common.hw,
+ 0x8, BIT(0), 0, 0);
+
+static SPRD_COMP_CLK_DATA(gpu_mem_clk, "gpu-mem-clk", gpu_parents,
+ 0x8, 4, 3, 8, 3, 0);
+
+static SPRD_GATE_CLK_HW(gpu_sys_gate, "gpu-sys-gate", &gpu_eb.common.hw,
+ 0xc, BIT(0), 0, 0);
+
+static SPRD_DIV_CLK_HW(gpu_sys_clk, "gpu-sys-clk", &gpu_eb.common.hw,
+ 0xc, 4, 3, 0);
+
+static struct sprd_clk_common *ums512_gpu_clk[] = {
+ /* address base is 0x60100000 */
+ &gpu_core_gate.common,
+ &gpu_core_clk.common,
+ &gpu_mem_gate.common,
+ &gpu_mem_clk.common,
+ &gpu_sys_gate.common,
+ &gpu_sys_clk.common,
+};
+
+static struct clk_hw_onecell_data ums512_gpu_clk_hws = {
+ .hws = {
+ [CLK_GPU_CORE_EB] = &gpu_core_gate.common.hw,
+ [CLK_GPU_CORE] = &gpu_core_clk.common.hw,
+ [CLK_GPU_MEM_EB] = &gpu_mem_gate.common.hw,
+ [CLK_GPU_MEM] = &gpu_mem_clk.common.hw,
+ [CLK_GPU_SYS_EB] = &gpu_sys_gate.common.hw,
+ [CLK_GPU_SYS] = &gpu_sys_clk.common.hw,
+ },
+ .num = CLK_GPU_CLK_NUM,
+};
+
+static struct sprd_clk_desc ums512_gpu_clk_desc = {
+ .clk_clks = ums512_gpu_clk,
+ .num_clk_clks = ARRAY_SIZE(ums512_gpu_clk),
+ .hw_clks = &ums512_gpu_clk_hws,
+};
+
+/* mm clocks */
+static const struct clk_parent_data mm_ahb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_ahb_clk, "mm-ahb-clk", mm_ahb_parents,
+ 0x20, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data mm_mtx_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_mtx_clk, "mm-mtx-clk", mm_mtx_parents,
+ 0x24, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data sensor_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_COMP_CLK_DATA(sensor0_clk, "sensor0-clk", sensor_parents,
+ 0x28, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(sensor1_clk, "sensor1-clk", sensor_parents,
+ 0x2c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(sensor2_clk, "sensor2-clk", sensor_parents,
+ 0x30, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data cpp_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(cpp_clk, "cpp-clk", cpp_parents,
+ 0x34, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data jpg_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(jpg_clk, "jpg-clk", jpg_parents,
+ 0x38, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data fd_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(fd_clk, "fd-clk", fd_parents,
+ 0x3c, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data dcam_if_parents[] = {
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+};
+static SPRD_MUX_CLK_DATA(dcam_if_clk, "dcam-if-clk", dcam_if_parents,
+ 0x40, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data dcam_axi_parents[] = {
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+};
+static SPRD_MUX_CLK_DATA(dcam_axi_clk, "dcam-axi-clk", dcam_axi_parents,
+ 0x44, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data isp_parents[] = {
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_MUX_CLK_DATA(isp_clk, "isp-clk", isp_parents,
+ 0x48, 0, 3, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_HW(mipi_csi0, "mipi-csi0", &mm_eb.common.hw,
+ 0x4c, BIT(16), CLK_IGNORE_UNUSED, 0);
+
+static SPRD_GATE_CLK_HW(mipi_csi1, "mipi-csi1", &mm_eb.common.hw,
+ 0x50, BIT(16), CLK_IGNORE_UNUSED, 0);
+
+static SPRD_GATE_CLK_HW(mipi_csi2, "mipi-csi2", &mm_eb.common.hw,
+ 0x54, BIT(16), CLK_IGNORE_UNUSED, 0);
+
+static struct sprd_clk_common *ums512_mm_clk[] = {
+ /* address base is 0x62100000 */
+ &mm_ahb_clk.common,
+ &mm_mtx_clk.common,
+ &sensor0_clk.common,
+ &sensor1_clk.common,
+ &sensor2_clk.common,
+ &cpp_clk.common,
+ &jpg_clk.common,
+ &fd_clk.common,
+ &dcam_if_clk.common,
+ &dcam_axi_clk.common,
+ &isp_clk.common,
+ &mipi_csi0.common,
+ &mipi_csi1.common,
+ &mipi_csi2.common,
+};
+
+static struct clk_hw_onecell_data ums512_mm_clk_hws = {
+ .hws = {
+ [CLK_MM_AHB] = &mm_ahb_clk.common.hw,
+ [CLK_MM_MTX] = &mm_mtx_clk.common.hw,
+ [CLK_SENSOR0] = &sensor0_clk.common.hw,
+ [CLK_SENSOR1] = &sensor1_clk.common.hw,
+ [CLK_SENSOR2] = &sensor2_clk.common.hw,
+ [CLK_CPP] = &cpp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_FD] = &fd_clk.common.hw,
+ [CLK_DCAM_IF] = &dcam_if_clk.common.hw,
+ [CLK_DCAM_AXI] = &dcam_axi_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_MIPI_CSI0] = &mipi_csi0.common.hw,
+ [CLK_MIPI_CSI1] = &mipi_csi1.common.hw,
+ [CLK_MIPI_CSI2] = &mipi_csi2.common.hw,
+ },
+ .num = CLK_MM_CLK_NUM,
+};
+
+static struct sprd_clk_desc ums512_mm_clk_desc = {
+ .clk_clks = ums512_mm_clk,
+ .num_clk_clks = ARRAY_SIZE(ums512_mm_clk),
+ .hw_clks = &ums512_mm_clk_hws,
+};
+
+/* mm gate clocks */
+static SPRD_SC_GATE_CLK_HW(mm_cpp_eb, "mm-cpp-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_jpg_eb, "mm-jpg-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_dcam_eb, "mm-dcam-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_isp_eb, "mm-isp-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_csi2_eb, "mm-csi2-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_csi1_eb, "mm-csi1-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_csi0_eb, "mm-csi0-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_ckg_eb, "mm-ckg-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_isp_ahb_eb, "mm-isp-ahb-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_dvfs_eb, "mm-dvfs-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_fd_eb, "mm-fd-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_sensor2_en, "mm-sensor2-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_sensor1_en, "mm-sensor1-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_sensor0_en, "mm-sensor0-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_mipi_csi2_en, "mm-mipi-csi2-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_mipi_csi1_en, "mm-mipi-csi1-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_mipi_csi0_en, "mm-mipi-csi0-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_dcam_axi_en, "mm-dcam-axi-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_isp_axi_en, "mm-isp-axi-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_cphy_en, "mm-cphy-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(8), 0, 0);
+
+static struct sprd_clk_common *ums512_mm_gate_clk[] = {
+ /* address base is 0x62200000 */
+ &mm_cpp_eb.common,
+ &mm_jpg_eb.common,
+ &mm_dcam_eb.common,
+ &mm_isp_eb.common,
+ &mm_csi2_eb.common,
+ &mm_csi1_eb.common,
+ &mm_csi0_eb.common,
+ &mm_ckg_eb.common,
+ &mm_isp_ahb_eb.common,
+ &mm_dvfs_eb.common,
+ &mm_fd_eb.common,
+ &mm_sensor2_en.common,
+ &mm_sensor1_en.common,
+ &mm_sensor0_en.common,
+ &mm_mipi_csi2_en.common,
+ &mm_mipi_csi1_en.common,
+ &mm_mipi_csi0_en.common,
+ &mm_dcam_axi_en.common,
+ &mm_isp_axi_en.common,
+ &mm_cphy_en.common,
+};
+
+static struct clk_hw_onecell_data ums512_mm_gate_clk_hws = {
+ .hws = {
+ [CLK_MM_CPP_EB] = &mm_cpp_eb.common.hw,
+ [CLK_MM_JPG_EB] = &mm_jpg_eb.common.hw,
+ [CLK_MM_DCAM_EB] = &mm_dcam_eb.common.hw,
+ [CLK_MM_ISP_EB] = &mm_isp_eb.common.hw,
+ [CLK_MM_CSI2_EB] = &mm_csi2_eb.common.hw,
+ [CLK_MM_CSI1_EB] = &mm_csi1_eb.common.hw,
+ [CLK_MM_CSI0_EB] = &mm_csi0_eb.common.hw,
+ [CLK_MM_CKG_EB] = &mm_ckg_eb.common.hw,
+ [CLK_ISP_AHB_EB] = &mm_isp_ahb_eb.common.hw,
+ [CLK_MM_DVFS_EB] = &mm_dvfs_eb.common.hw,
+ [CLK_MM_FD_EB] = &mm_fd_eb.common.hw,
+ [CLK_MM_SENSOR2_EB] = &mm_sensor2_en.common.hw,
+ [CLK_MM_SENSOR1_EB] = &mm_sensor1_en.common.hw,
+ [CLK_MM_SENSOR0_EB] = &mm_sensor0_en.common.hw,
+ [CLK_MM_MIPI_CSI2_EB] = &mm_mipi_csi2_en.common.hw,
+ [CLK_MM_MIPI_CSI1_EB] = &mm_mipi_csi1_en.common.hw,
+ [CLK_MM_MIPI_CSI0_EB] = &mm_mipi_csi0_en.common.hw,
+ [CLK_DCAM_AXI_EB] = &mm_dcam_axi_en.common.hw,
+ [CLK_ISP_AXI_EB] = &mm_isp_axi_en.common.hw,
+ [CLK_MM_CPHY_EB] = &mm_cphy_en.common.hw,
+ },
+ .num = CLK_MM_GATE_CLK_NUM,
+};
+
+static struct sprd_clk_desc ums512_mm_gate_clk_desc = {
+ .clk_clks = ums512_mm_gate_clk,
+ .num_clk_clks = ARRAY_SIZE(ums512_mm_gate_clk),
+ .hw_clks = &ums512_mm_gate_clk_hws,
+};
+
+/* ap apb gates */
+static SPRD_SC_GATE_CLK_FW_NAME(sim0_eb, "sim0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis0_eb, "iis0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis1_eb, "iis1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis2_eb, "iis2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apb_reg_eb, "apb-reg-eb", "ext-26m",
+ 0x0, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi0_eb, "spi0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi1_eb, "spi1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi2_eb, "spi2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi3_eb, "spi3-eb", "ext-26m",
+ 0x0, 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c0_eb, "i2c0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c1_eb, "i2c1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c2_eb, "i2c2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c3_eb, "i2c3-eb", "ext-26m",
+ 0x0, 0x1000, BIT(12), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c4_eb, "i2c4-eb", "ext-26m",
+ 0x0, 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart0_eb, "uart0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart1_eb, "uart1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart2_eb, "uart2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sim0_32k_eb, "sim0-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi0_lfin_eb, "spi0-lfin-eb", "ext-26m",
+ 0x0, 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi1_lfin_eb, "spi1-lfin-eb", "ext-26m",
+ 0x0, 0x1000, BIT(19), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi2_lfin_eb, "spi2-lfin-eb", "ext-26m",
+ 0x0, 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi3_lfin_eb, "spi3-lfin-eb", "ext-26m",
+ 0x0, 0x1000, BIT(21), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio0_eb, "sdio0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(22), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio1_eb, "sdio1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(23), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio2_eb, "sdio2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(24), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(emmc_eb, "emmc-eb", "ext-26m",
+ 0x0, 0x1000, BIT(25), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio0_32k_eb, "sdio0-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(26), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio1_32k_eb, "sdio1-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(27), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio2_32k_eb, "sdio2-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(28), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(emmc_32k_eb, "emmc-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(29), 0, 0);
+
+static struct sprd_clk_common *ums512_apapb_gate[] = {
+ /* address base is 0x71000000 */
+ &sim0_eb.common,
+ &iis0_eb.common,
+ &iis1_eb.common,
+ &iis2_eb.common,
+ &apb_reg_eb.common,
+ &spi0_eb.common,
+ &spi1_eb.common,
+ &spi2_eb.common,
+ &spi3_eb.common,
+ &i2c0_eb.common,
+ &i2c1_eb.common,
+ &i2c2_eb.common,
+ &i2c3_eb.common,
+ &i2c4_eb.common,
+ &uart0_eb.common,
+ &uart1_eb.common,
+ &uart2_eb.common,
+ &sim0_32k_eb.common,
+ &spi0_lfin_eb.common,
+ &spi1_lfin_eb.common,
+ &spi2_lfin_eb.common,
+ &spi3_lfin_eb.common,
+ &sdio0_eb.common,
+ &sdio1_eb.common,
+ &sdio2_eb.common,
+ &emmc_eb.common,
+ &sdio0_32k_eb.common,
+ &sdio1_32k_eb.common,
+ &sdio2_32k_eb.common,
+ &emmc_32k_eb.common,
+};
+
+static struct clk_hw_onecell_data ums512_apapb_gate_hws = {
+ .hws = {
+ [CLK_SIM0_EB] = &sim0_eb.common.hw,
+ [CLK_IIS0_EB] = &iis0_eb.common.hw,
+ [CLK_IIS1_EB] = &iis1_eb.common.hw,
+ [CLK_IIS2_EB] = &iis2_eb.common.hw,
+ [CLK_APB_REG_EB] = &apb_reg_eb.common.hw,
+ [CLK_SPI0_EB] = &spi0_eb.common.hw,
+ [CLK_SPI1_EB] = &spi1_eb.common.hw,
+ [CLK_SPI2_EB] = &spi2_eb.common.hw,
+ [CLK_SPI3_EB] = &spi3_eb.common.hw,
+ [CLK_I2C0_EB] = &i2c0_eb.common.hw,
+ [CLK_I2C1_EB] = &i2c1_eb.common.hw,
+ [CLK_I2C2_EB] = &i2c2_eb.common.hw,
+ [CLK_I2C3_EB] = &i2c3_eb.common.hw,
+ [CLK_I2C4_EB] = &i2c4_eb.common.hw,
+ [CLK_UART0_EB] = &uart0_eb.common.hw,
+ [CLK_UART1_EB] = &uart1_eb.common.hw,
+ [CLK_UART2_EB] = &uart2_eb.common.hw,
+ [CLK_SIM0_32K_EB] = &sim0_32k_eb.common.hw,
+ [CLK_SPI0_LFIN_EB] = &spi0_lfin_eb.common.hw,
+ [CLK_SPI1_LFIN_EB] = &spi1_lfin_eb.common.hw,
+ [CLK_SPI2_LFIN_EB] = &spi2_lfin_eb.common.hw,
+ [CLK_SPI3_LFIN_EB] = &spi3_lfin_eb.common.hw,
+ [CLK_SDIO0_EB] = &sdio0_eb.common.hw,
+ [CLK_SDIO1_EB] = &sdio1_eb.common.hw,
+ [CLK_SDIO2_EB] = &sdio2_eb.common.hw,
+ [CLK_EMMC_EB] = &emmc_eb.common.hw,
+ [CLK_SDIO0_32K_EB] = &sdio0_32k_eb.common.hw,
+ [CLK_SDIO1_32K_EB] = &sdio1_32k_eb.common.hw,
+ [CLK_SDIO2_32K_EB] = &sdio2_32k_eb.common.hw,
+ [CLK_EMMC_32K_EB] = &emmc_32k_eb.common.hw,
+ },
+ .num = CLK_AP_APB_GATE_NUM,
+};
+
+static struct sprd_clk_desc ums512_apapb_gate_desc = {
+ .clk_clks = ums512_apapb_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_apapb_gate),
+ .hw_clks = &ums512_apapb_gate_hws,
+};
+
+static const struct of_device_id sprd_ums512_clk_ids[] = {
+ { .compatible = "sprd,ums512-pmu-gate", /* 0x327e0000 */
+ .data = &ums512_pmu_gate_desc },
+ { .compatible = "sprd,ums512-g0-pll", /* 0x32390000 */
+ .data = &ums512_g0_pll_desc },
+ { .compatible = "sprd,ums512-g2-pll", /* 0x323b0000 */
+ .data = &ums512_g2_pll_desc },
+ { .compatible = "sprd,ums512-g3-pll", /* 0x323c0000 */
+ .data = &ums512_g3_pll_desc },
+ { .compatible = "sprd,ums512-gc-pll", /* 0x323e0000 */
+ .data = &ums512_gc_pll_desc },
+ { .compatible = "sprd,ums512-apahb-gate", /* 0x20100000 */
+ .data = &ums512_apahb_gate_desc },
+ { .compatible = "sprd,ums512-ap-clk", /* 0x20200000 */
+ .data = &ums512_ap_clk_desc },
+ { .compatible = "sprd,ums512-aonapb-clk", /* 0x32080200 */
+ .data = &ums512_aon_apb_desc },
+ { .compatible = "sprd,ums512-aon-gate", /* 0x327d0000 */
+ .data = &ums512_aon_gate_desc },
+ { .compatible = "sprd,ums512-audcpapb-gate", /* 0x3350d000 */
+ .data = &ums512_audcpapb_gate_desc },
+ { .compatible = "sprd,ums512-audcpahb-gate", /* 0x335e0000 */
+ .data = &ums512_audcpahb_gate_desc },
+ { .compatible = "sprd,ums512-gpu-clk", /* 0x60100000 */
+ .data = &ums512_gpu_clk_desc },
+ { .compatible = "sprd,ums512-mm-clk", /* 0x62100000 */
+ .data = &ums512_mm_clk_desc },
+ { .compatible = "sprd,ums512-mm-gate-clk", /* 0x62200000 */
+ .data = &ums512_mm_gate_clk_desc },
+ { .compatible = "sprd,ums512-apapb-gate", /* 0x71000000 */
+ .data = &ums512_apapb_gate_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sprd_ums512_clk_ids);
+
+static int ums512_clk_probe(struct platform_device *pdev)
+{
+ const struct sprd_clk_desc *desc;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -ENODEV;
+
+ ret = sprd_clk_regmap_init(pdev, desc);
+ if (ret)
+ return ret;
+
+ return sprd_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static struct platform_driver ums512_clk_driver = {
+ .probe = ums512_clk_probe,
+ .driver = {
+ .name = "ums512-clk",
+ .of_match_table = sprd_ums512_clk_ids,
+ },
+};
+module_platform_driver(ums512_clk_driver);
+
+MODULE_DESCRIPTION("Unisoc UMS512 Clock Driver");
+MODULE_LICENSE("GPL");
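
[Editorial aside, not part of the patch: the gate clocks registered above are consumed through the generic clk API. A minimal, hypothetical consumer-side sketch follows; the function name and the use of an anonymous DT "clocks" reference are illustrative assumptions only.]

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_consumer_probe(struct device *dev)
	{
		struct clk *eb;
		int ret;

		/* Gate clock referenced by the consumer's DT "clocks" property */
		eb = devm_clk_get(dev, NULL);
		if (IS_ERR(eb))
			return PTR_ERR(eb);

		/* Ungate the bus clock before touching the peripheral registers */
		ret = clk_prepare_enable(eb);
		if (ret)
			return ret;

		/* ... access the peripheral ... */

		clk_disable_unprepare(eb);
		return 0;
	}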
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 582a22c04919..d820292a381d 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -987,6 +987,7 @@ static void __init st_of_quadfs_setup(struct device_node *np,
const char *pll_name, *clk_parent_name;
void __iomem *reg;
spinlock_t *lock;
+ struct device_node *parent_np;
/*
* First check for reg property within the node to keep backward
@@ -994,7 +995,9 @@ static void __init st_of_quadfs_setup(struct device_node *np,
*/
reg = of_iomap(np, 0);
if (!reg) {
- reg = of_iomap(of_get_parent(np), 0);
+ parent_np = of_get_parent(np);
+ reg = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
if (!reg) {
pr_err("%s: Failed to get base address\n", __func__);
return;
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index ee39af7a0b72..596e939ad905 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -56,6 +56,7 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np,
void __iomem *reg;
const char **parents;
int num_parents = 0;
+ struct device_node *parent_np;
/*
* First check for reg property within the node to keep backward
@@ -63,7 +64,9 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np,
*/
reg = of_iomap(np, 0);
if (!reg) {
- reg = of_iomap(of_get_parent(np), 0);
+ parent_np = of_get_parent(np);
+ reg = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
if (!reg) {
pr_err("%s: Failed to get base address\n", __func__);
return;
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index 51058ba4db4d..8ef3cdeb7962 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -104,6 +104,8 @@ static struct ccu_nm pll_video0_4x_clk = {
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .min_rate = 252000000U,
+ .max_rate = 2400000000U,
.common = {
.reg = 0x040,
.hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video0-4x", osc24M,
@@ -126,6 +128,8 @@ static struct ccu_nm pll_video1_4x_clk = {
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .min_rate = 252000000U,
+ .max_rate = 2400000000U,
.common = {
.reg = 0x048,
.hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video1-4x", osc24M,
@@ -175,6 +179,8 @@ static struct ccu_nm pll_audio0_4x_clk = {
.m = _SUNXI_CCU_DIV(16, 6),
.sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
0x178, BIT(31)),
+ .min_rate = 180000000U,
+ .max_rate = 3000000000U,
.common = {
.reg = 0x078,
.features = CCU_FEATURE_SIGMA_DELTA_MOD,
@@ -202,6 +208,8 @@ static struct ccu_nm pll_audio1_clk = {
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1),
+ .min_rate = 180000000U,
+ .max_rate = 3000000000U,
.common = {
.reg = 0x080,
.hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio1", osc24M,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 2f6f02f00be2..b70b312e7483 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -256,29 +256,19 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
return PTR_ERR(reg);
bus_clk = devm_clk_get(&pdev->dev, "bus");
- if (IS_ERR(bus_clk)) {
- ret = PTR_ERR(bus_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bus_clk),
+ "Couldn't get bus clk\n");
mod_clk = devm_clk_get(&pdev->dev, "mod");
- if (IS_ERR(mod_clk)) {
- ret = PTR_ERR(mod_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't get mod clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(mod_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mod_clk),
+ "Couldn't get mod clk\n");
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Couldn't get reset control: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "Couldn't get reset control\n");
/* The clocks need to be enabled for us to access the registers */
ret = clk_prepare_enable(bus_clk);
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index f2fe0e1cc3c0..1d8b1ae1619d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -213,21 +213,14 @@ static int sun9i_a80_de_clk_probe(struct platform_device *pdev)
return PTR_ERR(reg);
bus_clk = devm_clk_get(&pdev->dev, "bus");
- if (IS_ERR(bus_clk)) {
- ret = PTR_ERR(bus_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bus_clk),
+ "Couldn't get bus clk\n");
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Couldn't get reset control: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "Couldn't get reset control\n");
/* The bus clock needs to be enabled for us to access the registers */
ret = clk_prepare_enable(bus_clk);
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index 575ae4ccc65f..a0fb0da8f356 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -101,12 +101,9 @@ static int sun9i_a80_usb_clk_probe(struct platform_device *pdev)
return PTR_ERR(reg);
bus_clk = devm_clk_get(&pdev->dev, "bus");
- if (IS_ERR(bus_clk)) {
- ret = PTR_ERR(bus_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bus_clk),
+ "Couldn't get bus clk\n");
/* The bus clock needs to be enabled for us to access the registers */
ret = clk_prepare_enable(bus_clk);
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index 3748a39dae7c..d82a71f10c2c 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -349,7 +349,7 @@ static int tegra_bpmp_clk_get_info(struct tegra_bpmp *bpmp, unsigned int id,
if (err < 0)
return err;
- strlcpy(info->name, response.name, MRQ_CLK_NAME_MAXLEN);
+ strscpy(info->name, response.name, MRQ_CLK_NAME_MAXLEN);
info->num_parents = response.num_parents;
for (i = 0; i < info->num_parents; i++)
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index ef718c4b3826..73303458e886 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1166,6 +1166,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA114_CLK_I2S4_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA114_CLK_PWM, TEGRA114_CLK_PLL_P, 408000000, 0 },
/* must be the last entry */
{ TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
};
@@ -1317,6 +1318,7 @@ static void __init tegra114_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 934520aab6e3..6c46592d794e 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1330,6 +1330,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA124_CLK_PWM, TEGRA124_CLK_PLL_P, 408000000, 0 },
/* must be the last entry */
{ TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
};
@@ -1471,6 +1472,7 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index be3c33441cfc..422d78247553 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -1044,6 +1044,7 @@ static struct tegra_clk_init_table init_table[] = {
{ TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0 },
{ TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0 },
{ TEGRA20_CLK_VDE, TEGRA20_CLK_PLL_C, 300000000, 0 },
+ { TEGRA20_CLK_PWM, TEGRA20_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
};
@@ -1131,6 +1132,7 @@ static void __init tegra20_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
BUG();
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index b9099012dc7b..a3488aaac3f7 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3597,6 +3597,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
{ TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
+ { TEGRA210_CLK_PWM, TEGRA210_CLK_PLL_P, 48000000, 0 },
/* This MUST be the last entry. */
{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
};
@@ -3748,6 +3749,7 @@ static void __init tegra210_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 04b496123820..60f1534711f1 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1237,6 +1237,7 @@ static struct tegra_clk_init_table init_table[] = {
{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
{ TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
+ { TEGRA30_CLK_PWM, TEGRA30_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
@@ -1320,6 +1321,7 @@ static void __init tegra30_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
BUG();
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index f0f5bf68b6d2..ff4d6a951681 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -245,14 +245,16 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
if (rc) {
pr_err("%s: failed to lookup atl clock %d\n", __func__,
i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto pm_put;
}
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
pr_err("%s: failed to get atl clock %d from provider\n",
__func__, i);
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto pm_put;
}
cdesc = to_atl_desc(__clk_get_hw(clk));
@@ -285,8 +287,9 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
if (cdesc->enabled)
atl_clk_enable(__clk_get_hw(clk));
}
- pm_runtime_put_sync(cinfo->dev);
+pm_put:
+ pm_runtime_put_sync(cinfo->dev);
return ret;
}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 373e9438b57a..1dc2f15fb75b 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -140,11 +140,12 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
break;
}
}
- of_node_put(from);
kfree(tmp);
- if (found)
+ if (found) {
+ of_node_put(from);
return np;
+ }
/* Fall back to using old node name base provider name */
return of_find_node_by_name(from, name);
diff --git a/drivers/clk/xilinx/Kconfig b/drivers/clk/xilinx/Kconfig
index 5224114176ed..f205522c40ff 100644
--- a/drivers/clk/xilinx/Kconfig
+++ b/drivers/clk/xilinx/Kconfig
@@ -17,3 +17,15 @@ config XILINX_VCU
To compile this driver as a module, choose M here: the
module will be called xlnx_vcu.
+config COMMON_CLK_XLNX_CLKWZRD
+ tristate "Xilinx Clocking Wizard"
+ depends on COMMON_CLK && OF
+ depends on HAS_IOMEM
+ help
+ Support for the Xilinx Clocking Wizard IP core clock generator.
+ Adds support for the Clocking Wizard and compatible IP cores.
+ This driver supports the Xilinx Clocking Wizard programmable clock
+ synthesizer. The number of outputs is configurable in the design.
+
+ If unsure, say N.
+
diff --git a/drivers/clk/xilinx/Makefile b/drivers/clk/xilinx/Makefile
index dee8fd51e303..7ac1789c6b1b 100644
--- a/drivers/clk/xilinx/Makefile
+++ b/drivers/clk/xilinx/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 39367712ef54..eb1dfe7ecc1b 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -2,9 +2,10 @@
/*
* Xilinx 'Clocking Wizard' driver
*
- * Copyright (C) 2013 - 2014 Xilinx
+ * Copyright (C) 2013 - 2021 Xilinx
*
* Sören Brinkmann <soren.brinkmann@xilinx.com>
+ *
*/
#include <linux/platform_device.h>
@@ -43,6 +44,8 @@
#define WZRD_DR_INIT_REG_OFFSET 0x25C
#define WZRD_DR_DIV_TO_PHASE_OFFSET 4
#define WZRD_DR_BEGIN_DYNA_RECONF 0x03
+#define WZRD_DR_BEGIN_DYNA_RECONF_5_2 0x07
+#define WZRD_DR_BEGIN_DYNA_RECONF1_5_2 0x02
#define WZRD_USEC_POLL 10
#define WZRD_TIMEOUT_POLL 1000
@@ -164,7 +167,9 @@ static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
goto err_reconfig;
/* Initiate reconfiguration */
- writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+ writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
divider->base + WZRD_DR_INIT_REG_OFFSET);
/* Check status register */
@@ -223,7 +228,7 @@ static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
void __iomem *div_addr = divider->base + divider->offset;
- rate_div = ((parent_rate * 1000) / rate);
+ rate_div = DIV_ROUND_DOWN_ULL(parent_rate * 1000, rate);
clockout0_div = rate_div / 1000;
pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
@@ -245,7 +250,9 @@ static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
return err;
/* Initiate reconfiguration */
- writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+ writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
divider->base + WZRD_DR_INIT_REG_OFFSET);
/* Check status register */
@@ -441,18 +448,14 @@ static int clk_wzrd_probe(struct platform_device *pdev)
}
clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
- if (IS_ERR(clk_wzrd->clk_in1)) {
- if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
- dev_err(&pdev->dev, "clk_in1 not found\n");
- return PTR_ERR(clk_wzrd->clk_in1);
- }
+ if (IS_ERR(clk_wzrd->clk_in1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
+ "clk_in1 not found\n");
clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(clk_wzrd->axi_clk)) {
- if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
- dev_err(&pdev->dev, "s_axi_aclk not found\n");
- return PTR_ERR(clk_wzrd->axi_clk);
- }
+ if (IS_ERR(clk_wzrd->axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
+ "s_axi_aclk not found\n");
ret = clk_prepare_enable(clk_wzrd->axi_clk);
if (ret) {
dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
@@ -479,7 +482,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- ret = of_property_read_u32(np, "nr-outputs", &nr_outputs);
+ ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
ret = -EINVAL;
goto err_disable_clk;
@@ -614,6 +617,8 @@ static int clk_wzrd_remove(struct platform_device *pdev)
static const struct of_device_id clk_wzrd_ids[] = {
{ .compatible = "xlnx,clocking-wizard" },
+ { .compatible = "xlnx,clocking-wizard-v5.2" },
+ { .compatible = "xlnx,clocking-wizard-v6.0" },
{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index eb25303eefed..5636ff1ce552 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -163,7 +163,7 @@ static int zynqmp_get_clock_name(u32 clk_id, char *clk_name)
ret = zynqmp_is_valid_clock(clk_id);
if (ret == 1) {
- strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
+ strscpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
return 0;
}
@@ -220,18 +220,22 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks)
* This function is used to get name of clock specified by given
* clock ID.
*
- * Return: Returns 0
+ * Return: 0 on success else error+reason
*/
static int zynqmp_pm_clock_get_name(u32 clock_id,
struct name_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
qdata.qid = PM_QID_CLOCK_GET_NAME;
qdata.arg1 = clock_id;
- zynqmp_pm_query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
memcpy(response, ret_payload, sizeof(*response));
return 0;
@@ -710,9 +714,16 @@ static void zynqmp_get_clock_info(void)
FIELD_PREP(CLK_ATTR_NODE_INDEX, i);
zynqmp_pm_clock_get_name(clock[i].clk_id, &name);
+
+ /*
+ * Terminate with a NUL character in case the name provided by firmware
+ * is longer and was truncated due to the size limit.
+ */
+ name.name[sizeof(name.name) - 1] = '\0';
+
if (!strcmp(name.name, RESERVED_CLK_NAME))
continue;
- strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN);
+ strscpy(clock[i].clk_name, name.name, MAX_NAME_LEN);
}
/* Get topology of all clock */
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 422ea79907dd..33a3b2a22659 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -113,17 +113,20 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
static void zynqmp_get_divider2_val(struct clk_hw *hw,
unsigned long rate,
struct zynqmp_clk_divider *divider,
- int *bestdiv)
+ u32 *bestdiv)
{
int div1;
int div2;
long error = LONG_MAX;
unsigned long div1_prate;
struct clk_hw *div1_parent_hw;
+ struct zynqmp_clk_divider *pdivider;
struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
- struct zynqmp_clk_divider *pdivider =
- to_zynqmp_clk_divider(div2_parent_hw);
+ if (!div2_parent_hw)
+ return;
+
+ pdivider = to_zynqmp_clk_divider(div2_parent_hw);
if (!pdivider)
return;
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 91a6b4cc910e..0d3e1377b092 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -102,26 +102,25 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
u32 fbdiv;
- long rate_div, f;
+ u32 mult, div;
- /* Enable the fractional mode if needed */
- rate_div = (rate * FRAC_DIV) / *prate;
- f = rate_div % FRAC_DIV;
- if (f) {
- if (rate > PS_PLL_VCO_MAX) {
- fbdiv = rate / PS_PLL_VCO_MAX;
- rate = rate / (fbdiv + 1);
- }
- if (rate < PS_PLL_VCO_MIN) {
- fbdiv = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
- rate = rate * fbdiv;
- }
- return rate;
+ /* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */
+ if (rate > PS_PLL_VCO_MAX) {
+ div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX);
+ rate = rate / div;
+ }
+ if (rate < PS_PLL_VCO_MIN) {
+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
+ rate = rate * mult;
}
fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
- fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- return *prate * fbdiv;
+ if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) {
+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
+ rate = *prate * fbdiv;
+ }
+
+ return rate;
}
/**
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4f2bb7315b67..4469e7f555e9 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -434,7 +434,7 @@ config ATMEL_TCB_CLKSRC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
- depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on ARCH_ARTPEC || ARCH_EXYNOS || COMPILE_TEST
help
Support for Multi Core Timer controller on Exynos SoCs.
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 9ab8221ee3c6..a7ff77550e17 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -44,8 +44,8 @@
#define CNTACR_RWVT BIT(4)
#define CNTACR_RWPT BIT(5)
-#define CNTVCT_LO 0x00
-#define CNTPCT_LO 0x08
+#define CNTPCT_LO 0x00
+#define CNTVCT_LO 0x08
#define CNTFRQ 0x10
#define CNTP_CVAL_LO 0x20
#define CNTP_CTL 0x2c
@@ -473,6 +473,8 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "ARM erratum 858921",
.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index f29c812b70c9..bfd60093ee1c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -33,7 +33,7 @@
#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
-#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x))
+#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * (x)))
#define EXYNOS4_MCT_L_MASK (0xffffff00)
#define MCT_L_TCNTB_OFFSET (0x00)
@@ -66,6 +66,8 @@
#define MCT_L0_IRQ 4
/* Max number of IRQ as per DT binding document */
#define MCT_NR_IRQS 20
+/* Max number of local timers */
+#define MCT_NR_LOCAL (MCT_NR_IRQS - MCT_L0_IRQ)
enum {
MCT_INT_SPI,
@@ -233,9 +235,16 @@ static cycles_t exynos4_read_current_timer(void)
}
#endif
-static int __init exynos4_clocksource_init(void)
+static int __init exynos4_clocksource_init(bool frc_shared)
{
- exynos4_mct_frc_start();
+ /*
+ * When the FRC is shared, the main processor should have already
+ * turned it on and we shouldn't be writing to TCON.
+ */
+ if (frc_shared)
+ mct_frc.resume = NULL;
+ else
+ exynos4_mct_frc_start();
#if defined(CONFIG_ARM)
exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
@@ -449,7 +458,6 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
- mevt->base = EXYNOS4_MCT_L_BASE(cpu);
snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
evt->name = mevt->name;
@@ -520,8 +528,17 @@ static int __init exynos4_timer_resources(struct device_node *np)
return 0;
}
+/**
+ * exynos4_timer_interrupts - initialize MCT interrupts
+ * @np: device node for MCT
+ * @int_type: interrupt type, MCT_INT_PPI or MCT_INT_SPI
+ * @local_idx: array mapping CPU numbers to local timer indices
+ * @nr_local: size of @local_idx array
+ */
static int __init exynos4_timer_interrupts(struct device_node *np,
- unsigned int int_type)
+ unsigned int int_type,
+ const u32 *local_idx,
+ size_t nr_local)
{
int nr_irqs, i, err, cpu;
@@ -554,13 +571,21 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
} else {
for_each_possible_cpu(cpu) {
int mct_irq;
+ unsigned int irq_idx;
struct mct_clock_event_device *pcpu_mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
+ if (cpu >= nr_local) {
+ err = -EINVAL;
+ goto out_irq;
+ }
+
+ irq_idx = MCT_L0_IRQ + local_idx[cpu];
+
pcpu_mevt->evt.irq = -1;
- if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
+ if (irq_idx >= ARRAY_SIZE(mct_irqs))
break;
- mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+ mct_irq = mct_irqs[irq_idx];
irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
if (request_irq(mct_irq,
@@ -576,6 +601,17 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
}
}
+ for_each_possible_cpu(cpu) {
+ struct mct_clock_event_device *mevt = per_cpu_ptr(&percpu_mct_tick, cpu);
+
+ if (cpu >= nr_local) {
+ err = -EINVAL;
+ goto out_irq;
+ }
+
+ mevt->base = EXYNOS4_MCT_L_BASE(local_idx[cpu]);
+ }
+
/* Install hotplug callbacks which configure the timer on this CPU */
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
"clockevents/exynos4/mct_timer:starting",
@@ -605,20 +641,49 @@ out_irq:
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
+ bool frc_shared = of_property_read_bool(np, "samsung,frc-shared");
+ u32 local_idx[MCT_NR_LOCAL] = {0};
+ int nr_local;
int ret;
+ nr_local = of_property_count_u32_elems(np, "samsung,local-timers");
+ if (nr_local == 0)
+ return -EINVAL;
+ if (nr_local > 0) {
+ if (nr_local > ARRAY_SIZE(local_idx))
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(np, "samsung,local-timers",
+ local_idx, nr_local);
+ if (ret)
+ return ret;
+ } else {
+ int i;
+
+ nr_local = ARRAY_SIZE(local_idx);
+ for (i = 0; i < nr_local; i++)
+ local_idx[i] = i;
+ }
+
ret = exynos4_timer_resources(np);
if (ret)
return ret;
- ret = exynos4_timer_interrupts(np, int_type);
+ ret = exynos4_timer_interrupts(np, int_type, local_idx, nr_local);
if (ret)
return ret;
- ret = exynos4_clocksource_init();
+ ret = exynos4_clocksource_init(frc_shared);
if (ret)
return ret;
+ /*
+ * When the FRC is shared with a main processor, this secondary
+ * processor cannot use the global comparator.
+ */
+ if (frc_shared)
+ return ret;
+
return exynos4_clockevent_init();
}
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 21d1392637b8..8da972dc1713 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -224,7 +224,7 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#ifdef CONFIG_ARCH_R9A07G044
+#ifdef CONFIG_ARCH_RZG2L
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
index 8b38b3212388..fe4fa8d7b3f1 100644
--- a/drivers/clocksource/timer-gxp.c
+++ b/drivers/clocksource/timer-gxp.c
@@ -171,6 +171,7 @@ static int gxp_timer_probe(struct platform_device *pdev)
{
struct platform_device *gxp_watchdog_device;
struct device *dev = &pdev->dev;
+ int ret;
if (!gxp_timer) {
pr_err("Gxp Timer not initialized, cannot create watchdog");
@@ -187,7 +188,11 @@ static int gxp_timer_probe(struct platform_device *pdev)
gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
gxp_watchdog_device->dev.parent = dev;
- return platform_device_add(gxp_watchdog_device);
+ ret = platform_device_add(gxp_watchdog_device);
+ if (ret)
+ platform_device_put(gxp_watchdog_device);
+
+ return ret;
}
static const struct of_device_id gxp_timer_of_match[] = {
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 523e37662a6e..5a7a951c4efc 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -134,8 +134,10 @@ static int __init sysctr_timer_init(struct device_node *np)
if (ret)
return ret;
- /* system counter clock is divided by 3 internally */
- to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ if (!of_property_read_bool(np, "nxp,no-divider")) {
+ /* system counter clock is divided by 3 internally */
+ to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ }
sys_ctr_base = timer_of_base(&to_sysctr);
cmpcr = readl(sys_ctr_base + CMPCR);
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index 94dc6e42e983..e5a70aa1deb4 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -26,6 +26,7 @@
#define TIMER_IRQ_EN_REG 0x00
#define TIMER_IRQ_EN(val) BIT(val)
#define TIMER_IRQ_ST_REG 0x04
+#define TIMER_IRQ_CLEAR(val) BIT(val)
#define TIMER_CTL_REG(val) (0x10 * val + 0x10)
#define TIMER_CTL_ENABLE BIT(0)
#define TIMER_CTL_RELOAD BIT(1)
@@ -123,7 +124,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
static void sun4i_timer_clear_interrupt(void __iomem *base)
{
- writel(TIMER_IRQ_EN(0), base + TIMER_IRQ_ST_REG);
+ writel(TIMER_IRQ_CLEAR(0), base + TIMER_IRQ_ST_REG);
}
static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 469f7c91564b..cad29ded3a48 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -33,6 +33,116 @@
#include <clocksource/timer-ti-dm.h>
+/*
+ * timer errata flags
+ *
+ * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
+ * errata prevents us from using posted mode on these devices, unless the
+ * timer counter register is never read. For more details please refer to
+ * the OMAP3/4/5 errata documents.
+ */
+#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
+
+/* posted mode types */
+#define OMAP_TIMER_NONPOSTED 0x00
+#define OMAP_TIMER_POSTED 0x01
+
+/* register offsets with the write pending bit encoded */
+#define WPSHIFT 16
+
+#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
+ | (WP_TCLR << WPSHIFT))
+
+#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
+ | (WP_TCRR << WPSHIFT))
+
+#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
+ | (WP_TLDR << WPSHIFT))
+
+#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
+ | (WP_TTGR << WPSHIFT))
+
+#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
+ | (WP_TMAR << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
+ | (WP_TPIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
+ | (WP_TNIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
+ | (WP_TCVR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
+
+struct timer_regs {
+ u32 ocp_cfg;
+ u32 tidr;
+ u32 tier;
+ u32 twer;
+ u32 tclr;
+ u32 tcrr;
+ u32 tldr;
+ u32 ttrg;
+ u32 twps;
+ u32 tmar;
+ u32 tcar1;
+ u32 tsicr;
+ u32 tcar2;
+ u32 tpir;
+ u32 tnir;
+ u32 tcvr;
+ u32 tocr;
+ u32 towr;
+};
+
+struct dmtimer {
+ struct omap_dm_timer cookie;
+ int id;
+ int irq;
+ struct clk *fclk;
+
+ void __iomem *io_base;
+ int irq_stat; /* TISR/IRQSTATUS interrupt status */
+ int irq_ena; /* irq enable */
+ int irq_dis; /* irq disable, only on v2 ip */
+ void __iomem *pend; /* write pending */
+ void __iomem *func_base; /* function register base */
+
+ atomic_t enabled;
+ unsigned long rate;
+ unsigned reserved:1;
+ unsigned posted:1;
+ unsigned omap1:1;
+ struct timer_regs context;
+ int revision;
+ u32 capability;
+ u32 errata;
+ struct platform_device *pdev;
+ struct list_head node;
+ struct notifier_block nb;
+};
+
static u32 omap_reserved_systimers;
static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);
@@ -44,27 +154,56 @@ enum {
REQUEST_BY_NODE,
};
-static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
- int posted)
+/**
+ * dmtimer_read - read timer registers in posted and non-posted mode
+ * @timer: timer pointer on which the read operation is performed
+ * @reg: lowest byte holds the register offset
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode, the
+ * write pending bit must be checked. Otherwise a read of a non-completed
+ * write will produce an error.
+ */
+static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ u16 wp, offset;
+
+ wp = reg >> WPSHIFT;
+ offset = reg & 0xff;
+
+ /* Wait for a possible write pending bit in posted mode */
+ if (wp && timer->posted)
+ while (readl_relaxed(timer->pend) & wp)
cpu_relax();
- return readl_relaxed(timer->func_base + (reg & 0xff));
+ return readl_relaxed(timer->func_base + offset);
}
-static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
- u32 reg, u32 val, int posted)
+/**
+ * dmtimer_write - write timer registers in posted and non-posted mode
+ * @timer: timer pointer on which the write operation is performed
+ * @reg: lowest byte holds the register offset
+ * @val: data to write into the register
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode, the write
+ * pending bit must be checked. Otherwise a write on a register which has a
+ * pending write will be lost.
+ */
+static inline void dmtimer_write(struct dmtimer *timer, u32 reg, u32 val)
{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ u16 wp, offset;
+
+ wp = reg >> WPSHIFT;
+ offset = reg & 0xff;
+
+ /* Wait for a possible write pending bit in posted mode */
+ if (wp && timer->posted)
+ while (readl_relaxed(timer->pend) & wp)
cpu_relax();
- writel_relaxed(val, timer->func_base + (reg & 0xff));
+ writel_relaxed(val, timer->func_base + offset);
}
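For readers unfamiliar with the encoding used by the OMAP_TIMER_*_REG constants above: the low byte carries the register offset and the bits above WPSHIFT carry the write-pending mask that dmtimer_read()/dmtimer_write() poll for in posted mode. Below is a small standalone sketch of that decode step; the EX_* values are illustrative assumptions, not taken from the TRM.

#include <stdint.h>
#include <stdio.h>

#define EX_WPSHIFT		16			/* mirrors WPSHIFT above */
#define EX_WP_TCLR		(1 << 0)		/* assumed write-pending bit for TCLR */
#define EX_TIMER_CTRL_OFFSET	0x38			/* example register offset */
#define EX_TIMER_CTRL_REG	(EX_TIMER_CTRL_OFFSET | (EX_WP_TCLR << EX_WPSHIFT))

int main(void)
{
	uint32_t reg = EX_TIMER_CTRL_REG;
	uint16_t wp = reg >> EX_WPSHIFT;	/* bit(s) to poll in the write-pending register */
	uint16_t offset = reg & 0xff;		/* offset from the functional register base */

	printf("offset=0x%02x wp_mask=0x%04x\n", offset, wp);
	return 0;
}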
-static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
+static inline void __omap_dm_timer_init_regs(struct dmtimer *timer)
{
u32 tidr;
@@ -72,16 +211,16 @@ static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
tidr = readl_relaxed(timer->io_base);
if (!(tidr >> 16)) {
timer->revision = 1;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
+ timer->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_dis = OMAP_TIMER_V1_INT_EN_OFFSET;
timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
timer->func_base = timer->io_base;
} else {
timer->revision = 2;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
+ timer->irq_stat = OMAP_TIMER_V2_IRQSTATUS - OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET - OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->irq_dis = OMAP_TIMER_V2_IRQENABLE_CLR - OMAP_TIMER_V2_FUNC_OFFSET;
timer->pend = timer->io_base +
_OMAP_TIMER_WRITE_PEND_OFFSET +
OMAP_TIMER_V2_FUNC_OFFSET;
@@ -99,35 +238,34 @@ static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
* complete. Enabling this feature can improve performance for writing to the
* timer registers.
*/
-static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
+static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
{
if (timer->posted)
return;
if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
timer->posted = OMAP_TIMER_NONPOSTED;
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0);
return;
}
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
- OMAP_TIMER_CTRL_POSTED, 0);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED);
timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
timer->posted = OMAP_TIMER_POSTED;
}
-static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
- int posted, unsigned long rate)
+static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+ unsigned long rate)
{
u32 l;
- l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
l &= ~0x1;
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
#ifdef CONFIG_ARCH_OMAP2PLUS
/* Readback to make sure write has completed */
- __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
/*
* Wait for functional clock period x 3.5 to make sure that
* timer is stopped
@@ -137,104 +275,59 @@ static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
}
/* Ack possibly pending interrupt */
- writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
+ dmtimer_write(timer, timer->irq_stat, OMAP_TIMER_INT_OVERFLOW);
}
-static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
- unsigned int value)
+static inline void __omap_dm_timer_int_enable(struct dmtimer *timer,
+ unsigned int value)
{
- writel_relaxed(value, timer->irq_ena);
- __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+ dmtimer_write(timer, timer->irq_ena, value);
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
}
static inline unsigned int
-__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
+__omap_dm_timer_read_counter(struct dmtimer *timer)
{
- return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
+ return dmtimer_read(timer, OMAP_TIMER_COUNTER_REG);
}
-static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
+static inline void __omap_dm_timer_write_status(struct dmtimer *timer,
unsigned int value)
{
- writel_relaxed(value, timer->irq_stat);
+ dmtimer_write(timer, timer->irq_stat, value);
}
-/**
- * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
- * @timer: timer pointer over which read operation to perform
- * @reg: lowest byte holds the register offset
- *
- * The posted mode bit is encoded in reg. Note that in posted mode write
- * pending bit must be checked. Otherwise a read of a non completed write
- * will produce an error.
- */
-static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
+static void omap_timer_restore_context(struct dmtimer *timer)
{
- WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
- return __omap_dm_timer_read(timer, reg, timer->posted);
+ dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, timer->context.ocp_cfg);
+
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, timer->context.twer);
+ dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, timer->context.tcrr);
+ dmtimer_write(timer, OMAP_TIMER_LOAD_REG, timer->context.tldr);
+ dmtimer_write(timer, OMAP_TIMER_MATCH_REG, timer->context.tmar);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, timer->context.tsicr);
+ dmtimer_write(timer, timer->irq_ena, timer->context.tier);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, timer->context.tclr);
}
-/**
- * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
- * @timer: timer pointer over which write operation is to perform
- * @reg: lowest byte holds the register offset
- * @value: data to write into the register
- *
- * The posted mode bit is encoded in reg. Note that in posted mode the write
- * pending bit must be checked. Otherwise a write on a register which has a
- * pending write will be lost.
- */
-static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
- u32 value)
+static void omap_timer_save_context(struct dmtimer *timer)
{
- WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
- __omap_dm_timer_write(timer, reg, value, timer->posted);
-}
-
-static void omap_timer_restore_context(struct omap_dm_timer *timer)
-{
- __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET,
- timer->context.ocp_cfg, 0);
-
- omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
- timer->context.twer);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
- timer->context.tcrr);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
- timer->context.tldr);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
- timer->context.tmar);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
- timer->context.tsicr);
- writel_relaxed(timer->context.tier, timer->irq_ena);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
- timer->context.tclr);
-}
-
-static void omap_timer_save_context(struct omap_dm_timer *timer)
-{
- timer->context.ocp_cfg =
- __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
-
- timer->context.tclr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- timer->context.twer =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG);
- timer->context.tldr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_LOAD_REG);
- timer->context.tmar =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_MATCH_REG);
- timer->context.tier = readl_relaxed(timer->irq_ena);
- timer->context.tsicr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_IF_CTRL_REG);
+ timer->context.ocp_cfg = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
+
+ timer->context.tclr = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+ timer->context.twer = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG);
+ timer->context.tldr = dmtimer_read(timer, OMAP_TIMER_LOAD_REG);
+ timer->context.tmar = dmtimer_read(timer, OMAP_TIMER_MATCH_REG);
+ timer->context.tier = dmtimer_read(timer, timer->irq_ena);
+ timer->context.tsicr = dmtimer_read(timer, OMAP_TIMER_IF_CTRL_REG);
}
static int omap_timer_context_notifier(struct notifier_block *nb,
unsigned long cmd, void *v)
{
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
- timer = container_of(nb, struct omap_dm_timer, nb);
+ timer = container_of(nb, struct dmtimer, nb);
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
@@ -256,18 +349,17 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int omap_dm_timer_reset(struct omap_dm_timer *timer)
+static int omap_dm_timer_reset(struct dmtimer *timer)
{
u32 l, timeout = 100000;
if (timer->revision != 1)
return -EINVAL;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
do {
- l = __omap_dm_timer_read(timer,
- OMAP_TIMER_V1_SYS_STAT_OFFSET, 0);
+ l = dmtimer_read(timer, OMAP_TIMER_V1_SYS_STAT_OFFSET);
} while (!l && timeout--);
if (!timeout) {
@@ -276,22 +368,38 @@ static int omap_dm_timer_reset(struct omap_dm_timer *timer)
}
/* Configure timer for smart-idle mode */
- l = __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
+ l = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
l |= 0x2 << 0x3;
- __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l, 0);
+ dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l);
timer->posted = 0;
return 0;
}
-static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
+/*
+ * Functions exposed to PWM and remoteproc drivers via platform_data.
+ * Do not use these in the driver, these will get deprecated and will
+ * be replaced by Linux generic framework functions such as
+ * chained interrupts and clock framework.
+ */
+static struct dmtimer *to_dmtimer(struct omap_dm_timer *cookie)
+{
+ if (!cookie)
+ return NULL;
+
+ return container_of(cookie, struct dmtimer, cookie);
+}
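The comment above introduces the cookie scheme: clients keep a struct omap_dm_timer handle, and the driver recovers its private struct dmtimer with container_of(). A minimal user-space sketch of the same pattern, with made-up type names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cookie { int unused; };		/* opaque handle handed out to clients */

struct priv {				/* driver-private state wrapping the handle */
	int id;
	struct cookie cookie;
};

static struct priv *to_priv(struct cookie *c)
{
	return c ? container_of(c, struct priv, cookie) : NULL;
}

int main(void)
{
	struct priv p = { .id = 7 };
	struct cookie *handle = &p.cookie;	/* what a request function would return */

	printf("recovered id=%d\n", to_priv(handle)->id);
	return 0;
}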
+
+static int omap_dm_timer_set_source(struct omap_dm_timer *cookie, int source)
{
int ret;
const char *parent_name;
struct clk *parent;
struct dmtimer_platform_data *pdata;
+ struct dmtimer *timer;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer) || IS_ERR(timer->fclk))
return -EINVAL;
@@ -316,7 +424,7 @@ static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
* use the clock framework to set the parent clock. To be removed
* once OMAP1 migrated to using clock framework for dmtimers
*/
- if (pdata && pdata->set_timer_src)
+ if (timer->omap1 && pdata && pdata->set_timer_src)
return pdata->set_timer_src(timer->pdev, source);
#if defined(CONFIG_COMMON_CLK)
@@ -341,44 +449,44 @@ static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
return ret;
}
-static void omap_dm_timer_enable(struct omap_dm_timer *timer)
+static void omap_dm_timer_enable(struct omap_dm_timer *cookie)
{
- pm_runtime_get_sync(&timer->pdev->dev);
+ struct dmtimer *timer = to_dmtimer(cookie);
+ struct device *dev = &timer->pdev->dev;
+ int rc;
+
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ dev_err(dev, "could not enable timer\n");
}
-static void omap_dm_timer_disable(struct omap_dm_timer *timer)
+static void omap_dm_timer_disable(struct omap_dm_timer *cookie)
{
- pm_runtime_put_sync(&timer->pdev->dev);
+ struct dmtimer *timer = to_dmtimer(cookie);
+ struct device *dev = &timer->pdev->dev;
+
+ pm_runtime_put_sync(dev);
}
-static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+static int omap_dm_timer_prepare(struct dmtimer *timer)
{
+ struct device *dev = &timer->pdev->dev;
int rc;
- /*
- * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
- * do not call clk_get() for these devices.
- */
- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
- timer->fclk = clk_get(&timer->pdev->dev, "fck");
- if (WARN_ON_ONCE(IS_ERR(timer->fclk))) {
- dev_err(&timer->pdev->dev, ": No fclk handle.\n");
- return -EINVAL;
- }
- }
-
- omap_dm_timer_enable(timer);
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
rc = omap_dm_timer_reset(timer);
if (rc) {
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
return rc;
}
}
__omap_dm_timer_enable_posted(timer);
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -388,19 +496,9 @@ static inline u32 omap_dm_timer_reserved_systimer(int id)
return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
}
-int omap_dm_timer_reserve_systimer(int id)
+static struct dmtimer *_omap_dm_timer_request(int req_type, void *data)
{
- if (omap_dm_timer_reserved_systimer(id))
- return -ENODEV;
-
- omap_reserved_systimers |= (1 << (id - 1));
-
- return 0;
-}
-
-static struct omap_dm_timer *_omap_dm_timer_request(int req_type, void *data)
-{
- struct omap_dm_timer *timer = NULL, *t;
+ struct dmtimer *timer = NULL, *t;
struct device_node *np = NULL;
unsigned long flags;
u32 cap = 0;
@@ -484,11 +582,19 @@ found:
static struct omap_dm_timer *omap_dm_timer_request(void)
{
- return _omap_dm_timer_request(REQUEST_ANY, NULL);
+ struct dmtimer *timer;
+
+ timer = _omap_dm_timer_request(REQUEST_ANY, NULL);
+ if (!timer)
+ return NULL;
+
+ return &timer->cookie;
}
static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
+ struct dmtimer *timer;
+
/* Requesting timer by ID is not supported when device tree is used */
if (of_have_populated_dt()) {
pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
@@ -496,21 +602,11 @@ static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
return NULL;
}
- return _omap_dm_timer_request(REQUEST_BY_ID, &id);
-}
+ timer = _omap_dm_timer_request(REQUEST_BY_ID, &id);
+ if (!timer)
+ return NULL;
-/**
- * omap_dm_timer_request_by_cap - Request a timer by capability
- * @cap: Bit mask of capabilities to match
- *
- * Find a timer based upon capabilities bit mask. Callers of this function
- * should use the definitions found in the plat/dmtimer.h file under the
- * comment "timer capabilities used in hwmod database". Returns pointer to
- * timer handle on success and a NULL pointer on failure.
- */
-struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
-{
- return _omap_dm_timer_request(REQUEST_BY_CAP, &cap);
+ return &timer->cookie;
}
/**
@@ -522,26 +618,34 @@ struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
*/
static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
{
+ struct dmtimer *timer;
+
if (!np)
return NULL;
- return _omap_dm_timer_request(REQUEST_BY_NODE, np);
+ timer = _omap_dm_timer_request(REQUEST_BY_NODE, np);
+ if (!timer)
+ return NULL;
+
+ return &timer->cookie;
}
-static int omap_dm_timer_free(struct omap_dm_timer *timer)
+static int omap_dm_timer_free(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- clk_put(timer->fclk);
-
WARN_ON(!timer->reserved);
timer->reserved = 0;
return 0;
}
-int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
+int omap_dm_timer_get_irq(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer = to_dmtimer(cookie);
if (timer)
return timer->irq;
return -EINVAL;
@@ -550,7 +654,7 @@ int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
#if defined(CONFIG_ARCH_OMAP1)
#include <linux/soc/ti/omap1-io.h>
-static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
return NULL;
}
@@ -562,7 +666,7 @@ static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
int i = 0;
- struct omap_dm_timer *timer = NULL;
+ struct dmtimer *timer = NULL;
unsigned long flags;
/* If ARMXOR cannot be idled this function call is unnecessary */
@@ -574,7 +678,7 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
list_for_each_entry(timer, &omap_timer_list, node) {
u32 l;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
inputmask &= ~(1 << 1);
@@ -590,8 +694,10 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
#else
-static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer = to_dmtimer(cookie);
+
if (timer && !IS_ERR(timer->fclk))
return timer->fclk;
return NULL;
@@ -606,95 +712,125 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
#endif
-int omap_dm_timer_trigger(struct omap_dm_timer *timer)
-{
- if (unlikely(!timer || !atomic_read(&timer->enabled))) {
- pr_err("%s: timer not available or enabled.\n", __func__);
- return -EINVAL;
- }
-
- omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
- return 0;
-}
-
-static int omap_dm_timer_start(struct omap_dm_timer *timer)
+static int omap_dm_timer_start(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (!(l & OMAP_TIMER_CTRL_ST)) {
l |= OMAP_TIMER_CTRL_ST;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
}
return 0;
}
-static int omap_dm_timer_stop(struct omap_dm_timer *timer)
+static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
unsigned long rate = 0;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET))
+ dev = &timer->pdev->dev;
+
+ if (!timer->omap1)
rate = clk_get_rate(timer->fclk);
- __omap_dm_timer_stop(timer, timer->posted, rate);
+ __omap_dm_timer_stop(timer, rate);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_load(struct omap_dm_timer *timer,
+static int omap_dm_timer_set_load(struct omap_dm_timer *cookie,
unsigned int load)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ dmtimer_write(timer, OMAP_TIMER_LOAD_REG, load);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
unsigned int match)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (enable)
l |= OMAP_TIMER_CTRL_CE;
else
l &= ~OMAP_TIMER_CTRL_CE;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_MATCH_REG, match);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
OMAP_TIMER_CTRL_PT | (0x03 << 10) | OMAP_TIMER_CTRL_AR);
if (def_on)
@@ -704,57 +840,86 @@ static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
l |= trigger << 10;
if (autoreload)
l |= OMAP_TIMER_CTRL_AR;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *timer)
+static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- omap_dm_timer_disable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+
+ pm_runtime_put_sync(dev);
return l;
}
-static int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer,
- int prescaler)
+static int omap_dm_timer_set_prescaler(struct omap_dm_timer *cookie,
+ int prescaler)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
if (prescaler >= 0) {
l |= OMAP_TIMER_CTRL_PRE;
l |= prescaler << 2;
}
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
unsigned int value)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
__omap_dm_timer_int_enable(timer, value);
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
+
return 0;
}
@@ -765,42 +930,55 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
*
* Disables the specified timer interrupts for a timer.
*/
-static int omap_dm_timer_set_int_disable(struct omap_dm_timer *timer, u32 mask)
+static int omap_dm_timer_set_int_disable(struct omap_dm_timer *cookie, u32 mask)
{
+ struct dmtimer *timer;
+ struct device *dev;
u32 l = mask;
+ int rc;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
if (timer->revision == 1)
- l = readl_relaxed(timer->irq_ena) & ~mask;
+ l = dmtimer_read(timer, timer->irq_ena) & ~mask;
+
+ dmtimer_write(timer, timer->irq_dis, l);
+ l = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
- writel_relaxed(l, timer->irq_dis);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
+static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
unsigned int l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return 0;
}
- l = readl_relaxed(timer->irq_stat);
+ l = dmtimer_read(timer, timer->irq_stat);
return l;
}
-static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
+static int omap_dm_timer_write_status(struct omap_dm_timer *cookie, unsigned int value)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled)))
return -EINVAL;
@@ -809,49 +987,39 @@ static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int
return 0;
}
-static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
+static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not iavailable or enabled.\n", __func__);
return 0;
}
- return __omap_dm_timer_read_counter(timer, timer->posted);
+ return __omap_dm_timer_read_counter(timer);
}
-static int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
+static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return -EINVAL;
}
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+ dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
-int omap_dm_timers_active(void)
-{
- struct omap_dm_timer *timer;
-
- list_for_each_entry(timer, &omap_timer_list, node) {
- if (!timer->reserved)
- continue;
-
- if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
- OMAP_TIMER_CTRL_ST) {
- return 1;
- }
- }
- return 0;
-}
-
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
- struct omap_dm_timer *timer = dev_get_drvdata(dev);
+ struct dmtimer *timer = dev_get_drvdata(dev);
atomic_set(&timer->enabled, 0);
@@ -865,7 +1033,7 @@ static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev)
{
- struct omap_dm_timer *timer = dev_get_drvdata(dev);
+ struct dmtimer *timer = dev_get_drvdata(dev);
if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base)
omap_timer_restore_context(timer);
@@ -892,7 +1060,7 @@ static const struct of_device_id omap_timer_match[];
static int omap_dm_timer_probe(struct platform_device *pdev)
{
unsigned long flags;
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
struct device *dev = &pdev->dev;
const struct dmtimer_platform_data *pdata;
int ret;
@@ -916,7 +1084,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
if (timer->irq < 0)
return timer->irq;
- timer->fclk = ERR_PTR(-ENODEV);
timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
return PTR_ERR(timer->io_base);
@@ -938,6 +1105,17 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
}
+ timer->omap1 = timer->capability & OMAP_TIMER_NEEDS_RESET;
+
+ /* OMAP1 devices do not yet use the clock framework for dmtimers */
+ if (!timer->omap1) {
+ timer->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(timer->fclk))
+ return PTR_ERR(timer->fclk);
+ } else {
+ timer->fclk = ERR_PTR(-ENODEV);
+ }
+
if (!(timer->capability & OMAP_TIMER_ALWON)) {
timer->nb.notifier_call = omap_timer_context_notifier;
cpu_pm_register_notifier(&timer->nb);
@@ -950,11 +1128,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
if (!timer->reserved) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
__func__);
- goto err_get_sync;
+ goto err_disable;
}
__omap_dm_timer_init_regs(timer);
pm_runtime_put(dev);
@@ -969,8 +1147,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return 0;
-err_get_sync:
- pm_runtime_put_noidle(dev);
+err_disable:
pm_runtime_disable(dev);
return ret;
}
@@ -985,7 +1162,7 @@ err_get_sync:
*/
static int omap_dm_timer_remove(struct platform_device *pdev)
{
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
unsigned long flags;
int ret = -EINVAL;
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 55a0cae04b8d..e2114bcf815a 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -396,7 +396,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t max_read_buffer_kb_store(struct device *csdev,
@@ -452,7 +452,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t read_buffer_kb_store(struct device *csdev,
@@ -509,7 +509,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t max_write_buffer_kb_store(struct device *csdev,
@@ -565,7 +565,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t write_buffer_kb_store(struct device *csdev,
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 4407203e0c9b..77a863b7eefe 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -28,7 +28,8 @@ module_param_hw_array(base, uint, ioport, &num_quad8, 0);
MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
static unsigned int irq[max_num_isa_dev(QUAD8_EXTENT)];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
#define QUAD8_NUM_COUNTERS 8
@@ -549,6 +550,32 @@ static int quad8_index_polarity_set(struct counter_device *counter,
return 0;
}
+static int quad8_polarity_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ enum counter_signal_polarity *polarity)
+{
+ int err;
+ u32 index_polarity;
+
+ err = quad8_index_polarity_get(counter, signal, &index_polarity);
+ if (err)
+ return err;
+
+ *polarity = (index_polarity) ? COUNTER_SIGNAL_POLARITY_POSITIVE :
+ COUNTER_SIGNAL_POLARITY_NEGATIVE;
+
+ return 0;
+}
+
+static int quad8_polarity_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ enum counter_signal_polarity polarity)
+{
+ const u32 pol = (polarity == COUNTER_SIGNAL_POLARITY_POSITIVE) ? 1 : 0;
+
+ return quad8_index_polarity_set(counter, signal, pol);
+}
+
static const char *const quad8_synchronous_modes[] = {
"non-synchronous",
"synchronous"
@@ -977,6 +1004,13 @@ static struct counter_comp quad8_signal_ext[] = {
quad8_signal_fck_prescaler_write)
};
+static const enum counter_signal_polarity quad8_polarities[] = {
+ COUNTER_SIGNAL_POLARITY_POSITIVE,
+ COUNTER_SIGNAL_POLARITY_NEGATIVE,
+};
+
+static DEFINE_COUNTER_AVAILABLE(quad8_polarity_available, quad8_polarities);
+
static DEFINE_COUNTER_ENUM(quad8_index_pol_enum, quad8_index_polarity_modes);
static DEFINE_COUNTER_ENUM(quad8_synch_mode_enum, quad8_synchronous_modes);
@@ -984,6 +1018,8 @@ static struct counter_comp quad8_index_ext[] = {
COUNTER_COMP_SIGNAL_ENUM("index_polarity", quad8_index_polarity_get,
quad8_index_polarity_set,
quad8_index_pol_enum),
+ COUNTER_COMP_POLARITY(quad8_polarity_read, quad8_polarity_write,
+ quad8_polarity_available),
COUNTER_COMP_SIGNAL_ENUM("synchronous_mode", quad8_synchronous_mode_get,
quad8_synchronous_mode_set,
quad8_synch_mode_enum),
@@ -1236,8 +1272,9 @@ static struct isa_driver quad8_driver = {
}
};
-module_isa_driver(quad8_driver, num_quad8);
+module_isa_driver_with_irq(quad8_driver, num_quad8, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-QUAD-8 driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 5edd155f1911..d388bf26f4dc 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -101,4 +101,19 @@ config INTEL_QEP
To compile this driver as a module, choose M here: the module
will be called intel-qep.
+config TI_ECAP_CAPTURE
+ tristate "TI eCAP capture driver"
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select REGMAP_MMIO
+ help
+ Select this option to enable the Texas Instruments Enhanced Capture
+ (eCAP) driver in input mode.
+
+ It can be used to timestamp events (falling/rising edges) detected
+ on the ECAP input signal.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ti-ecap-capture.
+
endif # COUNTER
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index 8fde6c100ebc..b9a369e0d4fc 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_TI_EQEP) += ti-eqep.o
obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
obj-$(CONFIG_MICROCHIP_TCB_CAPTURE) += microchip-tcb-capture.o
obj-$(CONFIG_INTEL_QEP) += intel-qep.o
+obj-$(CONFIG_TI_ECAP_CAPTURE) += ti-ecap-capture.o
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
index 69d340be9c93..80acdf62794a 100644
--- a/drivers/counter/counter-chrdev.c
+++ b/drivers/counter/counter-chrdev.c
@@ -40,7 +40,11 @@ struct counter_comp_node {
a.signal_u32_read == b.signal_u32_read || \
a.device_u64_read == b.device_u64_read || \
a.count_u64_read == b.count_u64_read || \
- a.signal_u64_read == b.signal_u64_read)
+ a.signal_u64_read == b.signal_u64_read || \
+ a.signal_array_u32_read == b.signal_array_u32_read || \
+ a.device_array_u64_read == b.device_array_u64_read || \
+ a.count_array_u64_read == b.count_array_u64_read || \
+ a.signal_array_u64_read == b.signal_array_u64_read)
#define counter_comp_read_is_set(comp) \
(comp.action_read || \
@@ -52,7 +56,11 @@ struct counter_comp_node {
comp.signal_u32_read || \
comp.device_u64_read || \
comp.count_u64_read || \
- comp.signal_u64_read)
+ comp.signal_u64_read || \
+ comp.signal_array_u32_read || \
+ comp.device_array_u64_read || \
+ comp.count_array_u64_read || \
+ comp.signal_array_u64_read)
static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
size_t len, loff_t *f_ps)
@@ -228,6 +236,31 @@ static int counter_disable_events(struct counter_device *const counter)
return err;
}
+static int counter_get_ext(const struct counter_comp *const ext,
+ const size_t num_ext, const size_t component_id,
+ size_t *const ext_idx, size_t *const id)
+{
+ struct counter_array *element;
+
+ *id = 0;
+ for (*ext_idx = 0; *ext_idx < num_ext; (*ext_idx)++) {
+ if (*id == component_id)
+ return 0;
+
+ if (ext->type == COUNTER_COMP_ARRAY) {
+ element = ext->priv;
+
+ if (component_id - *id < element->length)
+ return 0;
+
+ *id += element->length;
+ } else
+ (*id)++;
+ }
+
+ return -EINVAL;
+}
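counter_get_ext() above flattens array extensions into one component-id space: a COUNTER_COMP_ARRAY entry spans one id per element, every other extension spans exactly one. A hedged standalone sketch of the same id-to-(extension index, element offset) mapping, using made-up extension descriptors:

#include <stddef.h>
#include <stdio.h>

struct ext_desc {
	const char *name;
	size_t array_len;	/* 0 means a plain (non-array) extension */
};

/* hypothetical extension list: scalar, 4-element array, scalar */
static const struct ext_desc exts[] = {
	{ "enable", 0 },
	{ "polarity", 4 },
	{ "prescaler", 0 },
};

static int get_ext(size_t component_id, size_t *ext_idx, size_t *first_id)
{
	size_t id = 0;
	size_t num = sizeof(exts) / sizeof(exts[0]);

	for (*ext_idx = 0; *ext_idx < num; (*ext_idx)++) {
		size_t span = exts[*ext_idx].array_len ? exts[*ext_idx].array_len : 1;

		if (component_id - id < span) {
			*first_id = id;
			return 0;
		}
		id += span;
	}
	return -1;
}

int main(void)
{
	size_t ext_idx, first_id, id;

	for (id = 0; id < 6; id++)
		if (!get_ext(id, &ext_idx, &first_id))
			printf("id %zu -> %s[%zu]\n", id, exts[ext_idx].name,
			       id - first_id);
	return 0;
}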
+
static int counter_add_watch(struct counter_device *const counter,
const unsigned long arg)
{
@@ -237,6 +270,7 @@ static int counter_add_watch(struct counter_device *const counter,
size_t parent, id;
struct counter_comp *ext;
size_t num_ext;
+ size_t ext_idx, ext_id;
int err = 0;
if (copy_from_user(&watch, uwatch, sizeof(watch)))
@@ -314,11 +348,11 @@ static int counter_add_watch(struct counter_device *const counter,
comp_node.comp.priv = counter->counts[parent].synapses + id;
break;
case COUNTER_COMPONENT_EXTENSION:
- if (id >= num_ext)
- return -EINVAL;
- id = array_index_nospec(id, num_ext);
+ err = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
+ if (err < 0)
+ return err;
- comp_node.comp = ext[id];
+ comp_node.comp = ext[ext_idx];
break;
default:
return -EINVAL;
@@ -451,14 +485,56 @@ void counter_chrdev_remove(struct counter_device *const counter)
kfifo_free(&counter->events);
}
+static int counter_get_array_data(struct counter_device *const counter,
+ const enum counter_scope scope,
+ void *const parent,
+ const struct counter_comp *const comp,
+ const size_t idx, u64 *const value)
+{
+ const struct counter_array *const element = comp->priv;
+ u32 value_u32 = 0;
+ int ret;
+
+ switch (element->type) {
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ if (scope != COUNTER_SCOPE_SIGNAL)
+ return -EINVAL;
+ ret = comp->signal_array_u32_read(counter, parent, idx,
+ &value_u32);
+ *value = value_u32;
+ return ret;
+ case COUNTER_COMP_U64:
+ switch (scope) {
+ case COUNTER_SCOPE_DEVICE:
+ return comp->device_array_u64_read(counter, idx, value);
+ case COUNTER_SCOPE_SIGNAL:
+ return comp->signal_array_u64_read(counter, parent, idx,
+ value);
+ case COUNTER_SCOPE_COUNT:
+ return comp->count_array_u64_read(counter, parent, idx,
+ value);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
static int counter_get_data(struct counter_device *const counter,
const struct counter_comp_node *const comp_node,
u64 *const value)
{
const struct counter_comp *const comp = &comp_node->comp;
- void *const parent = comp_node->parent;
+ const enum counter_scope scope = comp_node->component.scope;
+ const size_t id = comp_node->component.id;
+ struct counter_signal *const signal = comp_node->parent;
+ struct counter_count *const count = comp_node->parent;
u8 value_u8 = 0;
u32 value_u32 = 0;
+ const struct counter_comp *ext;
+ size_t num_ext;
+ size_t ext_idx, ext_id;
int ret;
if (comp_node->component.type == COUNTER_COMPONENT_NONE)
@@ -467,15 +543,15 @@ static int counter_get_data(struct counter_device *const counter,
switch (comp->type) {
case COUNTER_COMP_U8:
case COUNTER_COMP_BOOL:
- switch (comp_node->component.scope) {
+ switch (scope) {
case COUNTER_SCOPE_DEVICE:
ret = comp->device_u8_read(counter, &value_u8);
break;
case COUNTER_SCOPE_SIGNAL:
- ret = comp->signal_u8_read(counter, parent, &value_u8);
+ ret = comp->signal_u8_read(counter, signal, &value_u8);
break;
case COUNTER_SCOPE_COUNT:
- ret = comp->count_u8_read(counter, parent, &value_u8);
+ ret = comp->count_u8_read(counter, count, &value_u8);
break;
default:
return -EINVAL;
@@ -487,16 +563,17 @@ static int counter_get_data(struct counter_device *const counter,
case COUNTER_COMP_ENUM:
case COUNTER_COMP_COUNT_DIRECTION:
case COUNTER_COMP_COUNT_MODE:
- switch (comp_node->component.scope) {
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ switch (scope) {
case COUNTER_SCOPE_DEVICE:
ret = comp->device_u32_read(counter, &value_u32);
break;
case COUNTER_SCOPE_SIGNAL:
- ret = comp->signal_u32_read(counter, parent,
+ ret = comp->signal_u32_read(counter, signal,
&value_u32);
break;
case COUNTER_SCOPE_COUNT:
- ret = comp->count_u32_read(counter, parent, &value_u32);
+ ret = comp->count_u32_read(counter, count, &value_u32);
break;
default:
return -EINVAL;
@@ -504,21 +581,43 @@ static int counter_get_data(struct counter_device *const counter,
*value = value_u32;
return ret;
case COUNTER_COMP_U64:
- switch (comp_node->component.scope) {
+ switch (scope) {
case COUNTER_SCOPE_DEVICE:
return comp->device_u64_read(counter, value);
case COUNTER_SCOPE_SIGNAL:
- return comp->signal_u64_read(counter, parent, value);
+ return comp->signal_u64_read(counter, signal, value);
case COUNTER_SCOPE_COUNT:
- return comp->count_u64_read(counter, parent, value);
+ return comp->count_u64_read(counter, count, value);
default:
return -EINVAL;
}
case COUNTER_COMP_SYNAPSE_ACTION:
- ret = comp->action_read(counter, parent, comp->priv,
- &value_u32);
+ ret = comp->action_read(counter, count, comp->priv, &value_u32);
*value = value_u32;
return ret;
+ case COUNTER_COMP_ARRAY:
+ switch (scope) {
+ case COUNTER_SCOPE_DEVICE:
+ ext = counter->ext;
+ num_ext = counter->num_ext;
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ ext = signal->ext;
+ num_ext = signal->num_ext;
+ break;
+ case COUNTER_SCOPE_COUNT:
+ ext = count->ext;
+ num_ext = count->num_ext;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
+ if (ret < 0)
+ return ret;
+
+ return counter_get_array_data(counter, scope, comp_node->parent,
+ comp, id - ext_id, value);
default:
return -EINVAL;
}
@@ -574,4 +673,4 @@ exit_early:
if (copied)
wake_up_poll(&counter->events_wait, EPOLLIN);
}
-EXPORT_SYMBOL_GPL(counter_push_event);
+EXPORT_SYMBOL_NS_GPL(counter_push_event, COUNTER);
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 938651f9e9e0..09c77afb33ca 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -73,7 +73,7 @@ void *counter_priv(const struct counter_device *const counter)
return &ch->privdata;
}
-EXPORT_SYMBOL_GPL(counter_priv);
+EXPORT_SYMBOL_NS_GPL(counter_priv, COUNTER);
/**
* counter_alloc - allocate a counter_device
@@ -133,13 +133,13 @@ err_ida_alloc:
return NULL;
}
-EXPORT_SYMBOL_GPL(counter_alloc);
+EXPORT_SYMBOL_NS_GPL(counter_alloc, COUNTER);
void counter_put(struct counter_device *counter)
{
put_device(&counter->dev);
}
-EXPORT_SYMBOL_GPL(counter_put);
+EXPORT_SYMBOL_NS_GPL(counter_put, COUNTER);
/**
* counter_add - complete registration of a counter
@@ -166,7 +166,7 @@ int counter_add(struct counter_device *counter)
/* implies device_add(dev) */
return cdev_device_add(&counter->chrdev, dev);
}
-EXPORT_SYMBOL_GPL(counter_add);
+EXPORT_SYMBOL_NS_GPL(counter_add, COUNTER);
/**
* counter_unregister - unregister Counter from the system
@@ -188,7 +188,7 @@ void counter_unregister(struct counter_device *const counter)
mutex_unlock(&counter->ops_exist_lock);
}
-EXPORT_SYMBOL_GPL(counter_unregister);
+EXPORT_SYMBOL_NS_GPL(counter_unregister, COUNTER);
static void devm_counter_release(void *counter)
{
@@ -223,7 +223,7 @@ struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv
return counter;
}
-EXPORT_SYMBOL_GPL(devm_counter_alloc);
+EXPORT_SYMBOL_NS_GPL(devm_counter_alloc, COUNTER);
/**
* devm_counter_add - complete registration of a counter
@@ -244,7 +244,7 @@ int devm_counter_add(struct device *dev,
return devm_add_action_or_reset(dev, devm_counter_release, counter);
}
-EXPORT_SYMBOL_GPL(devm_counter_add);
+EXPORT_SYMBOL_NS_GPL(devm_counter_add, COUNTER);
#define COUNTER_DEV_MAX 256
diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c
index 04eac41dad33..b9efe66f9f8d 100644
--- a/drivers/counter/counter-sysfs.c
+++ b/drivers/counter/counter-sysfs.c
@@ -91,6 +91,11 @@ static const char *const counter_count_mode_str[] = {
[COUNTER_COUNT_MODE_MODULO_N] = "modulo-n"
};
+static const char *const counter_signal_polarity_str[] = {
+ [COUNTER_SIGNAL_POLARITY_POSITIVE] = "positive",
+ [COUNTER_SIGNAL_POLARITY_NEGATIVE] = "negative"
+};
+
static ssize_t counter_comp_u8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -201,6 +206,8 @@ static ssize_t counter_comp_u32_show(struct device *dev,
return sysfs_emit(buf, "%s\n", counter_count_direction_str[data]);
case COUNTER_COMP_COUNT_MODE:
return sysfs_emit(buf, "%s\n", counter_count_mode_str[data]);
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ return sysfs_emit(buf, "%s\n", counter_signal_polarity_str[data]);
default:
return sysfs_emit(buf, "%u\n", (unsigned int)data);
}
@@ -252,6 +259,10 @@ static ssize_t counter_comp_u32_store(struct device *dev,
err = counter_find_enum(&data, avail->enums, avail->num_items,
buf, counter_count_mode_str);
break;
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ err = counter_find_enum(&data, avail->enums, avail->num_items,
+ buf, counter_signal_polarity_str);
+ break;
default:
err = kstrtou32(buf, 0, &data);
break;
@@ -341,6 +352,124 @@ static ssize_t counter_comp_u64_store(struct device *dev,
return len;
}
+static ssize_t counter_comp_array_u32_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_array *const element = a->comp.priv;
+ int err;
+ u32 data = 0;
+
+ if (a->scope != COUNTER_SCOPE_SIGNAL ||
+ element->type != COUNTER_COMP_SIGNAL_POLARITY)
+ return -EINVAL;
+
+ err = a->comp.signal_array_u32_read(counter, a->parent, element->idx,
+ &data);
+ if (err < 0)
+ return err;
+
+ return sysfs_emit(buf, "%s\n", counter_signal_polarity_str[data]);
+}
+
+static ssize_t counter_comp_array_u32_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_array *const element = a->comp.priv;
+ int err;
+ u32 data = 0;
+
+ if (element->type != COUNTER_COMP_SIGNAL_POLARITY ||
+ a->scope != COUNTER_SCOPE_SIGNAL)
+ return -EINVAL;
+
+ err = counter_find_enum(&data, element->avail->enums,
+ element->avail->num_items, buf,
+ counter_signal_polarity_str);
+ if (err < 0)
+ return err;
+
+ err = a->comp.signal_array_u32_write(counter, a->parent, element->idx,
+ data);
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
+static ssize_t counter_comp_array_u64_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_array *const element = a->comp.priv;
+ int err;
+ u64 data = 0;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_array_u64_read(counter, element->idx,
+ &data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_array_u64_read(counter, a->parent,
+ element->idx, &data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_array_u64_read(counter, a->parent,
+ element->idx, &data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)data);
+}
+
+static ssize_t counter_comp_array_u64_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_array *const element = a->comp.priv;
+ int err;
+ u64 data = 0;
+
+ err = kstrtou64(buf, 0, &data);
+ if (err < 0)
+ return err;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_array_u64_write(counter, element->idx,
+ data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_array_u64_write(counter, a->parent,
+ element->idx, data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_array_u64_write(counter, a->parent,
+ element->idx, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
static ssize_t enums_available_show(const u32 *const enums,
const size_t num_enums,
const char *const strs[], char *buf)
@@ -435,6 +564,7 @@ static int counter_attr_create(struct device *const dev,
const enum counter_scope scope,
void *const parent)
{
+ const struct counter_array *const array = comp->priv;
struct counter_attribute *counter_attr;
struct device_attribute *dev_attr;
@@ -469,6 +599,7 @@ static int counter_attr_create(struct device *const dev,
case COUNTER_COMP_ENUM:
case COUNTER_COMP_COUNT_DIRECTION:
case COUNTER_COMP_COUNT_MODE:
+ case COUNTER_COMP_SIGNAL_POLARITY:
if (comp->device_u32_read) {
dev_attr->attr.mode |= 0444;
dev_attr->show = counter_comp_u32_show;
@@ -488,6 +619,32 @@ static int counter_attr_create(struct device *const dev,
dev_attr->store = counter_comp_u64_store;
}
break;
+ case COUNTER_COMP_ARRAY:
+ switch (array->type) {
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ if (comp->signal_array_u32_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_array_u32_show;
+ }
+ if (comp->signal_array_u32_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_array_u32_store;
+ }
+ break;
+ case COUNTER_COMP_U64:
+ if (comp->device_array_u64_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_array_u64_show;
+ }
+ if (comp->device_array_u64_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_array_u64_store;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
@@ -580,6 +737,95 @@ static int counter_comp_id_attr_create(struct device *const dev,
return 0;
}
+static int counter_ext_attrs_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const ext,
+ const enum counter_scope scope,
+ void *const parent, const size_t id)
+{
+ int err;
+
+ /* Create main extension attribute */
+ err = counter_attr_create(dev, group, ext, scope, parent);
+ if (err < 0)
+ return err;
+
+ /* Create extension id attribute */
+ return counter_comp_id_attr_create(dev, group, ext->name, id);
+}
+
+static int counter_array_attrs_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const comp,
+ const enum counter_scope scope,
+ void *const parent, const size_t id)
+{
+ const struct counter_array *const array = comp->priv;
+ struct counter_comp ext = *comp;
+ struct counter_array *element;
+ size_t idx;
+ int err;
+
+ /* Create an attribute for each array element */
+ for (idx = 0; idx < array->length; idx++) {
+ /* Generate array element attribute name */
+ ext.name = devm_kasprintf(dev, GFP_KERNEL, "%s%zu", comp->name,
+ idx);
+ if (!ext.name)
+ return -ENOMEM;
+
+ /* Allocate and configure array element */
+ element = devm_kzalloc(dev, sizeof(*element), GFP_KERNEL);
+ if (!element)
+ return -ENOMEM;
+ element->type = array->type;
+ element->avail = array->avail;
+ element->idx = idx;
+ ext.priv = element;
+
+ /* Create all attributes associated with the array element */
+ err = counter_ext_attrs_create(dev, group, &ext, scope, parent,
+ id + idx);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_sysfs_exts_add(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const exts,
+ const size_t num_ext,
+ const enum counter_scope scope,
+ void *const parent)
+{
+ size_t i;
+ const struct counter_comp *ext;
+ int err;
+ size_t id = 0;
+ const struct counter_array *array;
+
+ /* Create attributes for each extension */
+ for (i = 0; i < num_ext; i++) {
+ ext = &exts[i];
+ if (ext->type == COUNTER_COMP_ARRAY) {
+ err = counter_array_attrs_create(dev, group, ext, scope,
+ parent, id);
+ array = ext->priv;
+ id += array->length;
+ } else {
+ err = counter_ext_attrs_create(dev, group, ext, scope,
+ parent, id);
+ id++;
+ }
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static struct counter_comp counter_signal_comp = {
.type = COUNTER_COMP_SIGNAL_LEVEL,
.name = "signal",
@@ -593,8 +839,6 @@ static int counter_signal_attrs_create(struct counter_device *const counter,
struct device *const dev = &counter->dev;
int err;
struct counter_comp comp;
- size_t i;
- struct counter_comp *ext;
/* Create main Signal attribute */
comp = counter_signal_comp;
@@ -608,21 +852,9 @@ static int counter_signal_attrs_create(struct counter_device *const counter,
if (err < 0)
return err;
- /* Create an attribute for each extension */
- for (i = 0; i < signal->num_ext; i++) {
- ext = &signal->ext[i];
-
- err = counter_attr_create(dev, cattr_group, ext, scope, signal);
- if (err < 0)
- return err;
-
- err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
- i);
- if (err < 0)
- return err;
- }
-
- return 0;
+ /* Add Signal extensions */
+ return counter_sysfs_exts_add(dev, cattr_group, signal->ext,
+ signal->num_ext, scope, signal);
}
static int counter_sysfs_signals_add(struct counter_device *const counter,
@@ -707,8 +939,6 @@ static int counter_count_attrs_create(struct counter_device *const counter,
struct device *const dev = &counter->dev;
int err;
struct counter_comp comp;
- size_t i;
- struct counter_comp *ext;
/* Create main Count attribute */
comp = counter_count_comp;
@@ -731,21 +961,9 @@ static int counter_count_attrs_create(struct counter_device *const counter,
if (err < 0)
return err;
- /* Create an attribute for each extension */
- for (i = 0; i < count->num_ext; i++) {
- ext = &count->ext[i];
-
- err = counter_attr_create(dev, cattr_group, ext, scope, count);
- if (err < 0)
- return err;
-
- err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
- i);
- if (err < 0)
- return err;
- }
-
- return 0;
+ /* Add Count extensions */
+ return counter_sysfs_exts_add(dev, cattr_group, count->ext,
+ count->num_ext, scope, count);
}
static int counter_sysfs_counts_add(struct counter_device *const counter,
@@ -838,8 +1056,6 @@ static int counter_sysfs_attr_add(struct counter_device *const counter,
const enum counter_scope scope = COUNTER_SCOPE_DEVICE;
struct device *const dev = &counter->dev;
int err;
- size_t i;
- struct counter_comp *ext;
/* Add Signals sysfs attributes */
err = counter_sysfs_signals_add(counter, cattr_group);
@@ -876,19 +1092,9 @@ static int counter_sysfs_attr_add(struct counter_device *const counter,
if (err < 0)
return err;
- /* Create an attribute for each extension */
- for (i = 0; i < counter->num_ext; i++) {
- ext = &counter->ext[i];
-
- err = counter_attr_create(dev, cattr_group, ext, scope, NULL);
- if (err < 0)
- return err;
-
- err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
- i);
- if (err < 0)
- return err;
- }
+ /* Add device extensions */
+ return counter_sysfs_exts_add(dev, cattr_group, counter->ext,
+ counter->num_ext, scope, NULL);
return 0;
}
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 2a58582a9df4..aea6622a9b13 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -325,3 +325,4 @@ module_platform_driver(ftm_quaddec_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kjeld Flarup <kfa@deif.com>");
MODULE_AUTHOR("Patrick Havelange <patrick.havelange@essensium.com>");
+MODULE_IMPORT_NS(COUNTER);
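
The MODULE_IMPORT_NS(COUNTER) lines added to each counter driver in this series pair with the counter core exporting its interfaces into the COUNTER symbol namespace; without the import, modpost refuses to link the driver against those symbols. A sketch of the pairing, assuming the core uses EXPORT_SYMBOL_NS_GPL for its exports:

    /* Provider side, e.g. in counter-core.c: */
    EXPORT_SYMBOL_NS_GPL(counter_priv, COUNTER);

    /* Consumer side, in each counter driver module: */
    MODULE_IMPORT_NS(COUNTER);
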
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
index 47a6a9dfc9e8..af5942e66f7d 100644
--- a/drivers/counter/intel-qep.c
+++ b/drivers/counter/intel-qep.c
@@ -523,3 +523,4 @@ MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 3b13f56bbb11..229473855c5b 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -139,12 +139,23 @@ static int interrupt_cnt_signal_read(struct counter_device *counter,
return 0;
}
+static int interrupt_cnt_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ if (watch->channel != 0 ||
+ watch->event != COUNTER_EVENT_CHANGE_OF_STATE)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct counter_ops interrupt_cnt_ops = {
.action_read = interrupt_cnt_action_read,
.count_read = interrupt_cnt_read,
.count_write = interrupt_cnt_write,
.function_read = interrupt_cnt_function_read,
.signal_read = interrupt_cnt_signal_read,
+ .watch_validate = interrupt_cnt_watch_validate,
};
static int interrupt_cnt_probe(struct platform_device *pdev)
@@ -242,3 +253,4 @@ MODULE_ALIAS("platform:interrupt-counter");
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Interrupt counter driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
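
The watch_validate callback added above lets the core reject event watches the hardware cannot deliver: interrupt-cnt only raises COUNTER_EVENT_CHANGE_OF_STATE on channel 0. A hedged userspace sketch of a watch that passes this validation, using the counter character-device uAPI (the device path is an example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/counter.h>

    int main(void)
    {
            struct counter_watch watch = {
                    .component.type = COUNTER_COMPONENT_COUNT,
                    .component.scope = COUNTER_SCOPE_COUNT,
                    .component.parent = 0,          /* Count 0 */
                    .event = COUNTER_EVENT_CHANGE_OF_STATE,
                    .channel = 0,                   /* anything else is -EINVAL now */
            };
            struct counter_event event;
            int fd = open("/dev/counter0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0 ||
                ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL) < 0)
                    return 1;
            while (read(fd, &event, sizeof(event)) == sizeof(event))
                    printf("count: %llu\n", (unsigned long long)event.value);
            close(fd);
            return 0;
    }
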
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 00844445143b..f9dee15d9777 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -394,3 +394,4 @@ module_platform_driver(mchp_tc_driver);
MODULE_AUTHOR("Kamel Bouhara <kamel.bouhara@bootlin.com>");
MODULE_DESCRIPTION("Microchip TCB Capture driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index 68031d93ce89..d6b80b6dfc28 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -520,3 +520,4 @@ MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
MODULE_ALIAS("platform:stm32-lptimer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 5779ae7c73cf..9bf20a5d6bda 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -417,3 +417,4 @@ MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_ALIAS("platform:stm32-timer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 TIMER counter driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
new file mode 100644
index 000000000000..af10de30aba5
--- /dev/null
+++ b/drivers/counter/ti-ecap-capture.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ECAP Capture driver
+ *
+ * Copyright (C) 2022 Julien Panis <jpanis@baylibre.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/counter.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define ECAP_DRV_NAME "ecap"
+
+/* ECAP event IDs */
+#define ECAP_CEVT1 0
+#define ECAP_CEVT2 1
+#define ECAP_CEVT3 2
+#define ECAP_CEVT4 3
+#define ECAP_CNTOVF 4
+
+#define ECAP_CEVT_LAST ECAP_CEVT4
+#define ECAP_NB_CEVT (ECAP_CEVT_LAST + 1)
+
+#define ECAP_EVT_LAST ECAP_CNTOVF
+#define ECAP_NB_EVT (ECAP_EVT_LAST + 1)
+
+/* Registers */
+#define ECAP_TSCNT_REG 0x00
+
+#define ECAP_CAP_REG(i) (((i) << 2) + 0x08)
+
+#define ECAP_ECCTL_REG 0x28
+#define ECAP_CAPPOL_BIT(i) BIT((i) << 1)
+#define ECAP_EV_MODE_MASK GENMASK(7, 0)
+#define ECAP_CAPLDEN_BIT BIT(8)
+#define ECAP_CONT_ONESHT_BIT BIT(16)
+#define ECAP_STOPVALUE_MASK GENMASK(18, 17)
+#define ECAP_TSCNTSTP_BIT BIT(20)
+#define ECAP_SYNCO_DIS_MASK GENMASK(23, 22)
+#define ECAP_CAP_APWM_BIT BIT(25)
+#define ECAP_ECCTL_EN_MASK (ECAP_CAPLDEN_BIT | ECAP_TSCNTSTP_BIT)
+#define ECAP_ECCTL_CFG_MASK (ECAP_SYNCO_DIS_MASK | ECAP_STOPVALUE_MASK \
+ | ECAP_ECCTL_EN_MASK | ECAP_CAP_APWM_BIT \
+ | ECAP_CONT_ONESHT_BIT)
+
+#define ECAP_ECINT_EN_FLG_REG 0x2c
+#define ECAP_EVT_EN_MASK GENMASK(ECAP_NB_EVT, ECAP_NB_CEVT)
+#define ECAP_EVT_FLG_BIT(i) BIT((i) + 17)
+
+#define ECAP_ECINT_CLR_FRC_REG 0x30
+#define ECAP_INT_CLR_BIT BIT(0)
+#define ECAP_EVT_CLR_BIT(i) BIT((i) + 1)
+#define ECAP_EVT_CLR_MASK GENMASK(ECAP_NB_EVT, 0)
+
+#define ECAP_PID_REG 0x5c
+
+/* ECAP signals */
+#define ECAP_CLOCK_SIG 0
+#define ECAP_INPUT_SIG 1
+
+static const struct regmap_config ecap_cnt_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = ECAP_PID_REG,
+};
+
+/**
+ * struct ecap_cnt_dev - device private data structure
+ * @enabled: device state
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @clk: device clock
+ * @regmap: device register map
+ * @nb_ovf: number of overflows since capture start
+ * @pm_ctx: device context for PM operations
+ * @pm_ctx.ev_mode: event mode bits
+ * @pm_ctx.time_cntr: timestamp counter value
+ */
+struct ecap_cnt_dev {
+ bool enabled;
+ struct mutex lock;
+ struct clk *clk;
+ struct regmap *regmap;
+ atomic_t nb_ovf;
+ struct {
+ u8 ev_mode;
+ u32 time_cntr;
+ } pm_ctx;
+};
+
+static u8 ecap_cnt_capture_get_evmode(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+ unsigned int regval;
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_read(ecap_dev->regmap, ECAP_ECCTL_REG, &regval);
+ pm_runtime_put_sync(counter->parent);
+
+ return regval;
+}
+
+static void ecap_cnt_capture_set_evmode(struct counter_device *counter, u8 ev_mode)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_EV_MODE_MASK, ev_mode);
+ pm_runtime_put_sync(counter->parent);
+}
+
+static void ecap_cnt_capture_enable(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+
+ /* Enable interrupts on events */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG,
+ ECAP_EVT_EN_MASK, ECAP_EVT_EN_MASK);
+
+ /* Run counter */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_ECCTL_CFG_MASK,
+ ECAP_SYNCO_DIS_MASK | ECAP_STOPVALUE_MASK | ECAP_ECCTL_EN_MASK);
+}
+
+static void ecap_cnt_capture_disable(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ /* Stop counter */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_ECCTL_EN_MASK, 0);
+
+ /* Disable interrupts on events */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG, ECAP_EVT_EN_MASK, 0);
+
+ pm_runtime_put_sync(counter->parent);
+}
+
+static u32 ecap_cnt_count_get_val(struct counter_device *counter, unsigned int reg)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+ unsigned int regval;
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_read(ecap_dev->regmap, reg, &regval);
+ pm_runtime_put_sync(counter->parent);
+
+ return regval;
+}
+
+static void ecap_cnt_count_set_val(struct counter_device *counter, unsigned int reg, u32 val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_write(ecap_dev->regmap, reg, val);
+ pm_runtime_put_sync(counter->parent);
+}
+
+static int ecap_cnt_count_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ *val = ecap_cnt_count_get_val(counter, ECAP_TSCNT_REG);
+
+ return 0;
+}
+
+static int ecap_cnt_count_write(struct counter_device *counter,
+ struct counter_count *count, u64 val)
+{
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ ecap_cnt_count_set_val(counter, ECAP_TSCNT_REG, val);
+
+ return 0;
+}
+
+static int ecap_cnt_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
+{
+ *function = COUNTER_FUNCTION_INCREASE;
+
+ return 0;
+}
+
+static int ecap_cnt_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
+{
+ *action = (synapse->signal->id == ECAP_CLOCK_SIG) ?
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE :
+ COUNTER_SYNAPSE_ACTION_NONE;
+
+ return 0;
+}
+
+static int ecap_cnt_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ if (watch->channel > ECAP_CEVT_LAST)
+ return -EINVAL;
+
+ switch (watch->event) {
+ case COUNTER_EVENT_CAPTURE:
+ case COUNTER_EVENT_OVERFLOW:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ecap_cnt_clk_get_freq(struct counter_device *counter,
+ struct counter_signal *signal, u64 *freq)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *freq = clk_get_rate(ecap_dev->clk);
+
+ return 0;
+}
+
+static int ecap_cnt_pol_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, enum counter_signal_polarity *pol)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+ int bitval;
+
+ pm_runtime_get_sync(counter->parent);
+ bitval = regmap_test_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ pm_runtime_put_sync(counter->parent);
+
+ *pol = bitval ? COUNTER_SIGNAL_POLARITY_NEGATIVE : COUNTER_SIGNAL_POLARITY_POSITIVE;
+
+ return 0;
+}
+
+static int ecap_cnt_pol_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, enum counter_signal_polarity pol)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ if (pol == COUNTER_SIGNAL_POLARITY_NEGATIVE)
+ regmap_set_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ else
+ regmap_clear_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ pm_runtime_put_sync(counter->parent);
+
+ return 0;
+}
+
+static int ecap_cnt_cap_read(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *cap)
+{
+ *cap = ecap_cnt_count_get_val(counter, ECAP_CAP_REG(idx));
+
+ return 0;
+}
+
+static int ecap_cnt_cap_write(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 cap)
+{
+ if (cap > U32_MAX)
+ return -ERANGE;
+
+ ecap_cnt_count_set_val(counter, ECAP_CAP_REG(idx), cap);
+
+ return 0;
+}
+
+static int ecap_cnt_nb_ovf_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *val = atomic_read(&ecap_dev->nb_ovf);
+
+ return 0;
+}
+
+static int ecap_cnt_nb_ovf_write(struct counter_device *counter,
+ struct counter_count *count, u64 val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ atomic_set(&ecap_dev->nb_ovf, val);
+
+ return 0;
+}
+
+static int ecap_cnt_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ *val = U32_MAX;
+
+ return 0;
+}
+
+static int ecap_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *enable = ecap_dev->enabled;
+
+ return 0;
+}
+
+static int ecap_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ mutex_lock(&ecap_dev->lock);
+
+ if (enable == ecap_dev->enabled)
+ goto out;
+
+ if (enable)
+ ecap_cnt_capture_enable(counter);
+ else
+ ecap_cnt_capture_disable(counter);
+ ecap_dev->enabled = enable;
+
+out:
+ mutex_unlock(&ecap_dev->lock);
+
+ return 0;
+}
+
+static const struct counter_ops ecap_cnt_ops = {
+ .count_read = ecap_cnt_count_read,
+ .count_write = ecap_cnt_count_write,
+ .function_read = ecap_cnt_function_read,
+ .action_read = ecap_cnt_action_read,
+ .watch_validate = ecap_cnt_watch_validate,
+};
+
+static const enum counter_function ecap_cnt_functions[] = {
+ COUNTER_FUNCTION_INCREASE,
+};
+
+static const enum counter_synapse_action ecap_cnt_clock_actions[] = {
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+};
+
+static const enum counter_synapse_action ecap_cnt_input_actions[] = {
+ COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static struct counter_comp ecap_cnt_clock_ext[] = {
+ COUNTER_COMP_SIGNAL_U64("frequency", ecap_cnt_clk_get_freq, NULL),
+};
+
+static const enum counter_signal_polarity ecap_cnt_pol_avail[] = {
+ COUNTER_SIGNAL_POLARITY_POSITIVE,
+ COUNTER_SIGNAL_POLARITY_NEGATIVE,
+};
+
+static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_avail, ECAP_NB_CEVT);
+
+static struct counter_comp ecap_cnt_signal_ext[] = {
+ COUNTER_COMP_ARRAY_POLARITY(ecap_cnt_pol_read, ecap_cnt_pol_write, ecap_cnt_pol_array),
+};
+
+static struct counter_signal ecap_cnt_signals[] = {
+ {
+ .id = ECAP_CLOCK_SIG,
+ .name = "Clock Signal",
+ .ext = ecap_cnt_clock_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_clock_ext),
+ },
+ {
+ .id = ECAP_INPUT_SIG,
+ .name = "Input Signal",
+ .ext = ecap_cnt_signal_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_signal_ext),
+ },
+};
+
+static struct counter_synapse ecap_cnt_synapses[] = {
+ {
+ .actions_list = ecap_cnt_clock_actions,
+ .num_actions = ARRAY_SIZE(ecap_cnt_clock_actions),
+ .signal = &ecap_cnt_signals[ECAP_CLOCK_SIG],
+ },
+ {
+ .actions_list = ecap_cnt_input_actions,
+ .num_actions = ARRAY_SIZE(ecap_cnt_input_actions),
+ .signal = &ecap_cnt_signals[ECAP_INPUT_SIG],
+ },
+};
+
+static DEFINE_COUNTER_ARRAY_CAPTURE(ecap_cnt_cap_array, ECAP_NB_CEVT);
+
+static struct counter_comp ecap_cnt_count_ext[] = {
+ COUNTER_COMP_ARRAY_CAPTURE(ecap_cnt_cap_read, ecap_cnt_cap_write, ecap_cnt_cap_array),
+ COUNTER_COMP_COUNT_U64("num_overflows", ecap_cnt_nb_ovf_read, ecap_cnt_nb_ovf_write),
+ COUNTER_COMP_CEILING(ecap_cnt_ceiling_read, NULL),
+ COUNTER_COMP_ENABLE(ecap_cnt_enable_read, ecap_cnt_enable_write),
+};
+
+static struct counter_count ecap_cnt_counts[] = {
+ {
+ .name = "Timestamp Counter",
+ .functions_list = ecap_cnt_functions,
+ .num_functions = ARRAY_SIZE(ecap_cnt_functions),
+ .synapses = ecap_cnt_synapses,
+ .num_synapses = ARRAY_SIZE(ecap_cnt_synapses),
+ .ext = ecap_cnt_count_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_count_ext),
+ },
+};
+
+static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
+{
+ struct counter_device *counter_dev = dev_id;
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+ unsigned int clr = 0;
+ unsigned int flg;
+ int i;
+
+ regmap_read(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG, &flg);
+
+ /* Check capture events */
+ for (i = 0 ; i < ECAP_NB_CEVT ; i++) {
+ if (flg & ECAP_EVT_FLG_BIT(i)) {
+ counter_push_event(counter_dev, COUNTER_EVENT_CAPTURE, i);
+ clr |= ECAP_EVT_CLR_BIT(i);
+ }
+ }
+
+ /* Check counter overflow */
+ if (flg & ECAP_EVT_FLG_BIT(ECAP_CNTOVF)) {
+ atomic_inc(&ecap_dev->nb_ovf);
+ for (i = 0 ; i < ECAP_NB_CEVT ; i++)
+ counter_push_event(counter_dev, COUNTER_EVENT_OVERFLOW, i);
+ clr |= ECAP_EVT_CLR_BIT(ECAP_CNTOVF);
+ }
+
+ clr |= ECAP_INT_CLR_BIT;
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_CLR_FRC_REG, ECAP_EVT_CLR_MASK, clr);
+
+ return IRQ_HANDLED;
+}
+
+static void ecap_cnt_pm_disable(void *dev)
+{
+ pm_runtime_disable(dev);
+}
+
+static int ecap_cnt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ecap_cnt_dev *ecap_dev;
+ struct counter_device *counter_dev;
+ void __iomem *mmio_base;
+ unsigned long clk_rate;
+ int ret;
+
+ counter_dev = devm_counter_alloc(dev, sizeof(*ecap_dev));
+ if (IS_ERR(counter_dev))
+ return PTR_ERR(counter_dev);
+
+ counter_dev->name = ECAP_DRV_NAME;
+ counter_dev->parent = dev;
+ counter_dev->ops = &ecap_cnt_ops;
+ counter_dev->signals = ecap_cnt_signals;
+ counter_dev->num_signals = ARRAY_SIZE(ecap_cnt_signals);
+ counter_dev->counts = ecap_cnt_counts;
+ counter_dev->num_counts = ARRAY_SIZE(ecap_cnt_counts);
+
+ ecap_dev = counter_priv(counter_dev);
+
+ mutex_init(&ecap_dev->lock);
+
+ ecap_dev->clk = devm_clk_get_enabled(dev, "fck");
+ if (IS_ERR(ecap_dev->clk))
+ return dev_err_probe(dev, PTR_ERR(ecap_dev->clk), "failed to get clock\n");
+
+ clk_rate = clk_get_rate(ecap_dev->clk);
+ if (!clk_rate) {
+ dev_err(dev, "failed to get clock rate\n");
+ return -EINVAL;
+ }
+
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
+
+ ecap_dev->regmap = devm_regmap_init_mmio(dev, mmio_base, &ecap_cnt_regmap_config);
+ if (IS_ERR(ecap_dev->regmap))
+ return dev_err_probe(dev, PTR_ERR(ecap_dev->regmap), "failed to init regmap\n");
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get irq\n");
+
+ ret = devm_request_irq(dev, ret, ecap_cnt_isr, 0, pdev->name, counter_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq\n");
+
+ platform_set_drvdata(pdev, counter_dev);
+
+ pm_runtime_enable(dev);
+
+ /* Register a cleanup callback to care for disabling PM */
+ ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add pm disable action\n");
+
+ ret = devm_counter_add(dev, counter_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add counter\n");
+
+ return 0;
+}
+
+static int ecap_cnt_remove(struct platform_device *pdev)
+{
+ struct counter_device *counter_dev = platform_get_drvdata(pdev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ if (ecap_dev->enabled)
+ ecap_cnt_capture_disable(counter_dev);
+
+ return 0;
+}
+
+static int ecap_cnt_suspend(struct device *dev)
+{
+ struct counter_device *counter_dev = dev_get_drvdata(dev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ /* If eCAP is running, stop capture then save timestamp counter */
+ if (ecap_dev->enabled) {
+ /*
+ * Disabling capture has the following effects:
+ * - interrupts are disabled
+ * - loading of capture registers is disabled
+ * - timebase counter is stopped
+ */
+ ecap_cnt_capture_disable(counter_dev);
+ ecap_dev->pm_ctx.time_cntr = ecap_cnt_count_get_val(counter_dev, ECAP_TSCNT_REG);
+ }
+
+ ecap_dev->pm_ctx.ev_mode = ecap_cnt_capture_get_evmode(counter_dev);
+
+ clk_disable(ecap_dev->clk);
+
+ return 0;
+}
+
+static int ecap_cnt_resume(struct device *dev)
+{
+ struct counter_device *counter_dev = dev_get_drvdata(dev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ clk_enable(ecap_dev->clk);
+
+ ecap_cnt_capture_set_evmode(counter_dev, ecap_dev->pm_ctx.ev_mode);
+
+ /* If eCAP was running, restore timestamp counter then run capture */
+ if (ecap_dev->enabled) {
+ ecap_cnt_count_set_val(counter_dev, ECAP_TSCNT_REG, ecap_dev->pm_ctx.time_cntr);
+ ecap_cnt_capture_enable(counter_dev);
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ecap_cnt_pm_ops, ecap_cnt_suspend, ecap_cnt_resume);
+
+static const struct of_device_id ecap_cnt_of_match[] = {
+ { .compatible = "ti,am62-ecap-capture" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ecap_cnt_of_match);
+
+static struct platform_driver ecap_cnt_driver = {
+ .probe = ecap_cnt_probe,
+ .remove = ecap_cnt_remove,
+ .driver = {
+ .name = "ecap-capture",
+ .of_match_table = ecap_cnt_of_match,
+ .pm = pm_sleep_ptr(&ecap_cnt_pm_ops),
+ },
+};
+module_platform_driver(ecap_cnt_driver);
+
+MODULE_DESCRIPTION("ECAP Capture driver");
+MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(COUNTER);
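
The Count extensions declared for ti-ecap-capture (enable, num_overflows, ceiling and the capture array) also surface as per-Count sysfs attributes in addition to the character-device events. A hypothetical userspace sketch that reads the four capture registers; the exact path layout is assumed from the usual counter sysfs convention rather than spelled out in this patch:

    #include <stdio.h>

    int main(void)
    {
            char path[96], buf[32];
            int i;

            for (i = 0; i < 4; i++) {
                    FILE *f;

                    snprintf(path, sizeof(path),
                             "/sys/bus/counter/devices/counter0/count0/capture%d", i);
                    f = fopen(path, "r");
                    if (!f)
                            continue;
                    if (fgets(buf, sizeof(buf), f))
                            printf("capture%d: %s", i, buf);
                    fclose(f);
            }
            return 0;
    }
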
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 0489d26eb47c..b0f24cf3e891 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -456,3 +456,4 @@ module_platform_driver(ti_eqep_driver);
MODULE_AUTHOR("David Lechner <david@lechnology.com>");
MODULE_DESCRIPTION("TI eQEP counter driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 954749afb5fe..82e5de1f6f8c 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -153,7 +153,7 @@ config ARM_OMAP2PLUS_CPUFREQ
config ARM_QCOM_CPUFREQ_NVMEM
tristate "Qualcomm nvmem based CPUFreq"
depends on ARCH_QCOM
- depends on QCOM_QFPROM
+ depends on NVMEM_QCOM_QFPROM
depends on QCOM_SMEM
select PM_OPP
help
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 862a2876f1c9..05fe2902df9a 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -97,8 +97,13 @@ static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
+ u32 state = states[idx];
- return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
+ if (state & SBI_HSM_SUSP_NON_RET_BIT)
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
+ else
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
+ idx, state);
}
static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
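
The split matters because the non-retention path wraps the SBI call in CPU PM notifications (context may be lost across the suspend), while the retention variant skips them since the hart keeps its state. A simplified sketch of what the two entry macros boil down to; the real definitions live in include/linux/cpuidle.h:

    static int example_idle_enter(int idx, u32 state, bool is_retention)
    {
            int ret = 0;

            if (!is_retention)
                    ret = cpu_pm_enter();   /* notify: CPU context may be lost */
            if (!ret) {
                    ret = sbi_suspend(state);
                    if (!is_retention)
                            cpu_pm_exit();  /* notify: context restored */
            }

            return ret ? -1 : idx;
    }
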
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3e6aa319920b..55e75fbb658e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -802,9 +802,7 @@ source "drivers/crypto/amlogic/Kconfig"
config CRYPTO_DEV_SA2UL
tristate "Support for TI security accelerator"
depends on ARCH_K3 || COMPILE_TEST
- select ARM64_CRYPTO
select CRYPTO_AES
- select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_SHA1
@@ -818,5 +816,6 @@ config CRYPTO_DEV_SA2UL
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/keembay/Kconfig"
+source "drivers/crypto/aspeed/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f81703a86b98..116de173a66c 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 44b8fc4b786d..006e40133c28 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -235,7 +235,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
-static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
+static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v)
{
unsigned int i;
@@ -266,19 +266,7 @@ static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
}
return 0;
}
-
-static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun4i_ss_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun4i_ss_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs);
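
DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs) regenerates the boilerplate deleted just above from the sun4i_ss_debugfs_show() function; roughly, the macro expands to:

    static int sun4i_ss_debugfs_open(struct inode *inode, struct file *file)
    {
            return single_open(file, sun4i_ss_debugfs_show, inode->i_private);
    }

    static const struct file_operations sun4i_ss_debugfs_fops = {
            .owner   = THIS_MODULE,
            .open    = sun4i_ss_debugfs_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };
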
/*
* Power management strategy: The device is suspended unless a TFM exists for
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index 19cd2e52f89d..c4b0a8b58842 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
goto err_dst;
}
- err = pm_runtime_get_sync(ce->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
goto err_pm;
- }
mutex_lock(&ce->rnglock);
chan = &ce->chanlist[flow];
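
pm_runtime_resume_and_get() folds in the error handling the old call site had to open-code: on failure it drops the usage count itself, so the caller no longer needs pm_runtime_put_noidle(). A sketch of the helper's behaviour, matching its definition in include/linux/pm_runtime.h:

    static inline int pm_runtime_resume_and_get(struct device *dev)
    {
            int ret;

            ret = __pm_runtime_resume(dev, RPM_GET_PUT);
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            return 0;
    }
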
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e79514fce731..af017a087ebf 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -177,7 +177,7 @@ static int meson_cipher(struct skcipher_request *areq)
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_BIDIRECTIONAL);
- if (nr_sgs < 0) {
+ if (!nr_sgs) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
@@ -186,14 +186,14 @@ static int meson_cipher(struct skcipher_request *areq)
} else {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
- if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
+ if (!nr_sgs || nr_sgs > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
DMA_FROM_DEVICE);
- if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
+ if (!nr_sgd || nr_sgd > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
err = -EINVAL;
goto theend;
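
The old checks could never trigger: dma_map_sg() returns the number of mapped segments, or 0 on failure, never a negative errno, so testing for a value below zero silently accepted failed mappings. The corrected pattern used above amounts to this sketch:

    /* dma_map_sg() signals failure by returning 0, not a negative value. */
    nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
    if (!nr_sgs || nr_sgs > MAXDESC - 3)
            return -EINVAL;         /* mapping failed or too many segments */
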
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
new file mode 100644
index 000000000000..ae2710ae8d8f
--- /dev/null
+++ b/drivers/crypto/aspeed/Kconfig
@@ -0,0 +1,48 @@
+config CRYPTO_DEV_ASPEED
+ tristate "Support for Aspeed cryptographic engine driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select CRYPTO_ENGINE
+ help
+ Hash and Crypto Engine (HACE) is designed to accelerate the
+ throughput of hash data digest, encryption and decryption.
+
+ Say y here to enable support for the cryptographic engine
+ driver available on Aspeed SoCs.
+
+config CRYPTO_DEV_ASPEED_DEBUG
+ bool "Enable Aspeed crypto debug messages"
+ depends on CRYPTO_DEV_ASPEED
+ help
+ Enable this option to print Aspeed crypto debugging
+ messages.
+ Avoid enabling it in production builds, since the extra
+ logging affects driver timing.
+
+config CRYPTO_DEV_ASPEED_HACE_HASH
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) hash"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
+ help
+ Select here to enable Aspeed Hash & Crypto Engine (HACE)
+ hash driver.
+ Supports multiple message digest standards, including
+ SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, and so on.
+
+config CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CFB
+ select CRYPTO_OFB
+ select CRYPTO_CTR
+ help
+ Select here to enable Aspeed Hash & Crypto Engine (HACE)
+ crypto driver.
+ Supports AES/DES symmetric-key encryption and decryption
+ with ECB/CBC/CFB/OFB/CTR options.
diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile
new file mode 100644
index 000000000000..a0ed40ddaad1
--- /dev/null
+++ b/drivers/crypto/aspeed/Makefile
@@ -0,0 +1,7 @@
+hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o
+hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o
+
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
+aspeed_crypto-objs := aspeed-hace.o \
+ $(hace-hash-y) \
+ $(hace-crypto-y)
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
new file mode 100644
index 000000000000..ef73b0028b4d
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG
+#define CIPHER_DBG(h, fmt, ...) \
+ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define CIPHER_DBG(h, fmt, ...) \
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+static int aspeed_crypto_do_fallback(struct skcipher_request *areq)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+
+ if (rctx->enc_cmd & HACE_CMD_ENCRYPT)
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+
+ return err;
+}
+
+static bool aspeed_crypto_need_fallback(struct skcipher_request *areq)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+
+ if (areq->cryptlen == 0)
+ return true;
+
+ if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) &&
+ !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE))
+ return true;
+
+ if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) &&
+ !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE))
+ return true;
+
+ return false;
+}
+
+static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev,
+ struct skcipher_request *req)
+{
+ if (hace_dev->version == AST2500_VERSION &&
+ aspeed_crypto_need_fallback(req)) {
+ CIPHER_DBG(hace_dev, "SW fallback\n");
+ return aspeed_crypto_do_fallback(req);
+ }
+
+ return crypto_transfer_skcipher_request_to_engine(
+ hace_dev->crypt_engine_crypto, req);
+}
+
+static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ struct aspeed_engine_crypto *crypto_engine;
+ int rc;
+
+ crypto_engine = &hace_dev->crypto_engine;
+ crypto_engine->req = req;
+ crypto_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+ rc = ctx->start(hace_dev);
+
+ if (rc != -EINPROGRESS)
+ return -EIO;
+
+ return 0;
+}
+
+static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+ memcpy(req->iv, crypto_engine->cipher_ctx +
+ DES_KEY_SIZE, DES_KEY_SIZE);
+ else
+ memcpy(req->iv, crypto_engine->cipher_ctx,
+ AES_BLOCK_SIZE);
+ }
+
+ crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, req,
+ err);
+
+ return err;
+}
+
+static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct device *dev = hace_dev->dev;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE);
+ }
+
+ return aspeed_sk_complete(hace_dev, 0);
+}
+
+static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *out_sg;
+ int nbytes = 0;
+ int rc = 0;
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ out_sg = req->dst;
+
+ /* Copy output buffer to dst scatter-gather lists */
+ nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents,
+ crypto_engine->cipher_addr, req->cryptlen);
+ if (!nbytes) {
+ dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+ "nbytes", nbytes, "cryptlen", req->cryptlen);
+ rc = -EINVAL;
+ }
+
+ CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+ "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+ "nb_out_sg", rctx->dst_nents,
+ "cipher addr", crypto_engine->cipher_addr);
+
+ return aspeed_sk_complete(hace_dev, rc);
+}
+
+static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *in_sg;
+ int nbytes;
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ in_sg = req->src;
+
+ nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents,
+ crypto_engine->cipher_addr, req->cryptlen);
+
+ CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+ "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+ "nb_in_sg", rctx->src_nents,
+ "cipher addr", crypto_engine->cipher_addr);
+
+ if (!nbytes) {
+ dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+ "nbytes", nbytes, "cryptlen", req->cryptlen);
+ return -EINVAL;
+ }
+
+ crypto_engine->resume = aspeed_sk_transfer;
+
+ /* Trigger engines */
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_DEST);
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+ return -EINPROGRESS;
+}
+
+static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_sg_list *src_list, *dst_list;
+ dma_addr_t src_dma_addr, dst_dma_addr;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *s;
+ int src_sg_len;
+ int dst_sg_len;
+ int total, i;
+ int rc;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL |
+ HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN;
+
+ /* BIDIRECTIONAL */
+ if (req->dst == req->src) {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+ rctx->src_nents, DMA_BIDIRECTIONAL);
+ dst_sg_len = src_sg_len;
+ if (!src_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ return -EINVAL;
+ }
+
+ } else {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+ rctx->src_nents, DMA_TO_DEVICE);
+ if (!src_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ return -EINVAL;
+ }
+
+ dst_sg_len = dma_map_sg(hace_dev->dev, req->dst,
+ rctx->dst_nents, DMA_FROM_DEVICE);
+ if (!dst_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() dst error\n");
+ rc = -EINVAL;
+ goto free_req_src;
+ }
+ }
+
+ src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr;
+ src_dma_addr = crypto_engine->cipher_dma_addr;
+ total = req->cryptlen;
+
+ for_each_sg(req->src, s, src_sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (total > len)
+ total -= len;
+ else {
+ /* last sg list */
+ len = total;
+ len |= BIT(31);
+ total = 0;
+ }
+
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
+ }
+
+ if (total != 0) {
+ rc = -EINVAL;
+ goto free_req;
+ }
+
+ if (req->dst == req->src) {
+ dst_list = src_list;
+ dst_dma_addr = src_dma_addr;
+
+ } else {
+ dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr;
+ dst_dma_addr = crypto_engine->dst_sg_dma_addr;
+ total = req->cryptlen;
+
+ for_each_sg(req->dst, s, dst_sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (total > len)
+ total -= len;
+ else {
+ /* last sg list */
+ len = total;
+ len |= BIT(31);
+ total = 0;
+ }
+
+ dst_list[i].phy_addr = cpu_to_le32(phy_addr);
+ dst_list[i].len = cpu_to_le32(len);
+
+ }
+
+ dst_list[dst_sg_len].phy_addr = 0;
+ dst_list[dst_sg_len].len = 0;
+ }
+
+ if (total != 0) {
+ rc = -EINVAL;
+ goto free_req;
+ }
+
+ crypto_engine->resume = aspeed_sk_transfer_sg;
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+
+ /* Trigger engines */
+ ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST);
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+ return -EINPROGRESS;
+
+free_req:
+ if (req->dst == req->src) {
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ DMA_BIDIRECTIONAL);
+
+ } else {
+ dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ DMA_TO_DEVICE);
+ }
+
+ return rc;
+
+free_req_src:
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct crypto_skcipher *cipher;
+ struct aspeed_cipher_ctx *ctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ cipher = crypto_skcipher_reqtfm(req);
+ ctx = crypto_skcipher_ctx(cipher);
+
+ /* enable interrupt */
+ rctx->enc_cmd |= HACE_CMD_ISR_EN;
+
+ rctx->dst_nents = sg_nents(req->dst);
+ rctx->src_nents = sg_nents(req->src);
+
+ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma,
+ ASPEED_HACE_CONTEXT);
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+ memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE,
+ req->iv, DES_BLOCK_SIZE);
+ else
+ memcpy(crypto_engine->cipher_ctx, req->iv,
+ AES_BLOCK_SIZE);
+ }
+
+ if (hace_dev->version == AST2600_VERSION) {
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len);
+
+ return aspeed_sk_start_sg(hace_dev);
+ }
+
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH);
+
+ return aspeed_sk_start(hace_dev);
+}
+
+static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
+ return -EINVAL;
+ }
+
+ rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+ HACE_CMD_DES | HACE_CMD_CONTEXT_LOAD_ENABLE |
+ HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+ return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ int rc;
+
+ CIPHER_DBG(hace_dev, "keylen: %d bits\n", keylen);
+
+ if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+ dev_warn(hace_dev->dev, "invalid keylen: %d bits\n", keylen);
+ return -EINVAL;
+ }
+
+ if (keylen == DES_KEY_SIZE) {
+ rc = crypto_des_verify_key(tfm, key);
+ if (rc)
+ return rc;
+
+ } else if (keylen == DES3_EDE_KEY_SIZE) {
+ rc = crypto_des3_ede_verify_key(tfm, key);
+ if (rc)
+ return rc;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_des_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+ if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+ }
+
+ CIPHER_DBG(hace_dev, "%s\n",
+ (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt");
+
+ cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+ HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ cmd |= HACE_CMD_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ cmd |= HACE_CMD_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ cmd |= HACE_CMD_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rctx->enc_cmd = cmd;
+
+ return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ struct crypto_aes_ctx gen_aes_key;
+
+ CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8));
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ if (ctx->hace_dev->version == AST2500_VERSION) {
+ aes_expandkey(&gen_aes_key, key, keylen);
+ memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH);
+
+ } else {
+ memcpy(ctx->key, key, keylen);
+ }
+
+ ctx->key_len = keylen;
+
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_aes_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct aspeed_hace_alg *crypto_alg;
+
+
+ crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
+ ctx->hace_dev = crypto_alg->hace_dev;
+ ctx->start = aspeed_hace_skcipher_trigger;
+
+ CIPHER_DBG(ctx->hace_dev, "%s\n", name);
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm));
+
+ ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+
+ CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base));
+ crypto_free_skcipher(ctx->fallback_tfm);
+}
+
+static struct aspeed_hace_alg aspeed_crypto_algs[] = {
+ {
+ .alg.skcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ecb_encrypt,
+ .decrypt = aspeed_aes_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "aspeed-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_cbc_encrypt,
+ .decrypt = aspeed_aes_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "aspeed-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_cfb_encrypt,
+ .decrypt = aspeed_aes_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "aspeed-cfb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ofb_encrypt,
+ .decrypt = aspeed_aes_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "aspeed-ofb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ecb_encrypt,
+ .decrypt = aspeed_des_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "aspeed-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_cbc_encrypt,
+ .decrypt = aspeed_des_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "aspeed-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_cfb_encrypt,
+ .decrypt = aspeed_des_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(des)",
+ .cra_driver_name = "aspeed-cfb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ofb_encrypt,
+ .decrypt = aspeed_des_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(des)",
+ .cra_driver_name = "aspeed-ofb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ecb_encrypt,
+ .decrypt = aspeed_tdes_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "aspeed-ecb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_cbc_encrypt,
+ .decrypt = aspeed_tdes_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "aspeed-cbc-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_cfb_encrypt,
+ .decrypt = aspeed_tdes_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(des3_ede)",
+ .cra_driver_name = "aspeed-cfb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ofb_encrypt,
+ .decrypt = aspeed_tdes_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(des3_ede)",
+ .cra_driver_name = "aspeed-ofb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ctr_encrypt,
+ .decrypt = aspeed_aes_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "aspeed-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ctr_encrypt,
+ .decrypt = aspeed_des_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(des)",
+ .cra_driver_name = "aspeed-ctr-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ctr_encrypt,
+ .decrypt = aspeed_tdes_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(des3_ede)",
+ .cra_driver_name = "aspeed-ctr-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
+ crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
+ crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+}
+
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int rc, i;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
+ aspeed_crypto_algs[i].hace_dev = hace_dev;
+ rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+ if (rc) {
+ CIPHER_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
+ }
+ }
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
+ aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
+ rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ if (rc) {
+ CIPHER_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
+ }
+ }
+}
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
new file mode 100644
index 000000000000..935135229ebd
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -0,0 +1,1391 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define AHASH_DBG(h, fmt, ...) \
+ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define AHASH_DBG(h, fmt, ...) \
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* Initialization Vectors for SHA-family */
+static const __be32 sha1_iv[8] = {
+ cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+ cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+ cpu_to_be32(SHA1_H4), 0, 0, 0
+};
+
+static const __be32 sha224_iv[8] = {
+ cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+ cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+ cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+ cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 sha256_iv[8] = {
+ cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+ cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+ cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+ cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
+static const __be64 sha384_iv[8] = {
+ cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
+ cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
+ cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
+ cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
+};
+
+static const __be64 sha512_iv[8] = {
+ cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
+ cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
+ cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
+ cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
+};
+
+static const __be32 sha512_224_iv[16] = {
+ cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
+ cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
+ cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
+ cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
+ cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
+ cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
+ cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
+ cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
+};
+
+static const __be32 sha512_256_iv[16] = {
+ cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
+ cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
+ cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
+ cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
+ cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
+ cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
+ cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
+ cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
+};
+
+/* The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The byte 0x80 (a "1" bit followed by seven "0" bits) is appended at the
+ * end of the message, followed by "padlen - 1" zero bytes. Then a 64-bit
+ * (SHA1/SHA224/SHA256) or 128-bit (SHA384/SHA512) block containing the
+ * message length in bits is appended.
+ *
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows:
+ *  - if message length < 56 bytes then padlen = 56 - message length
+ *  - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as follows:
+ *  - if message length < 112 bytes then padlen = 112 - message length
+ *  - else padlen = 128 + 112 - message length
+ */
+static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx)
+{
+ unsigned int index, padlen;
+ __be64 bits[2];
+
+ AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
+
+ switch (rctx->flags & SHA_FLAGS_MASK) {
+ case SHA_FLAGS_SHA1:
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
+ index = rctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ *(rctx->buffer + rctx->bufcnt) = 0x80;
+ memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+ memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
+ rctx->bufcnt += padlen + 8;
+ break;
+ default:
+ bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
+ bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
+ rctx->digcnt[0] >> 61);
+ index = rctx->bufcnt & 0x7f;
+ padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
+ *(rctx->buffer + rctx->bufcnt) = 0x80;
+ memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+ memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
+ rctx->bufcnt += padlen + 16;
+ break;
+ }
+}
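+
+/*
+ * Worked example (illustrative values): for a 20-byte SHA-256 message,
+ * index = 20 and padlen = 56 - 20 = 36, so the padded buffer holds
+ * 20 + 36 + 8 = 64 bytes, i.e. exactly one block.  With index = 60,
+ * padlen = (64 + 56) - 60 = 60 and the result is 128 bytes (two blocks).
+ */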
+
+/*
+ * Prepare DMA buffer before hardware engine
+ * processing.
+ */
+static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ int length, remain;
+
+ length = rctx->total + rctx->bufcnt;
+ remain = length % rctx->block_size;
+
+ AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
+
+ if (rctx->bufcnt)
+ memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
+
+ if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
+ rctx->bufcnt, rctx->src_sg,
+ rctx->offset, rctx->total - remain, 0);
+ rctx->offset += rctx->total - remain;
+
+ } else {
+ dev_warn(hace_dev->dev, "Hash data length is too large\n");
+ return -EINVAL;
+ }
+
+ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
+ rctx->offset, remain, 0);
+
+ rctx->bufcnt = remain;
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ return -ENOMEM;
+ }
+
+ hash_engine->src_length = length - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+}
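+
+/*
+ * Resulting layout of ahash_src_addr after the copies above: the bufcnt
+ * bytes carried over from the previous update come first, followed by the
+ * block-aligned part of the new scatterlist data.  The sub-block remainder
+ * is stashed back into rctx->buffer for the next round, so only
+ * "length - remain" bytes are handed to the engine.
+ */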
+
+/*
+ * Prepare DMA buffer as SG list buffer before
+ * hardware engine processing.
+ */
+static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct aspeed_sg_list *src_list;
+ struct scatterlist *s;
+ int length, remain, sg_len, i;
+ int rc = 0;
+
+ remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
+ length = rctx->total + rctx->bufcnt - remain;
+
+ AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
+ "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+ "length", length, "remain", remain);
+
+ sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+ if (!sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto free_src_sg;
+ }
+
+ if (rctx->bufcnt != 0) {
+ u32 phy_addr;
+ u32 len;
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ phy_addr = rctx->buffer_dma_addr;
+ len = rctx->bufcnt;
+ length -= len;
+
+ /* Last sg list */
+ if (length == 0)
+ len |= HASH_SG_LAST_LIST;
+
+ src_list[0].phy_addr = cpu_to_le32(phy_addr);
+ src_list[0].len = cpu_to_le32(len);
+ src_list++;
+ }
+
+ if (length != 0) {
+ for_each_sg(rctx->src_sg, s, sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (length > len)
+ length -= len;
+ else {
+ /* Last sg list */
+ len = length;
+ len |= HASH_SG_LAST_LIST;
+ length = 0;
+ }
+
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
+ }
+ }
+
+ if (length != 0) {
+ rc = -EINVAL;
+ goto free_rctx_buffer;
+ }
+
+ rctx->offset = rctx->total - remain;
+ hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+
+free_rctx_buffer:
+ if (rctx->bufcnt != 0)
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+free_src_sg:
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+end:
+ return rc;
+}
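+
+/*
+ * Example descriptor layout (illustrative values: SHA-256, bufcnt = 64,
+ * one 200-byte source segment):
+ *
+ *   src_list[0]: rctx->buffer_dma_addr, len = 64
+ *   src_list[1]: sg_dma_address(sg),    len = 192 | HASH_SG_LAST_LIST
+ *
+ * The engine consumes 256 block-aligned bytes; the 8 trailing bytes stay
+ * in the scatterlist and are copied back into rctx->buffer once the
+ * hardware completes.
+ */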
+
+static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
+
+ return 0;
+}
+
+/*
+ * Copy digest to the corresponding request result.
+ * This function will be called at the final() stage.
+ */
+static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+
+ memcpy(req->result, rctx->digest, rctx->digsize);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+/*
+ * Trigger hardware engines to do the math.
+ */
+static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
+ aspeed_hace_fn_t resume)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
+ &hash_engine->src_dma, &hash_engine->digest_dma,
+ hash_engine->src_length);
+
+ rctx->cmd |= HASH_CMD_INT_ENABLE;
+ hash_engine->resume = resume;
+
+ ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
+ ast_hace_write(hace_dev, hash_engine->digest_dma,
+ ASPEED_HACE_HASH_DIGEST_BUFF);
+ ast_hace_write(hace_dev, hash_engine->digest_dma,
+ ASPEED_HACE_HASH_KEY_BUFF);
+ ast_hace_write(hace_dev, hash_engine->src_length,
+ ASPEED_HACE_HASH_DATA_LEN);
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+
+ ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
+
+ return -EINPROGRESS;
+}
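+
+/*
+ * Completion path for the trigger above: the engine raises HACE_HASH_ISR
+ * once the command finishes, the interrupt handler schedules the hash
+ * done_task, and that tasklet invokes the "resume" callback registered
+ * here to post-process the result.
+ */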
+
+/*
+ * HMAC resume performs the second hash pass, which produces the final
+ * HMAC code from the inner hash result and the outer key pad.
+ */
+static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+ int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+
+ /* o key pad + hash sum 1 */
+ memcpy(rctx->buffer, bctx->opad, rctx->block_size);
+ memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
+
+ rctx->bufcnt = rctx->block_size + rctx->digsize;
+ rctx->digcnt[0] = rctx->block_size + rctx->digsize;
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+ memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
+
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ hash_engine->src_dma = rctx->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+ return rc;
+}
+
+static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ hash_engine->src_dma = rctx->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ if (rctx->flags & SHA_FLAGS_HMAC)
+ return aspeed_hace_ahash_trigger(hace_dev,
+ aspeed_ahash_hmac_resume);
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+ return rc;
+}
+
+static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+
+ if (rctx->bufcnt != 0)
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
+ rctx->total - rctx->offset, 0);
+
+ rctx->bufcnt = rctx->total - rctx->offset;
+ rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ aspeed_hace_fn_t resume;
+ int ret;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ if (hace_dev->version == AST2600_VERSION) {
+ rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
+ resume = aspeed_ahash_update_resume_sg;
+
+ } else {
+ resume = aspeed_ahash_update_resume;
+ }
+
+ ret = hash_engine->dma_prepare(hace_dev);
+ if (ret)
+ return ret;
+
+ return aspeed_hace_ahash_trigger(hace_dev, resume);
+}
+
+static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
+ struct ahash_request *req)
+{
+ return crypto_transfer_hash_request_to_engine(
+ hace_dev->crypt_engine_hash, req);
+}
+
+static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_engine_hash *hash_engine;
+ int ret = 0;
+
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+ if (rctx->op == SHA_OP_UPDATE)
+ ret = aspeed_ahash_req_update(hace_dev);
+ else if (rctx->op == SHA_OP_FINAL)
+ ret = aspeed_ahash_req_final(hace_dev);
+
+ if (ret != -EINPROGRESS)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_engine_hash *hash_engine;
+
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->req = req;
+
+ if (hace_dev->version == AST2600_VERSION)
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
+ else
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
+
+ return 0;
+}
+
+static int aspeed_sham_update(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+ rctx->total = req->nbytes;
+ rctx->src_sg = req->src;
+ rctx->offset = 0;
+ rctx->src_nents = sg_nents(req->src);
+ rctx->op = SHA_OP_UPDATE;
+
+ rctx->digcnt[0] += rctx->total;
+ if (rctx->digcnt[0] < rctx->total)
+ rctx->digcnt[1]++;
+
+ if (rctx->bufcnt + rctx->total < rctx->block_size) {
+ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
+ rctx->src_sg, rctx->offset,
+ rctx->total, 0);
+ rctx->bufcnt += rctx->total;
+
+ return 0;
+ }
+
+ return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ return crypto_shash_digest(shash, data, len, out);
+}
+
+static int aspeed_sham_final(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
+ req->nbytes, rctx->total);
+ rctx->op = SHA_OP_FINAL;
+
+ return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_finup(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ int rc1, rc2;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+ rctx->flags |= SHA_FLAGS_FINUP;
+
+ rc1 = aspeed_sham_update(req);
+ if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
+ return rc1;
+
+ /*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, unless the request is still pending
+	 * (-EINPROGRESS or -EBUSY)
+ */
+ rc2 = aspeed_sham_final(req);
+
+ return rc1 ? : rc2;
+}
+
+static int aspeed_sham_init(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ AHASH_DBG(hace_dev, "%s: digest size:%d\n",
+ crypto_tfm_alg_name(&tfm->base),
+ crypto_ahash_digestsize(tfm));
+
+ rctx->cmd = HASH_CMD_ACC_MODE;
+ rctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA1;
+ rctx->digsize = SHA1_DIGEST_SIZE;
+ rctx->block_size = SHA1_BLOCK_SIZE;
+ rctx->sha_iv = sha1_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha1_iv, rctx->ivsize);
+ break;
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA224;
+ rctx->digsize = SHA224_DIGEST_SIZE;
+ rctx->block_size = SHA224_BLOCK_SIZE;
+ rctx->sha_iv = sha224_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA256;
+ rctx->digsize = SHA256_DIGEST_SIZE;
+ rctx->block_size = SHA256_BLOCK_SIZE;
+ rctx->sha_iv = sha256_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha256_iv, rctx->ivsize);
+ break;
+ case SHA384_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA384;
+ rctx->digsize = SHA384_DIGEST_SIZE;
+ rctx->block_size = SHA384_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha384_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha384_iv, rctx->ivsize);
+ break;
+ case SHA512_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512;
+ rctx->digsize = SHA512_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha512_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_iv, rctx->ivsize);
+ break;
+ default:
+		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+ crypto_ahash_digestsize(tfm));
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+ memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+ return 0;
+}
+
+static int aspeed_sha512s_init(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
+
+ rctx->cmd = HASH_CMD_ACC_MODE;
+ rctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512_224;
+ rctx->digsize = SHA224_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = sha512_224_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512_256;
+ rctx->digsize = SHA256_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = sha512_256_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
+ break;
+ default:
+		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+ crypto_ahash_digestsize(tfm));
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+ memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+ return 0;
+}
+
+static int aspeed_sham_digest(struct ahash_request *req)
+{
+ return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
+}
+
+static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+ int ds = crypto_shash_digestsize(bctx->shash);
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int err = 0;
+ int i;
+
+ AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
+ keylen);
+
+ if (keylen > bs) {
+ err = aspeed_sham_shash_digest(bctx->shash,
+ crypto_shash_get_flags(bctx->shash),
+ key, keylen, bctx->ipad);
+ if (err)
+ return err;
+ keylen = ds;
+
+ } else {
+ memcpy(bctx->ipad, key, keylen);
+ }
+
+ memset(bctx->ipad + keylen, 0, bs - keylen);
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= HMAC_IPAD_VALUE;
+ bctx->opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ return err;
+}
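+
+/*
+ * The pads built above follow the standard HMAC construction (RFC 2104):
+ * the key is zero-padded to the block size (or first hashed down with the
+ * base algorithm when longer than a block), then
+ *   ipad[i] = key[i] ^ 0x36, opad[i] = key[i] ^ 0x5c,
+ * so that HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
+ */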
+
+static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
+{
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct aspeed_hace_alg *ast_alg;
+
+ ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
+ tctx->hace_dev = ast_alg->hace_dev;
+ tctx->flags = 0;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct aspeed_sham_reqctx));
+
+ if (ast_alg->alg_base) {
+ /* hmac related */
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ tctx->flags |= SHA_FLAGS_HMAC;
+ bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(bctx->shash)) {
+ dev_warn(ast_alg->hace_dev->dev,
+ "base driver '%s' could not be loaded.\n",
+ ast_alg->alg_base);
+ return PTR_ERR(bctx->shash);
+ }
+ }
+
+ tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
+ tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
+ tctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
+{
+ struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
+
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ crypto_free_shash(bctx->shash);
+ }
+}
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static struct aspeed_hace_alg aspeed_ahash_algs[] = {
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "aspeed-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "aspeed-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "aspeed-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha1",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "aspeed-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha224",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "aspeed-hmac-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha256",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "aspeed-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+};
+
+static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "aspeed-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "aspeed-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512_224",
+ .cra_driver_name = "aspeed-sha512_224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512_256",
+ .cra_driver_name = "aspeed-sha512_256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha384",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "aspeed-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "aspeed-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512_224",
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512_224)",
+ .cra_driver_name = "aspeed-hmac-sha512_224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512_256",
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512_256)",
+ .cra_driver_name = "aspeed-hmac-sha512_256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+};
+
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
+ crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
+ crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+}
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int rc, i;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
+ aspeed_ahash_algs[i].hace_dev = hace_dev;
+ rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
+ if (rc) {
+ AHASH_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
+ }
+ }
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
+ aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
+ rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+ if (rc) {
+ AHASH_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
+ }
+ }
+}
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
new file mode 100644
index 000000000000..656cb92c8bb6
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define HACE_DBG(d, fmt, ...) \
+ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define HACE_DBG(d, fmt, ...) \
+ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* HACE interrupt service routine */
+static irqreturn_t aspeed_hace_irq(int irq, void *dev)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev;
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ u32 sts;
+
+ sts = ast_hace_read(hace_dev, ASPEED_HACE_STS);
+ ast_hace_write(hace_dev, sts, ASPEED_HACE_STS);
+
+ HACE_DBG(hace_dev, "irq status: 0x%x\n", sts);
+
+ if (sts & HACE_HASH_ISR) {
+ if (hash_engine->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&hash_engine->done_task);
+ else
+ dev_warn(hace_dev->dev, "HASH no active requests.\n");
+ }
+
+ if (sts & HACE_CRYPTO_ISR) {
+ if (crypto_engine->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&crypto_engine->done_task);
+ else
+ dev_warn(hace_dev->dev, "CRYPTO no active requests.\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void aspeed_hace_crypto_done_task(unsigned long data)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+
+ crypto_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_hash_done_task(unsigned long data)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ hash_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ aspeed_register_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ aspeed_register_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ aspeed_unregister_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ aspeed_unregister_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static const struct of_device_id aspeed_hace_of_matches[] = {
+ { .compatible = "aspeed,ast2500-hace", .data = (void *)5, },
+ { .compatible = "aspeed,ast2600-hace", .data = (void *)6, },
+ {},
+};
+
+static int aspeed_hace_probe(struct platform_device *pdev)
+{
+ struct aspeed_engine_crypto *crypto_engine;
+ const struct of_device_id *hace_dev_id;
+ struct aspeed_engine_hash *hash_engine;
+ struct aspeed_hace_dev *hace_dev;
+ struct resource *res;
+ int rc;
+
+ hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
+ GFP_KERNEL);
+ if (!hace_dev)
+ return -ENOMEM;
+
+ hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev);
+ if (!hace_dev_id) {
+ dev_err(&pdev->dev, "Failed to match hace dev id\n");
+ return -EINVAL;
+ }
+
+ hace_dev->dev = &pdev->dev;
+ hace_dev->version = (unsigned long)hace_dev_id->data;
+ hash_engine = &hace_dev->hash_engine;
+ crypto_engine = &hace_dev->crypto_engine;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ platform_set_drvdata(pdev, hace_dev);
+
+ hace_dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hace_dev->regs))
+ return PTR_ERR(hace_dev->regs);
+
+ /* Get irq number and register it */
+ hace_dev->irq = platform_get_irq(pdev, 0);
+ if (hace_dev->irq < 0)
+ return -ENXIO;
+
+ rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0,
+ dev_name(&pdev->dev), hace_dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to request interrupt\n");
+ return rc;
+ }
+
+ /* Get clk and enable it */
+ hace_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hace_dev->clk)) {
+ dev_err(&pdev->dev, "Failed to get clk\n");
+ return -ENODEV;
+ }
+
+ rc = clk_prepare_enable(hace_dev->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to enable clock 0x%x\n", rc);
+ return rc;
+ }
+
+ /* Initialize crypto hardware engine structure for hash */
+ hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
+ true);
+ if (!hace_dev->crypt_engine_hash) {
+ rc = -ENOMEM;
+ goto clk_exit;
+ }
+
+ rc = crypto_engine_start(hace_dev->crypt_engine_hash);
+ if (rc)
+ goto err_engine_hash_start;
+
+ tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
+ (unsigned long)hace_dev);
+
+ /* Initialize crypto hardware engine structure for crypto */
+ hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
+ true);
+ if (!hace_dev->crypt_engine_crypto) {
+ rc = -ENOMEM;
+ goto err_engine_hash_start;
+ }
+
+ rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
+ if (rc)
+ goto err_engine_crypto_start;
+
+ tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
+ (unsigned long)hace_dev);
+
+	/* Allocate DMA buffer used for hash engine input */
+ hash_engine->ahash_src_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_HASH_SRC_DMA_BUF_LEN,
+ &hash_engine->ahash_src_dma_addr,
+ GFP_KERNEL);
+ if (!hash_engine->ahash_src_addr) {
+ dev_err(&pdev->dev, "Failed to allocate dma buffer\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+	/* Allocate DMA buffer used for crypto engine context */
+ crypto_engine->cipher_ctx =
+ dmam_alloc_coherent(&pdev->dev,
+ PAGE_SIZE,
+ &crypto_engine->cipher_ctx_dma,
+ GFP_KERNEL);
+ if (!crypto_engine->cipher_ctx) {
+ dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+	/* Allocate DMA buffer used for crypto engine input */
+ crypto_engine->cipher_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
+ &crypto_engine->cipher_dma_addr,
+ GFP_KERNEL);
+ if (!crypto_engine->cipher_addr) {
+ dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+	/* Allocate DMA buffer used for crypto engine output */
+ if (hace_dev->version == AST2600_VERSION) {
+ crypto_engine->dst_sg_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_CRYPTO_DST_DMA_BUF_LEN,
+ &crypto_engine->dst_sg_dma_addr,
+ GFP_KERNEL);
+ if (!crypto_engine->dst_sg_addr) {
+ dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+ }
+
+ aspeed_hace_register(hace_dev);
+
+ dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n");
+
+ return 0;
+
+err_engine_crypto_start:
+ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+err_engine_hash_start:
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+clk_exit:
+ clk_disable_unprepare(hace_dev->clk);
+
+ return rc;
+}
+
+static int aspeed_hace_remove(struct platform_device *pdev)
+{
+ struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev);
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ aspeed_hace_unregister(hace_dev);
+
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+
+ tasklet_kill(&hash_engine->done_task);
+ tasklet_kill(&crypto_engine->done_task);
+
+ clk_disable_unprepare(hace_dev->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
+
+static struct platform_driver aspeed_hace_driver = {
+ .probe = aspeed_hace_probe,
+ .remove = aspeed_hace_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_hace_of_matches,
+ },
+};
+
+module_platform_driver(aspeed_hace_driver);
+
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_DESCRIPTION("Aspeed HACE Crypto Accelerator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
new file mode 100644
index 000000000000..f2cde23b56ae
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __ASPEED_HACE_H__
+#define __ASPEED_HACE_H__
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fips.h>
+#include <linux/dma-mapping.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+/*****************************
+ * *
+ * HACE register definitions *
+ * *
+ * ***************************/
+#define ASPEED_HACE_SRC 0x00 /* Crypto Data Source Base Address Register */
+#define ASPEED_HACE_DEST 0x04 /* Crypto Data Destination Base Address Register */
+#define ASPEED_HACE_CONTEXT 0x08 /* Crypto Context Buffer Base Address Register */
+#define ASPEED_HACE_DATA_LEN 0x0C /* Crypto Data Length Register */
+#define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */
+
+/* G5 */
+#define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */
+/* G6 */
+#define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length Register */
+#define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */
+
+#define ASPEED_HACE_STS 0x1C /* HACE Status Register */
+
+#define ASPEED_HACE_HASH_SRC 0x20 /* Hash Data Source Base Address Register */
+#define ASPEED_HACE_HASH_DIGEST_BUFF 0x24 /* Hash Digest Write Buffer Base Address Register */
+#define ASPEED_HACE_HASH_KEY_BUFF 0x28 /* Hash HMAC Key Buffer Base Address Register */
+#define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */
+#define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */
+
+/* crypto cmd */
+#define HACE_CMD_SINGLE_DES 0
+#define HACE_CMD_TRIPLE_DES BIT(17)
+#define HACE_CMD_AES_SELECT 0
+#define HACE_CMD_DES_SELECT BIT(16)
+#define HACE_CMD_ISR_EN BIT(12)
+#define HACE_CMD_CONTEXT_SAVE_ENABLE (0)
+#define HACE_CMD_CONTEXT_SAVE_DISABLE BIT(9)
+#define HACE_CMD_AES (0)
+#define HACE_CMD_DES (0)
+#define HACE_CMD_RC4 BIT(8)
+#define HACE_CMD_DECRYPT (0)
+#define HACE_CMD_ENCRYPT BIT(7)
+
+#define HACE_CMD_ECB (0x0 << 4)
+#define HACE_CMD_CBC (0x1 << 4)
+#define HACE_CMD_CFB (0x2 << 4)
+#define HACE_CMD_OFB (0x3 << 4)
+#define HACE_CMD_CTR (0x4 << 4)
+#define HACE_CMD_OP_MODE_MASK (0x7 << 4)
+
+#define HACE_CMD_AES128 (0x0 << 2)
+#define HACE_CMD_AES192 (0x1 << 2)
+#define HACE_CMD_AES256 (0x2 << 2)
+#define HACE_CMD_OP_CASCADE (0x3)
+#define HACE_CMD_OP_INDEPENDENT (0x1)
+
+/* G5 */
+#define HACE_CMD_RI_WO_DATA_ENABLE (0)
+#define HACE_CMD_RI_WO_DATA_DISABLE BIT(11)
+#define HACE_CMD_CONTEXT_LOAD_ENABLE (0)
+#define HACE_CMD_CONTEXT_LOAD_DISABLE BIT(10)
+/* G6 */
+#define HACE_CMD_AES_KEY_FROM_OTP BIT(24)
+#define HACE_CMD_GHASH_TAG_XOR_EN BIT(23)
+#define HACE_CMD_GHASH_PAD_LEN_INV BIT(22)
+#define HACE_CMD_GCM_TAG_ADDR_SEL BIT(21)
+#define HACE_CMD_MBUS_REQ_SYNC_EN BIT(20)
+#define HACE_CMD_DES_SG_CTRL BIT(19)
+#define HACE_CMD_SRC_SG_CTRL BIT(18)
+#define HACE_CMD_CTR_IV_AES_96 (0x1 << 14)
+#define HACE_CMD_CTR_IV_DES_32 (0x1 << 14)
+#define HACE_CMD_CTR_IV_AES_64 (0x2 << 14)
+#define HACE_CMD_CTR_IV_AES_32 (0x3 << 14)
+#define HACE_CMD_AES_KEY_HW_EXP BIT(13)
+#define HACE_CMD_GCM (0x5 << 4)
+
+/* interrupt status reg */
+#define HACE_CRYPTO_ISR BIT(12)
+#define HACE_HASH_ISR BIT(9)
+#define HACE_HASH_BUSY BIT(0)
+
+/* hash cmd reg */
+#define HASH_CMD_MBUS_REQ_SYNC_EN BIT(20)
+#define HASH_CMD_HASH_SRC_SG_CTRL BIT(18)
+#define HASH_CMD_SHA512_224 (0x3 << 10)
+#define HASH_CMD_SHA512_256 (0x2 << 10)
+#define HASH_CMD_SHA384 (0x1 << 10)
+#define HASH_CMD_SHA512 (0)
+#define HASH_CMD_INT_ENABLE BIT(9)
+#define HASH_CMD_HMAC (0x1 << 7)
+#define HASH_CMD_ACC_MODE (0x2 << 7)
+#define HASH_CMD_HMAC_KEY (0x3 << 7)
+#define HASH_CMD_SHA1 (0x2 << 4)
+#define HASH_CMD_SHA224 (0x4 << 4)
+#define HASH_CMD_SHA256 (0x5 << 4)
+#define HASH_CMD_SHA512_SER (0x6 << 4)
+#define HASH_CMD_SHA_SWAP (0x2 << 2)
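+
+/*
+ * Example: an accumulative SHA-256 update on an AST2600 ends up writing
+ *   HASH_CMD_ACC_MODE | HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP |
+ *   HASH_CMD_HASH_SRC_SG_CTRL | HASH_CMD_INT_ENABLE
+ * to ASPEED_HACE_HASH_CMD (see aspeed_sham_init() and
+ * aspeed_ahash_req_update()).
+ */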
+
+#define HASH_SG_LAST_LIST BIT(31)
+
+#define CRYPTO_FLAGS_BUSY BIT(1)
+
+#define SHA_OP_UPDATE 1
+#define SHA_OP_FINAL 2
+
+#define SHA_FLAGS_SHA1 BIT(0)
+#define SHA_FLAGS_SHA224 BIT(1)
+#define SHA_FLAGS_SHA256 BIT(2)
+#define SHA_FLAGS_SHA384 BIT(3)
+#define SHA_FLAGS_SHA512 BIT(4)
+#define SHA_FLAGS_SHA512_224 BIT(5)
+#define SHA_FLAGS_SHA512_256 BIT(6)
+#define SHA_FLAGS_HMAC BIT(8)
+#define SHA_FLAGS_FINUP BIT(9)
+#define SHA_FLAGS_MASK (0xff)
+
+#define ASPEED_CRYPTO_SRC_DMA_BUF_LEN 0xa000
+#define ASPEED_CRYPTO_DST_DMA_BUF_LEN 0xa000
+#define ASPEED_CRYPTO_GCM_TAG_OFFSET 0x9ff0
+#define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000
+#define ASPEED_HASH_QUEUE_LENGTH 50
+
+#define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \
+ HACE_CMD_OFB | HACE_CMD_CTR)
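+
+/*
+ * For illustration only (the exact combination is built by the cipher
+ * code): an AES-128 CBC encryption command would typically OR together
+ *   HACE_CMD_AES_SELECT | HACE_CMD_AES128 | HACE_CMD_CBC |
+ *   HACE_CMD_ENCRYPT | HACE_CMD_ISR_EN
+ * before being written to ASPEED_HACE_CMD.
+ */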
+
+struct aspeed_hace_dev;
+
+typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
+
+struct aspeed_sg_list {
+ __le32 len;
+ __le32 phy_addr;
+};
+
+struct aspeed_engine_hash {
+ struct tasklet_struct done_task;
+ unsigned long flags;
+ struct ahash_request *req;
+
+ /* input buffer */
+ void *ahash_src_addr;
+ dma_addr_t ahash_src_dma_addr;
+
+ dma_addr_t src_dma;
+ dma_addr_t digest_dma;
+
+ size_t src_length;
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
+ aspeed_hace_fn_t dma_prepare;
+};
+
+struct aspeed_sha_hmac_ctx {
+ struct crypto_shash *shash;
+ u8 ipad[SHA512_BLOCK_SIZE];
+ u8 opad[SHA512_BLOCK_SIZE];
+};
+
+struct aspeed_sham_ctx {
+ struct crypto_engine_ctx enginectx;
+
+ struct aspeed_hace_dev *hace_dev;
+ unsigned long flags; /* hmac flag */
+
+ struct aspeed_sha_hmac_ctx base[0];
+};
+
+struct aspeed_sham_reqctx {
+	unsigned long		flags;		/* SHA_FLAGS_* state flags */
+ unsigned long op; /* final or update */
+ u32 cmd; /* trigger cmd */
+
+ /* walk state */
+ struct scatterlist *src_sg;
+ int src_nents;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* per update length */
+
+ size_t digsize;
+ size_t block_size;
+ size_t ivsize;
+ const __be32 *sha_iv;
+
+	/* remaining data buffer */
+ u8 buffer[SHA512_BLOCK_SIZE * 2];
+ dma_addr_t buffer_dma_addr;
+ size_t bufcnt; /* buffer counter */
+
+ /* output buffer */
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+ dma_addr_t digest_dma_addr;
+ u64 digcnt[2];
+};
+
+struct aspeed_engine_crypto {
+ struct tasklet_struct done_task;
+ unsigned long flags;
+ struct skcipher_request *req;
+
+ /* context buffer */
+ void *cipher_ctx;
+ dma_addr_t cipher_ctx_dma;
+
+ /* input buffer, could be single/scatter-gather lists */
+ void *cipher_addr;
+ dma_addr_t cipher_dma_addr;
+
+ /* output buffer, only used in scatter-gather lists */
+ void *dst_sg_addr;
+ dma_addr_t dst_sg_dma_addr;
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
+};
+
+struct aspeed_cipher_ctx {
+ struct crypto_engine_ctx enginectx;
+
+ struct aspeed_hace_dev *hace_dev;
+ int key_len;
+ u8 key[AES_MAX_KEYLENGTH];
+
+ /* callback func */
+ aspeed_hace_fn_t start;
+
+ struct crypto_skcipher *fallback_tfm;
+};
+
+struct aspeed_cipher_reqctx {
+ int enc_cmd;
+ int src_nents;
+ int dst_nents;
+
+ struct skcipher_request fallback_req; /* keep at the end */
+};
+
+struct aspeed_hace_dev {
+ void __iomem *regs;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ unsigned long version;
+
+ struct crypto_engine *crypt_engine_hash;
+ struct crypto_engine *crypt_engine_crypto;
+
+ struct aspeed_engine_hash hash_engine;
+ struct aspeed_engine_crypto crypto_engine;
+};
+
+struct aspeed_hace_alg {
+ struct aspeed_hace_dev *hace_dev;
+
+ const char *alg_base;
+
+ union {
+ struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
+ } alg;
+};
+
+enum aspeed_version {
+ AST2500_VERSION = 5,
+ AST2600_VERSION
+};
+
+#define ast_hace_write(hace, val, offset) \
+ writel((val), (hace)->regs + (offset))
+#define ast_hace_read(hace, offset) \
+ readl((hace)->regs + (offset))
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+
+#endif
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 9ad188cffd0d..51c66afbe677 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1712,7 +1712,7 @@ static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
cipher_len = regk_crypto_key_256;
break;
default:
- pr_err("%s: Invalid key length %d!\n",
+ pr_err("%s: Invalid key length %zu!\n",
MODULE_NAME, ctx->key_length);
return -EINVAL;
}
@@ -2091,7 +2091,7 @@ static void artpec6_crypto_task(unsigned long data)
return;
}
- spin_lock_bh(&ac->queue_lock);
+ spin_lock(&ac->queue_lock);
list_for_each_entry_safe(req, n, &ac->pending, list) {
struct artpec6_crypto_dma_descriptors *dma = req->dma;
@@ -2128,7 +2128,7 @@ static void artpec6_crypto_task(unsigned long data)
artpec6_crypto_process_queue(ac, &complete_in_progress);
- spin_unlock_bh(&ac->queue_lock);
+ spin_unlock(&ac->queue_lock);
/* Perform the completion callbacks without holding the queue lock
* to allow new request submissions from the callbacks.
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 053315e260c2..c8c799428fe0 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1928,7 +1928,7 @@ static int ahash_enqueue(struct ahash_request *req)
/* SPU2 hardware does not compute hash of zero length data */
if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
(iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
- alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+ alg_name = crypto_ahash_alg_name(tfm);
flow_log("Doing %sfinal %s zero-len hash request in software\n",
rctx->is_final ? "" : "non-", alg_name);
err = do_shash((unsigned char *)alg_name, req->result,
@@ -2029,7 +2029,7 @@ static int ahash_init(struct ahash_request *req)
* supported by the hardware, we need to handle it in software
* by calling synchronous hash functions.
*/
- alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+ alg_name = crypto_ahash_alg_name(tfm);
hash = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(hash)) {
ret = PTR_ERR(hash);
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 71281a3bdbdc..d6d87332140a 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -231,7 +231,7 @@ struct iproc_ctx_s {
/*
* shash descriptor - needed to perform incremental hashing in
- * in software, when hw doesn't support it.
+ * software, when hw doesn't support it.
*/
struct shash_desc *shash;
diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h
index 8ec6edc69f3f..ae4791a8ec4a 100644
--- a/drivers/crypto/cavium/cpt/cpt_hw_types.h
+++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h
@@ -396,7 +396,7 @@ union cptx_vqx_misc_ena_w1s {
* Word0
* reserved_20_63:44 [63:20] Reserved.
* dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
- * to the CPT instruction doorbell count. Readback value is the the
+ * to the CPT instruction doorbell count. Readback value is the
* current number of pending doorbell requests. If counter overflows
* CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
* zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 8c32d0eb8fcf..6872ac344001 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -253,6 +253,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
const struct firmware *fw_entry;
struct device *dev = &cpt->pdev->dev;
struct ucode_header *ucode;
+ unsigned int code_length;
struct microcode *mcode;
int j, ret = 0;
@@ -263,11 +264,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ucode = (struct ucode_header *)fw_entry->data;
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
- mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size) {
+ code_length = ntohl(ucode->code_length);
+ if (code_length == 0 || code_length >= INT_MAX / 2) {
ret = -EINVAL;
goto fw_release;
}
+ mcode->code_size = code_length * 2;
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 7df71fcebe8f..1046a746d36f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -198,22 +198,16 @@ static int zip_decompress(const u8 *src, unsigned int slen,
/* Legacy Compress framework start */
int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_ctx_init(zip_ctx, 0);
-
- return ret;
+ return zip_ctx_init(zip_ctx, 0);
}
int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_ctx_init(zip_ctx, 1);
-
- return ret;
+ return zip_ctx_init(zip_ctx, 1);
}
void zip_free_comp_ctx(struct crypto_tfm *tfm)
@@ -227,24 +221,18 @@ int zip_comp_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_comp_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* Legacy compress framework end */
/* SCOMP framework start */
@@ -298,22 +286,16 @@ int zip_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
- ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_scomp_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
- ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* SCOMP framework end */
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index ec97daf0fcb7..278636ed251a 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -64,7 +64,6 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
- int ret;
if (!ctx->u.des3.key_len)
return -EINVAL;
@@ -100,9 +99,7 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
rctx->cmd.u.des3.src_len = req->cryptlen;
rctx->cmd.u.des3.dst = req->dst;
- ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
-
- return ret;
+ return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_des3_encrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 7d4b4ad1db1f..9f753cb4f5f1 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -641,6 +641,10 @@ static void ccp_dma_release(struct ccp_device *ccp)
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
@@ -766,8 +770,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
- dma_async_device_unregister(dma_dev);
ccp_dma_release(ccp);
+ dma_async_device_unregister(dma_dev);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 9f588c9728f8..06fc7156c04f 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -211,18 +211,24 @@ static int sev_read_init_ex_file(void)
if (IS_ERR(fp)) {
int ret = PTR_ERR(fp);
- dev_err(sev->dev,
- "SEV: could not open %s for read, error %d\n",
- init_ex_path, ret);
+ if (ret == -ENOENT) {
+ dev_info(sev->dev,
+ "SEV: %s does not exist and will be created later.\n",
+ init_ex_path);
+ ret = 0;
+ } else {
+ dev_err(sev->dev,
+ "SEV: could not open %s for read, error %d\n",
+ init_ex_path, ret);
+ }
return ret;
}
nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
if (nread != NV_LENGTH) {
- dev_err(sev->dev,
- "SEV: failed to read %u bytes to non volatile memory area, ret %ld\n",
+ dev_info(sev->dev,
+ "SEV: could not read %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nread);
- return -EIO;
}
dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
@@ -231,7 +237,7 @@ static int sev_read_init_ex_file(void)
return 0;
}
-static void sev_write_init_ex_file(void)
+static int sev_write_init_ex_file(void)
{
struct sev_device *sev = psp_master->sev_data;
struct file *fp;
@@ -241,14 +247,16 @@ static void sev_write_init_ex_file(void)
lockdep_assert_held(&sev_cmd_mutex);
if (!sev_init_ex_buffer)
- return;
+ return 0;
fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
if (IS_ERR(fp)) {
+ int ret = PTR_ERR(fp);
+
dev_err(sev->dev,
- "SEV: could not open file for write, error %ld\n",
- PTR_ERR(fp));
- return;
+ "SEV: could not open file for write, error %d\n",
+ ret);
+ return ret;
}
nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
@@ -259,18 +267,20 @@ static void sev_write_init_ex_file(void)
dev_err(sev->dev,
"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nwrite);
- return;
+ return -EIO;
}
dev_dbg(sev->dev, "SEV: write successful to NV file\n");
+
+ return 0;
}
-static void sev_write_init_ex_file_if_required(int cmd_id)
+static int sev_write_init_ex_file_if_required(int cmd_id)
{
lockdep_assert_held(&sev_cmd_mutex);
if (!sev_init_ex_buffer)
- return;
+ return 0;
/*
* Only a few platform commands modify the SPI/NV area, but none of the
@@ -285,10 +295,10 @@ static void sev_write_init_ex_file_if_required(int cmd_id)
case SEV_CMD_PEK_GEN:
break;
default:
- return;
+ return 0;
}
- sev_write_init_ex_file();
+ return sev_write_init_ex_file();
}
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
@@ -361,7 +371,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
cmd, reg & PSP_CMDRESP_ERR_MASK);
ret = -EIO;
} else {
- sev_write_init_ex_file_if_required(cmd);
+ ret = sev_write_init_ex_file_if_required(cmd);
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
@@ -410,17 +420,12 @@ static int __sev_init_locked(int *error)
static int __sev_init_ex_locked(int *error)
{
struct sev_data_init_ex data;
- int ret;
memset(&data, 0, sizeof(data));
data.length = sizeof(data);
data.nv_address = __psp_pa(sev_init_ex_buffer);
data.nv_len = NV_LENGTH;
- ret = sev_read_init_ex_file();
- if (ret)
- return ret;
-
if (sev_es_tmr) {
/*
* Do not include the encryption mask on the physical
@@ -439,7 +444,7 @@ static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc, psp_ret = -1;
+ int rc = 0, psp_ret = -1;
int (*init_function)(int *error);
if (!psp || !psp->sev_data)
@@ -450,8 +455,15 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
- init_function = sev_init_ex_buffer ? __sev_init_ex_locked :
- __sev_init_locked;
+ if (sev_init_ex_buffer) {
+ init_function = __sev_init_ex_locked;
+ rc = sev_read_init_ex_file();
+ if (rc)
+ return rc;
+ } else {
+ init_function = __sev_init_locked;
+ }
+
rc = init_function(&psp_ret);
if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
/*
@@ -744,6 +756,11 @@ static int sev_update_firmware(struct device *dev)
struct page *p;
u64 data_size;
+ if (!sev_version_greater_or_equal(0, 15)) {
+ dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
+ return -1;
+ }
+
if (sev_get_firmware(dev, &firmware) == -ENOENT) {
dev_dbg(dev, "No SEV firmware file present\n");
return -1;
@@ -776,6 +793,14 @@ static int sev_update_firmware(struct device *dev)
data->len = firmware->size;
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
+ /*
+ * A quirk for fixing the committed TCB version, when upgrading from
+ * earlier firmware version than 1.50.
+ */
+ if (!ret && !sev_version_greater_or_equal(1, 50))
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
else
@@ -1285,8 +1310,7 @@ void sev_pci_init(void)
if (sev_get_api_version())
goto err;
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(sev->dev) == 0)
+ if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
/* If an init_ex_path is provided rely on INIT_EX for PSP initialization
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 6140e4927322..9efd88f871d1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -274,7 +274,7 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
}
ret = dma_map_sg(dev, sg, *nents, direction);
- if (dma_mapping_error(dev, ret)) {
+ if (!ret) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9a0558ed82f9..9f0b94c8e03d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -22,7 +22,8 @@ enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
HPRE_CLUSTER2,
- HPRE_CLUSTER3
+ HPRE_CLUSTER3,
+ HPRE_CLUSTERS_NUM_MAX
};
enum hpre_ctrl_dbgfs_file {
@@ -42,9 +43,6 @@ enum hpre_dfx_dbgfs_file {
HPRE_DFX_FILE_NUM
};
-#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1)
-#define HPRE_CLUSTERS_NUM_V3 1
-#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
struct hpre_debugfs_file {
@@ -105,5 +103,5 @@ struct hpre_sqe {
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
-
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg);
#endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 3ba6f15deafc..ef02dadd6217 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -51,6 +51,12 @@ struct hpre_ctx;
#define HPRE_ECC_HW256_KSZ_B 32
#define HPRE_ECC_HW384_KSZ_B 48
+/* capability register mask of driver */
+#define HPRE_DRV_RSA_MASK_CAP BIT(0)
+#define HPRE_DRV_DH_MASK_CAP BIT(1)
+#define HPRE_DRV_ECDH_MASK_CAP BIT(2)
+#define HPRE_DRV_X25519_MASK_CAP BIT(5)
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -147,7 +153,7 @@ static int hpre_alloc_req_id(struct hpre_ctx *ctx)
int id;
spin_lock_irqsave(&ctx->req_lock, flags);
- id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
+ id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_irqrestore(&ctx->req_lock, flags);
return id;
@@ -488,7 +494,7 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
- ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
if (ret)
hpre_stop_qp_and_put(qp);
@@ -2002,55 +2008,53 @@ static struct kpp_alg dh = {
},
};
-static struct kpp_alg ecdh_nist_p192 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p192_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p192",
- .cra_driver_name = "hpre-ecdh-nist-p192",
- .cra_module = THIS_MODULE,
- },
-};
-
-static struct kpp_alg ecdh_nist_p256 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p256_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p256",
- .cra_driver_name = "hpre-ecdh-nist-p256",
- .cra_module = THIS_MODULE,
- },
-};
-
-static struct kpp_alg ecdh_nist_p384 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p384_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p384",
- .cra_driver_name = "hpre-ecdh-nist-p384",
- .cra_module = THIS_MODULE,
- },
+static struct kpp_alg ecdh_curves[] = {
+ {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p192_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p192",
+ .cra_driver_name = "hpre-ecdh-nist-p192",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p256_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "hpre-ecdh-nist-p256",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p384_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "hpre-ecdh-nist-p384",
+ .cra_module = THIS_MODULE,
+ },
+ }
};
static struct kpp_alg curve25519_alg = {
@@ -2070,78 +2074,144 @@ static struct kpp_alg curve25519_alg = {
},
};
-
-static int hpre_register_ecdh(void)
+static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
- ret = crypto_register_kpp(&ecdh_nist_p192);
- if (ret)
- return ret;
+ if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+ return 0;
- ret = crypto_register_kpp(&ecdh_nist_p256);
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
if (ret)
- goto unregister_ecdh_p192;
+ dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);
- ret = crypto_register_kpp(&ecdh_nist_p384);
+ return ret;
+}
+
+static void hpre_unregister_rsa(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+ return;
+
+ crypto_unregister_akcipher(&rsa);
+}
+
+static int hpre_register_dh(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+ return 0;
+
+ ret = crypto_register_kpp(&dh);
if (ret)
- goto unregister_ecdh_p256;
+ dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hpre_unregister_dh(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+ return;
+
+ crypto_unregister_kpp(&dh);
+}
+
+static int hpre_register_ecdh(struct hisi_qm *qm)
+{
+ int ret, i;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
+ ret = crypto_register_kpp(&ecdh_curves[i]);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
+ ecdh_curves[i].base.cra_name, ret);
+ goto unreg_kpp;
+ }
+ }
return 0;
-unregister_ecdh_p256:
- crypto_unregister_kpp(&ecdh_nist_p256);
-unregister_ecdh_p192:
- crypto_unregister_kpp(&ecdh_nist_p192);
+unreg_kpp:
+ for (--i; i >= 0; --i)
+ crypto_unregister_kpp(&ecdh_curves[i]);
+
return ret;
}
-static void hpre_unregister_ecdh(void)
+static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
- crypto_unregister_kpp(&ecdh_nist_p384);
- crypto_unregister_kpp(&ecdh_nist_p256);
- crypto_unregister_kpp(&ecdh_nist_p192);
+ int i;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+ return;
+
+ for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
+ crypto_unregister_kpp(&ecdh_curves[i]);
+}
+
+static int hpre_register_x25519(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+ return 0;
+
+ ret = crypto_register_kpp(&curve25519_alg);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hpre_unregister_x25519(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+ return;
+
+ crypto_unregister_kpp(&curve25519_alg);
}
int hpre_algs_register(struct hisi_qm *qm)
{
int ret;
- rsa.base.cra_flags = 0;
- ret = crypto_register_akcipher(&rsa);
+ ret = hpre_register_rsa(qm);
if (ret)
return ret;
- ret = crypto_register_kpp(&dh);
+ ret = hpre_register_dh(qm);
if (ret)
goto unreg_rsa;
- if (qm->ver >= QM_HW_V3) {
- ret = hpre_register_ecdh();
- if (ret)
- goto unreg_dh;
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- goto unreg_ecdh;
- }
- return 0;
+ ret = hpre_register_ecdh(qm);
+ if (ret)
+ goto unreg_dh;
+
+ ret = hpre_register_x25519(qm);
+ if (ret)
+ goto unreg_ecdh;
+
+ return ret;
unreg_ecdh:
- hpre_unregister_ecdh();
+ hpre_unregister_ecdh(qm);
unreg_dh:
- crypto_unregister_kpp(&dh);
+ hpre_unregister_dh(qm);
unreg_rsa:
- crypto_unregister_akcipher(&rsa);
+ hpre_unregister_rsa(qm);
return ret;
}
void hpre_algs_unregister(struct hisi_qm *qm)
{
- if (qm->ver >= QM_HW_V3) {
- crypto_unregister_kpp(&curve25519_alg);
- hpre_unregister_ecdh();
- }
-
- crypto_unregister_kpp(&dh);
- crypto_unregister_akcipher(&rsa);
+ hpre_unregister_x25519(qm);
+ hpre_unregister_ecdh(qm);
+ hpre_unregister_dh(qm);
+ hpre_unregister_rsa(qm);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 9d529df0eab9..471e5ca720f5 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -53,9 +53,7 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB 0x301414
-#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE 0
@@ -79,8 +77,6 @@
#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
#define HPRE_BD_USR_MASK GENMASK(1, 0)
-#define HPRE_CLUSTER_CORE_MASK_V2 GENMASK(3, 0)
-#define HPRE_CLUSTER_CORE_MASK_V3 GENMASK(7, 0)
#define HPRE_PREFETCH_CFG 0x301130
#define HPRE_SVA_PREFTCH_DFX 0x30115C
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
@@ -122,6 +118,8 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43
+#define HPRE_DEV_ALG_MAX_LEN 256
+
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -137,6 +135,38 @@ struct hpre_hw_error {
const char *msg;
};
+struct hpre_dev_alg {
+ u32 alg_msk;
+ const char *alg;
+};
+
+static const struct hpre_dev_alg hpre_dev_algs[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = "rsa\n"
+ }, {
+ .alg_msk = BIT(1),
+ .alg = "dh\n"
+ }, {
+ .alg_msk = BIT(2),
+ .alg = "ecdh\n"
+ }, {
+ .alg_msk = BIT(3),
+ .alg = "ecdsa\n"
+ }, {
+ .alg_msk = BIT(4),
+ .alg = "sm2\n"
+ }, {
+ .alg_msk = BIT(5),
+ .alg = "x25519\n"
+ }, {
+ .alg_msk = BIT(6),
+ .alg = "x448\n"
+ }, {
+ /* sentinel */
+ }
+};
+
static struct hisi_qm_list hpre_devices = {
.register_to_crypto = hpre_algs_register,
.unregister_from_crypto = hpre_algs_unregister,
@@ -147,6 +177,62 @@ static const char * const hpre_debug_file_name[] = {
[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
+enum hpre_cap_type {
+ HPRE_QM_NFE_MASK_CAP,
+ HPRE_QM_RESET_MASK_CAP,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
+ HPRE_QM_CE_MASK_CAP,
+ HPRE_NFE_MASK_CAP,
+ HPRE_RESET_MASK_CAP,
+ HPRE_OOO_SHUTDOWN_MASK_CAP,
+ HPRE_CE_MASK_CAP,
+ HPRE_CLUSTER_NUM_CAP,
+ HPRE_CORE_TYPE_NUM_CAP,
+ HPRE_CORE_NUM_CAP,
+ HPRE_CLUSTER_CORE_NUM_CAP,
+ HPRE_CORE_ENABLE_BITMAP_CAP,
+ HPRE_DRV_ALG_BITMAP_CAP,
+ HPRE_DEV_ALG_BITMAP_CAP,
+ HPRE_CORE1_ALG_BITMAP_CAP,
+ HPRE_CORE2_ALG_BITMAP_CAP,
+ HPRE_CORE3_ALG_BITMAP_CAP,
+ HPRE_CORE4_ALG_BITMAP_CAP,
+ HPRE_CORE5_ALG_BITMAP_CAP,
+ HPRE_CORE6_ALG_BITMAP_CAP,
+ HPRE_CORE7_ALG_BITMAP_CAP,
+ HPRE_CORE8_ALG_BITMAP_CAP,
+ HPRE_CORE9_ALG_BITMAP_CAP,
+ HPRE_CORE10_ALG_BITMAP_CAP
+};
+
+static const struct hisi_qm_cap_info hpre_basic_info[] = {
+ {HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
+ {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
+ {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
+ {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
+ {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
+ {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
+ {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
+ {HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
+ {HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
+ {HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
+ {HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
+ {HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
+ {HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
+ {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
+};
+
static const struct hpre_hw_error hpre_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -262,6 +348,46 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
+{
+ u32 cap_val;
+
+ cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
+ if (alg & cap_val)
+ return true;
+
+ return false;
+}
+
+static int hpre_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u32 alg_msk;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);
+
+ for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
+ if (alg_msk & hpre_dev_algs[i].alg_msk)
+ strcat(algs, hpre_dev_algs[i].alg);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
+
static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -330,14 +456,12 @@ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static inline int hpre_cluster_num(struct hisi_qm *qm)
{
- return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 :
- HPRE_CLUSTERS_NUM_V2;
+ return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
}
static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
{
- return (qm->ver >= QM_HW_V3) ?
- HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2;
+ return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
}
struct hisi_qp *hpre_create_qp(u8 type)
@@ -457,7 +581,7 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -478,7 +602,7 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
@@ -630,7 +754,8 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
if (enable) {
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- val2 = HPRE_HAC_RAS_NFE_ENABLE;
+ val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -644,21 +769,30 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- /* disable hpre hw error interrupts */
- writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+ u32 ce, nfe;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ /* disable hpre hw error interrupts */
+ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
+ u32 ce, nfe;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+
/* clear HPRE hw error source if having */
- writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
/* configure error type */
- writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
- writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
@@ -708,7 +842,7 @@ static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}
-static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
+static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
int cluster_index = file->index - HPRE_CLUSTER_CTRL;
@@ -716,8 +850,6 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
HPRE_CLSTR_ADDR_INTRVL;
writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
-
- return 0;
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -792,9 +924,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
goto err_input;
break;
case HPRE_CLUSTER_CTRL:
- ret = hpre_cluster_inqry_write(file, val);
- if (ret)
- goto err_input;
+ hpre_cluster_inqry_write(file, val);
break;
default:
ret = -EINVAL;
@@ -1006,15 +1136,13 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
if (pdev->revision == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
- if (pdev->revision >= QM_HW_V3)
- qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
- else
- qm->algs = "rsa\ndh";
qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
@@ -1030,7 +1158,19 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qm_list = &hpre_devices;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init hpre qm configures!\n");
+ return ret;
+ }
+
+ ret = hpre_set_qm_algs(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to set hpre algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static int hpre_show_last_regs_init(struct hisi_qm *qm)
@@ -1129,7 +1269,11 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,14 +1291,20 @@ static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
- err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR;
- err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+ err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
+ err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+ err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
err_info->acpi_rst = "HRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
}
static const struct hisi_qm_err_ini hpre_err_ini = {
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ad83c194d664..8b387de69d22 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -22,20 +22,17 @@
#define QM_VF_AEQ_INT_MASK 0x4
#define QM_VF_EQ_INT_SOURCE 0x8
#define QM_VF_EQ_INT_MASK 0xc
-#define QM_IRQ_NUM_V1 1
-#define QM_IRQ_NUM_PF_V2 4
-#define QM_IRQ_NUM_VF_V2 2
-#define QM_IRQ_NUM_VF_V3 3
-#define QM_EQ_EVENT_IRQ_VECTOR 0
-#define QM_AEQ_EVENT_IRQ_VECTOR 1
-#define QM_CMD_EVENT_IRQ_VECTOR 2
-#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
+#define QM_IRQ_VECTOR_MASK GENMASK(15, 0)
+#define QM_IRQ_TYPE_MASK GENMASK(15, 0)
+#define QM_IRQ_TYPE_SHIFT 16
+#define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0)
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
#define QM_MB_CMD_DATA_SHIFT 32
#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
+#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT 0
@@ -77,6 +74,9 @@
#define QM_EQ_OVERFLOW 1
#define QM_CQE_ERROR 2
+#define QM_XQ_DEPTH_SHIFT 16
+#define QM_XQ_DEPTH_MASK GENMASK(15, 0)
+
#define QM_DOORBELL_CMD_SQ 0
#define QM_DOORBELL_CMD_CQ 1
#define QM_DOORBELL_CMD_EQ 2
@@ -86,11 +86,7 @@
#define QM_DB_CMD_SHIFT_V1 16
#define QM_DB_INDEX_SHIFT_V1 32
#define QM_DB_PRIORITY_SHIFT_V1 48
-#define QM_QUE_ISO_CFG_V 0x0030
#define QM_PAGE_SIZE 0x0034
-#define QM_QUE_ISO_EN 0x100154
-#define QM_CAPBILITY 0x100158
-#define QM_QP_NUN_MASK GENMASK(10, 0)
#define QM_QP_DB_INTERVAL 0x10000
#define QM_MEM_START_INIT 0x100040
@@ -126,7 +122,6 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
#define QM_ABNORMAL_INT_STATUS 0x100008
@@ -144,8 +139,10 @@
#define QM_RAS_NFE_ENABLE 0x1000f4
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
-#define QM_RAS_MSI_INT_SEL 0x1040f4
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
+#define QM_ECC_MBIT BIT(2)
+#define QM_DB_TIMEOUT BIT(10)
+#define QM_OF_FIFO_OF BIT(11)
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -205,6 +202,8 @@
#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208
+#define QM_FUNC_CAPS_REG 0x3100
+#define QM_CAPBILITY_VERSION GENMASK(7, 0)
#define PCI_BAR_2 2
#define PCI_BAR_4 4
@@ -221,7 +220,6 @@
#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
-#define QM_EQ_DEPTH (1024 * 2)
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
@@ -270,8 +268,8 @@
((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
-#define QM_MK_CQC_DW3_V2(cqe_sz) \
- ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
+#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
+ ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
#define QM_MK_SQC_W13(priority, orders, alg_type) \
(((priority) << QM_SQ_PRIORITY_SHIFT) | \
@@ -284,8 +282,8 @@
((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
-#define QM_MK_SQC_DW3_V2(sqe_sz) \
- ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
+#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
+ ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
#define INIT_QC_COMMON(qc, base, pasid) do { \
(qc)->head = 0; \
@@ -329,6 +327,48 @@ enum qm_mb_cmd {
QM_VF_GET_QOS,
};
+enum qm_basic_type {
+ QM_TOTAL_QP_NUM_CAP = 0x0,
+ QM_FUNC_MAX_QP_CAP,
+ QM_XEQ_DEPTH_CAP,
+ QM_QP_DEPTH_CAP,
+ QM_EQ_IRQ_TYPE_CAP,
+ QM_AEQ_IRQ_TYPE_CAP,
+ QM_ABN_IRQ_TYPE_CAP,
+ QM_PF2VF_IRQ_TYPE_CAP,
+ QM_PF_IRQ_NUM_CAP,
+ QM_VF_IRQ_NUM_CAP,
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
+ {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
+ {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
+ {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
+ {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
+};
+
+static const struct hisi_qm_cap_info qm_basic_info[] = {
+ {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800},
+ {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
+ {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
+ {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
+ {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
+ {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
+};
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -421,15 +461,11 @@ struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
u8 cmd, u16 index, u8 priority);
- u32 (*get_irq_num)(struct hisi_qm *qm);
int (*debug_init)(struct hisi_qm *qm);
- void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
+ void (*hw_error_init)(struct hisi_qm *qm);
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
- int (*stop_qp)(struct hisi_qp *qp);
int (*set_msi)(struct hisi_qm *qm, bool set);
- int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
- int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
};
struct qm_dfx_item {
@@ -533,6 +569,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
{50100, 100000, 19}
};
+static void qm_irqs_unregister(struct hisi_qm *qm);
+
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
enum qm_state curr = atomic_read(&qm->status.flags);
@@ -623,22 +661,17 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
}
/* Check if the error causes the master ooo block */
-static int qm_check_dev_error(struct hisi_qm *qm)
+static bool qm_check_dev_error(struct hisi_qm *qm)
{
u32 val, dev_val;
if (qm->fun_type == QM_HW_VF)
- return 0;
+ return false;
- val = qm_get_hw_error_status(qm);
- dev_val = qm_get_dev_err_status(qm);
+ val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
+ dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
- if (qm->ver < QM_HW_V3)
- return (val & QM_ECC_MBIT) ||
- (dev_val & qm->err_info.ecc_2bits_mask);
-
- return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
- (dev_val & (~qm->err_info.dev_ce_mask));
+ return val || dev_val;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -728,8 +761,12 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
+ int ret;
+ u32 val;
+
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
+ ret = -EBUSY;
goto mb_busy;
}
@@ -737,6 +774,14 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
+ ret = -ETIMEDOUT;
+ goto mb_busy;
+ }
+
+ val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
+ if (val & QM_MB_STATUS_MASK) {
+ dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
+ ret = -EIO;
goto mb_busy;
}
@@ -744,7 +789,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
mb_busy:
atomic64_inc(&qm->debug.dfx.mb_err_cnt);
- return -EBUSY;
+ return ret;
}
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
@@ -828,25 +873,52 @@ static int qm_dev_mem_reset(struct hisi_qm *qm)
POLL_TIMEOUT);
}
-static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
+/**
+ * hisi_qm_get_hw_info() - Get device information.
+ * @qm: The qm from which to get the information.
+ * @info_table: Array for storing device information.
+ * @index: Index in info_table.
+ * @is_read: Whether to read the value from the register; 0 means reading from the register is not supported.
+ *
+ * This function returns device information the caller needs.
+ */
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read)
{
- return QM_IRQ_NUM_V1;
+ u32 val;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ return info_table[index].v1_val;
+ case QM_HW_V2:
+ return info_table[index].v2_val;
+ default:
+ if (!is_read)
+ return info_table[index].v3_val;
+
+ val = readl(qm->io_base + info_table[index].offset);
+ return (val >> info_table[index].shift) & info_table[index].mask;
+ }
}
+EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
-static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
+static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+ u16 *high_bits, enum qm_basic_type type)
{
- if (qm->fun_type == QM_HW_PF)
- return QM_IRQ_NUM_PF_V2;
- else
- return QM_IRQ_NUM_VF_V2;
+ u32 depth;
+
+ depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
+ *high_bits = depth & QM_XQ_DEPTH_MASK;
+ *low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}
-static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
+static u32 qm_get_irq_num(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
- return QM_IRQ_NUM_PF_V2;
+ return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
- return QM_IRQ_NUM_VF_V3;
+ return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}
static int qm_pm_get_sync(struct hisi_qm *qm)
@@ -854,7 +926,7 @@ static int qm_pm_get_sync(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return 0;
ret = pm_runtime_resume_and_get(dev);
@@ -870,7 +942,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_mark_last_busy(dev);
@@ -879,7 +951,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
static void qm_cq_head_update(struct hisi_qp *qp)
{
- if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
+ if (qp->qp_status.cq_head == qp->cq_depth - 1) {
qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
qp->qp_status.cq_head = 0;
} else {
@@ -911,6 +983,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
struct hisi_qm *qm = poll_data->qm;
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+ u16 eq_depth = qm->eq_depth;
int eqe_num = 0;
u16 cqn;
@@ -919,7 +992,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
poll_data->qp_finish_id[eqe_num] = cqn;
eqe_num++;
- if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
+ if (qm->status.eq_head == eq_depth - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
eqe = qm->eqe;
qm->status.eq_head = 0;
@@ -928,7 +1001,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
qm->status.eq_head++;
}
- if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
+ if (eqe_num == (eq_depth >> 1) - 1)
break;
}
@@ -1068,6 +1141,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+ u16 aeq_depth = qm->aeq_depth;
u32 type, qp_id;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
@@ -1092,7 +1166,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
break;
}
- if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
+ if (qm->status.aeq_head == aeq_depth - 1) {
qm->status.aeqc_phase = !qm->status.aeqc_phase;
aeqe = qm->aeqe;
qm->status.aeq_head = 0;
@@ -1118,24 +1192,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-static void qm_irq_unregister(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
-
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
-
- if (qm->ver > QM_HW_V1) {
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
- }
-
- if (qm->ver > QM_HW_V2)
- free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
-}
-
static void qm_init_qp_status(struct hisi_qp *qp)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1151,7 +1207,7 @@ static void qm_init_prefetch(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
u32 page_type = 0x0;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
switch (PAGE_SIZE) {
@@ -1270,7 +1326,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
}
break;
case SHAPER_VFT:
- if (qm->ver >= QM_HW_V3) {
+ if (factor) {
tmp = factor->cir_b |
(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
@@ -1288,10 +1344,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
u32 fun_num, u32 base, u32 number)
{
- struct qm_shaper_factor *factor = &qm->factor[fun_num];
+ struct qm_shaper_factor *factor = NULL;
unsigned int val;
int ret;
+ if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ factor = &qm->factor[fun_num];
+
ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
val & BIT(0), POLL_PERIOD,
POLL_TIMEOUT);
@@ -1349,7 +1408,7 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
}
/* init default shaper qos val */
- if (qm->ver >= QM_HW_V3) {
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
ret = qm_shaper_init_vft(qm, fun_num);
if (ret)
goto back_sqc_cqc;
@@ -1357,11 +1416,9 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return 0;
back_sqc_cqc:
- for (i = SQC_VFT; i <= CQC_VFT; i++) {
- ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
- if (ret)
- return ret;
- }
+ for (i = SQC_VFT; i <= CQC_VFT; i++)
+ qm_set_vft_common(qm, i, fun_num, 0, 0);
+
return ret;
}
@@ -1857,39 +1914,19 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
kfree(ctx_addr);
}
-static int dump_show(struct hisi_qm *qm, void *info,
+static void dump_show(struct hisi_qm *qm, void *info,
unsigned int info_size, char *info_name)
{
struct device *dev = &qm->pdev->dev;
- u8 *info_buf, *info_curr = info;
+ u8 *info_curr = info;
u32 i;
#define BYTE_PER_DW 4
- info_buf = kzalloc(info_size, GFP_KERNEL);
- if (!info_buf)
- return -ENOMEM;
-
- for (i = 0; i < info_size; i++, info_curr++) {
- if (i % BYTE_PER_DW == 0)
- info_buf[i + 3UL] = *info_curr;
- else if (i % BYTE_PER_DW == 1)
- info_buf[i + 1UL] = *info_curr;
- else if (i % BYTE_PER_DW == 2)
- info_buf[i - 1] = *info_curr;
- else if (i % BYTE_PER_DW == 3)
- info_buf[i - 3] = *info_curr;
- }
-
dev_info(dev, "%s DUMP\n", info_name);
- for (i = 0; i < info_size; i += BYTE_PER_DW) {
+ for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
- info_buf[i], info_buf[i + 1UL],
- info_buf[i + 2UL], info_buf[i + 3UL]);
+ *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
}
-
- kfree(info_buf);
-
- return 0;
}
static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
@@ -1929,23 +1966,18 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
if (qm->sqc) {
sqc_curr = qm->sqc + qp_id;
- ret = dump_show(qm, sqc_curr, sizeof(*sqc),
- "SOFT SQC");
- if (ret)
- dev_info(dev, "Show soft sqc failed!\n");
+ dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
}
up_read(&qm->qps_lock);
- goto err_free_ctx;
+ goto free_ctx;
}
- ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
- if (ret)
- dev_info(dev, "Show hw sqc failed!\n");
+ dump_show(qm, sqc, sizeof(*sqc), "SQC");
-err_free_ctx:
+free_ctx:
qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
- return ret;
+ return 0;
}
static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
@@ -1975,23 +2007,18 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
if (qm->cqc) {
cqc_curr = qm->cqc + qp_id;
- ret = dump_show(qm, cqc_curr, sizeof(*cqc),
- "SOFT CQC");
- if (ret)
- dev_info(dev, "Show soft cqc failed!\n");
+ dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
}
up_read(&qm->qps_lock);
- goto err_free_ctx;
+ goto free_ctx;
}
- ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
- if (ret)
- dev_info(dev, "Show hw cqc failed!\n");
+ dump_show(qm, cqc, sizeof(*cqc), "CQC");
-err_free_ctx:
+free_ctx:
qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
- return ret;
+ return 0;
}
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
@@ -2015,9 +2042,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
if (ret)
goto err_free_ctx;
- ret = dump_show(qm, xeqc, size, name);
- if (ret)
- dev_info(dev, "Show hw %s failed!\n", name);
+ dump_show(qm, xeqc, size, name);
err_free_ctx:
qm_ctx_free(qm, size, xeqc, &xeqc_dma);
@@ -2025,7 +2050,7 @@ err_free_ctx:
}
static int q_dump_param_parse(struct hisi_qm *qm, char *s,
- u32 *e_id, u32 *q_id)
+ u32 *e_id, u32 *q_id, u16 q_depth)
{
struct device *dev = &qm->pdev->dev;
unsigned int qp_num = qm->qp_num;
@@ -2051,8 +2076,8 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
}
ret = kstrtou32(presult, 0, e_id);
- if (ret || *e_id >= QM_Q_DEPTH) {
- dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
+ if (ret || *e_id >= q_depth) {
+ dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
return -EINVAL;
}
@@ -2066,54 +2091,49 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
static int qm_sq_dump(struct hisi_qm *qm, char *s)
{
- struct device *dev = &qm->pdev->dev;
+ u16 sq_depth = qm->qp_array->sq_depth;
void *sqe, *sqe_curr;
struct hisi_qp *qp;
u32 qp_id, sqe_id;
int ret;
- ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
if (ret)
return ret;
- sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+ sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
if (!sqe)
return -ENOMEM;
qp = &qm->qp_array[qp_id];
- memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+ memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
qm->debug.sqe_mask_len);
- ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
- if (ret)
- dev_info(dev, "Show sqe failed!\n");
+ dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
kfree(sqe);
- return ret;
+ return 0;
}
static int qm_cq_dump(struct hisi_qm *qm, char *s)
{
- struct device *dev = &qm->pdev->dev;
struct qm_cqe *cqe_curr;
struct hisi_qp *qp;
u32 qp_id, cqe_id;
int ret;
- ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
if (ret)
return ret;
qp = &qm->qp_array[qp_id];
cqe_curr = qp->cqe + cqe_id;
- ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
- if (ret)
- dev_info(dev, "Show cqe failed!\n");
+ dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
- return ret;
+ return 0;
}
static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
@@ -2131,11 +2151,11 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
if (ret)
return -EINVAL;
- if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
- dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
+ if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
+ dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
return -EINVAL;
- } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
- dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
+ } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
+ dev_err(dev, "Please input aeqe num (0-%u)", qm->aeq_depth - 1);
return -EINVAL;
}
@@ -2150,9 +2170,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
goto err_unlock;
}
- ret = dump_show(qm, xeqe, size, name);
- if (ret)
- dev_info(dev, "Show %s failed!\n", name);
+ dump_show(qm, xeqe, size, name);
err_unlock:
up_read(&qm->qps_lock);
@@ -2245,8 +2263,10 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
return ret;
/* Judge if the instance is being reset. */
- if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
- return 0;
+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+ ret = 0;
+ goto put_dfx_access;
+ }
if (count > QM_DBG_WRITE_LEN) {
ret = -ENOSPC;
@@ -2300,58 +2320,65 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
file->debug = &qm->debug;
}
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_cfg(struct hisi_qm *qm)
{
- qm->error_mask = ce | nfe | fe;
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
/* clear QM hw residual error source */
- writel(QM_ABNORMAL_INT_SOURCE_CLR,
- qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
- writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
- writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
+ writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
+ u32 irq_unmask;
- qm_hw_error_cfg(qm, ce, nfe, fe);
+ qm_hw_error_cfg(qm);
+ irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
- writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+ u32 irq_mask = qm->error_mask;
+
+ irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
+ u32 irq_unmask;
- qm_hw_error_cfg(qm, ce, nfe, fe);
+ qm_hw_error_cfg(qm);
/* enable close master ooo when hardware error happened */
- writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
- writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+ u32 irq_mask = qm->error_mask;
+
+ irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
/* disable close master ooo when hardware error happened */
writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
@@ -2396,7 +2423,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp, val;
+ u32 error_status, tmp;
/* read err sts */
tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -2407,17 +2434,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
- /* ce error does not need to be reset */
- if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
- writel(error_status, qm->io_base +
- QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_info.nfe,
- qm->io_base + QM_RAS_NFE_ENABLE);
- return ACC_ERR_RECOVERED;
- }
+ if (error_status & qm->err_info.qm_reset_mask)
+ return ACC_ERR_NEED_RESET;
- return ACC_ERR_NEED_RESET;
+ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -2493,7 +2514,7 @@ static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
u64 val;
u32 i;
- if (!qm->vfs_num || qm->ver < QM_HW_V3)
+ if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return 0;
while (true) {
@@ -2756,7 +2777,6 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
- .get_irq_num = qm_get_irq_num_v1,
.hw_error_init = qm_hw_error_init_v1,
.set_msi = qm_set_msi,
};
@@ -2764,7 +2784,6 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v2,
.hw_error_init = qm_hw_error_init_v2,
.hw_error_uninit = qm_hw_error_uninit_v2,
.hw_error_handle = qm_hw_error_handle_v2,
@@ -2774,14 +2793,10 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v3,
.hw_error_init = qm_hw_error_init_v3,
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
- .stop_qp = qm_stop_qp,
.set_msi = qm_set_msi_v3,
- .ping_all_vfs = qm_ping_all_vfs,
- .ping_pf = qm_ping_pf,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2789,7 +2804,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
+ if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
return NULL;
return qp->sqe + sq_tail * qp->qm->sqe_size;
@@ -2830,7 +2845,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp = &qm->qp_array[qp_id];
hisi_qm_unset_hw_reset(qp);
- memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
qp->event_cb = NULL;
qp->req_cb = NULL;
@@ -2911,9 +2926,9 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
- sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+ sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
} else {
- sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
sqc->w8 = 0; /* rand_qc */
}
sqc->cq_num = cpu_to_le16(qp_id);
@@ -2954,9 +2969,9 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
if (ver == QM_HW_V1) {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
QM_QC_CQE_SIZE));
- cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+ cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
} else {
- cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
cqc->w8 = 0; /* rand_qc */
}
cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
@@ -3043,13 +3058,14 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
{
int qp_used = atomic_read(&qp->qp_status.used);
u16 cur_tail = qp->qp_status.sq_tail;
- u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+ u16 sq_depth = qp->sq_depth;
+ u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
struct hisi_qm *qm = qp->qm;
u16 pos;
int i;
for (i = 0; i < qp_used; i++) {
- pos = (i + cur_head) % QM_Q_DEPTH;
+ pos = (i + cur_head) % sq_depth;
qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
atomic_dec(&qp->qp_status.used);
}
@@ -3078,8 +3094,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
return 0;
/* Kunpeng930 supports drain qp by device */
- if (qm->ops->stop_qp) {
- ret = qm->ops->stop_qp(qp);
+ if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+ ret = qm_stop_qp(qp);
if (ret)
dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
return ret;
@@ -3197,7 +3213,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
+ u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
void *sqe = qm_get_avail_sqe(qp);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
@@ -3286,7 +3302,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
{
struct hisi_qp *qp = q->priv;
- hisi_qm_cache_wb(qp->qm);
hisi_qm_release_qp(qp);
}
@@ -3310,7 +3325,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
if (qm->ver == QM_HW_V1) {
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
- } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
+ } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
@@ -3387,6 +3402,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
unsigned long arg)
{
struct hisi_qp *qp = q->priv;
+ struct hisi_qp_info qp_info;
struct hisi_qp_ctx qp_ctx;
if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
@@ -3403,11 +3419,25 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
if (copy_to_user((void __user *)arg, &qp_ctx,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- } else {
- return -EINVAL;
+
+ return 0;
+ } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
+ if (copy_from_user(&qp_info, (void __user *)arg,
+ sizeof(struct hisi_qp_info)))
+ return -EFAULT;
+
+ qp_info.sqe_size = qp->qm->sqe_size;
+ qp_info.sq_depth = qp->sq_depth;
+ qp_info.cq_depth = qp->cq_depth;
+
+ if (copy_to_user((void __user *)arg, &qp_info,
+ sizeof(struct hisi_qp_info)))
+ return -EFAULT;
+
+ return 0;
}
- return 0;
+ return -EINVAL;
}
static const struct uacce_ops uacce_qm_ops = {
@@ -3427,6 +3457,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
struct uacce_device *uacce;
unsigned long mmio_page_nr;
unsigned long dus_page_nr;
+ u16 sq_depth, cq_depth;
struct uacce_interface interface = {
.flags = UACCE_DEV_SVA,
.ops = &uacce_qm_ops,
@@ -3453,7 +3484,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->is_vf = pdev->is_virtfn;
uacce->priv = qm;
- uacce->algs = qm->algs;
if (qm->ver == QM_HW_V1)
uacce->api_ver = HISI_QM_API_VER_BASE;
@@ -3464,15 +3494,17 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1)
mmio_page_nr = QM_DOORBELL_PAGE_NR;
- else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
+ else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
mmio_page_nr = QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
else
mmio_page_nr = qm->db_interval / PAGE_SIZE;
+ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
/* Add one more page for device or qp status */
- dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
+ dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
+ sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
PAGE_SHIFT;
uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
@@ -3577,10 +3609,11 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
kfree(qm->qp_array);
}
-static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
+ u16 sq_depth, u16 cq_depth)
{
struct device *dev = &qm->pdev->dev;
- size_t off = qm->sqe_size * QM_Q_DEPTH;
+ size_t off = qm->sqe_size * sq_depth;
struct hisi_qp *qp;
int ret = -ENOMEM;
@@ -3600,6 +3633,8 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
qp->cqe = qp->qdma.va + off;
qp->cqe_dma = qp->qdma.dma + off;
qp->qdma.size = dma_size;
+ qp->sq_depth = sq_depth;
+ qp->cq_depth = cq_depth;
qp->qm = qm;
qp->qp_id = id;
@@ -3626,7 +3661,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
qm->misc_ctl = false;
- if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+ if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
}
@@ -3636,7 +3671,7 @@ static void qm_cmd_uninit(struct hisi_qm *qm)
{
u32 val;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return;
val = readl(qm->io_base + QM_IFC_INT_MASK);
@@ -3648,7 +3683,7 @@ static void qm_cmd_init(struct hisi_qm *qm)
{
u32 val;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return;
/* Clear communication interrupt source */
@@ -3664,7 +3699,7 @@ static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
- if (qm->use_db_isolation)
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
iounmap(qm->db_io_base);
iounmap(qm->io_base);
@@ -3714,7 +3749,9 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
}
idr_destroy(&qm->qp_idr);
- kfree(qm->factor);
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ kfree(qm->factor);
}
/**
@@ -3740,7 +3777,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
- qm_irq_unregister(qm);
+ qm_irqs_unregister(qm);
hisi_qm_pci_uninit(qm);
if (qm->use_sva) {
uacce_remove(qm->uacce);
@@ -3841,7 +3878,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
- eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
DMA_TO_DEVICE);
@@ -3870,7 +3907,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
- aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
DMA_TO_DEVICE);
@@ -4136,14 +4173,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
static void qm_hw_error_init(struct hisi_qm *qm)
{
- struct hisi_qm_err_info *err_info = &qm->err_info;
-
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
- qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
+ qm->ops->hw_error_init(qm);
}
static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -4497,12 +4532,10 @@ static int qm_vf_read_qos(struct hisi_qm *qm)
qm->mb_qos = 0;
/* vf ping pf to get function qos */
- if (qm->ops->ping_pf) {
- ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
- if (ret) {
- pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
- return ret;
- }
+ ret = qm_ping_pf(qm, QM_VF_GET_QOS);
+ if (ret) {
+ pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
+ return ret;
}
while (true) {
@@ -4674,14 +4707,14 @@ static const struct file_operations qm_algqos_fops = {
* hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
* @qm: The qm for which we want to add debugfs files.
*
- * Create function qos debugfs files.
+ * Create function qos debugfs files; a VF pings the PF to get its function qos.
*/
static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
qm, &qm_algqos_fops);
- else
+ else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
qm, &qm_algqos_fops);
}
@@ -4729,7 +4762,7 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
&qm_atomic64_ops);
}
- if (qm->ver >= QM_HW_V3)
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
hisi_qm_set_algqos_init(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
@@ -4768,6 +4801,14 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
+{
+ int i;
+
+ for (i = 1; i <= total_func; i++)
+ qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+}
+
/**
* hisi_qm_sriov_enable() - enable virtual functions
* @pdev: the PCIe device
@@ -4794,7 +4835,17 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
- num_vfs = min_t(int, max_vfs, total_vfs);
+ if (max_vfs > total_vfs) {
+ pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
+ ret = -ERANGE;
+ goto err_put_sync;
+ }
+
+ num_vfs = max_vfs;
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ hisi_qm_init_vf_qos(qm, num_vfs);
+
ret = qm_vf_q_assign(qm, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
@@ -4830,7 +4881,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
int ret;
if (pci_vfs_assigned(pdev)) {
@@ -4845,8 +4895,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
- /* clear vf function shaper configure array */
- memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+
ret = qm_clear_vft_config(qm);
if (ret)
return ret;
@@ -4891,17 +4940,11 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
if (qm->err_ini->log_dev_hw_err)
qm->err_ini->log_dev_hw_err(qm, err_sts);
- /* ce error does not need to be reset */
- if ((err_sts | qm->err_info.dev_ce_mask) ==
- qm->err_info.dev_ce_mask) {
- if (qm->err_ini->clear_dev_hw_err_status)
- qm->err_ini->clear_dev_hw_err_status(qm,
- err_sts);
+ if (err_sts & qm->err_info.dev_reset_mask)
+ return ACC_ERR_NEED_RESET;
- return ACC_ERR_RECOVERED;
- }
-
- return ACC_ERR_NEED_RESET;
+ if (qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
}
return ACC_ERR_RECOVERED;
@@ -5070,8 +5113,8 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
return 0;
/* Kunpeng930 supports to notify VFs to stop before PF reset */
- if (qm->ops->ping_all_vfs) {
- ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+ ret = qm_ping_all_vfs(qm, cmd);
if (ret)
pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
} else {
@@ -5262,8 +5305,8 @@ static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
}
/* Kunpeng930 supports to notify VFs to start after PF reset. */
- if (qm->ops->ping_all_vfs) {
- ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+ ret = qm_ping_all_vfs(qm, cmd);
if (ret)
pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
} else {
@@ -5466,8 +5509,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
if (pdev->is_virtfn)
return PCI_ERS_RESULT_RECOVERED;
- pci_aer_clear_nonfatal_status(pdev);
-
/* reset pcie device controller */
ret = qm_controller_reset(qm);
if (ret) {
@@ -5599,51 +5640,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int qm_irq_register(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, 0, qm->dev_name, qm);
- if (ret)
- return ret;
-
- if (qm->ver > QM_HW_V1) {
- ret = request_threaded_irq(pci_irq_vector(pdev,
- QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, qm_aeq_thread,
- 0, qm->dev_name, qm);
- if (ret)
- goto err_aeq_irq;
-
- if (qm->fun_type == QM_HW_PF) {
- ret = request_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, 0, qm->dev_name, qm);
- if (ret)
- goto err_abonormal_irq;
- }
- }
-
- if (qm->ver > QM_HW_V2) {
- ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
- qm_mb_cmd_irq, 0, qm->dev_name, qm);
- if (ret)
- goto err_mb_cmd_irq;
- }
-
- return 0;
-
-err_mb_cmd_irq:
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
-err_abonormal_irq:
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- return ret;
-}
/**
* hisi_qm_dev_shutdown() - Shutdown device.
@@ -5711,7 +5707,7 @@ err_prepare:
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
pci_save_state(pdev);
- ret = qm->ops->ping_pf(qm, cmd);
+ ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
}
@@ -5729,7 +5725,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
cmd = QM_VF_START_FAIL;
}
- ret = qm->ops->ping_pf(qm, cmd);
+ ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
@@ -5924,21 +5920,193 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
+static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_abnormal_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_aeq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_aeq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
+ qm_aeq_thread, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_eq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_eq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_irqs_unregister(struct hisi_qm *qm)
+{
+ qm_unregister_mb_cmd_irq(qm);
+ qm_unregister_abnormal_irq(qm);
+ qm_unregister_aeq_irq(qm);
+ qm_unregister_eq_irq(qm);
+}
+
+static int qm_irqs_register(struct hisi_qm *qm)
+{
+ int ret;
+
+ ret = qm_register_eq_irq(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_register_aeq_irq(qm);
+ if (ret)
+ goto free_eq_irq;
+
+ ret = qm_register_abnormal_irq(qm);
+ if (ret)
+ goto free_aeq_irq;
+
+ ret = qm_register_mb_cmd_irq(qm);
+ if (ret)
+ goto free_abnormal_irq;
+
+ return 0;
+
+free_abnormal_irq:
+ qm_unregister_abnormal_irq(qm);
+free_aeq_irq:
+ qm_unregister_aeq_irq(qm);
+free_eq_irq:
+ qm_unregister_eq_irq(qm);
+ return ret;
+}
+
static int qm_get_qp_num(struct hisi_qm *qm)
{
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = QM_QNUM_V1;
- else if (qm->ver == QM_HW_V2)
- qm->ctrl_qp_num = QM_QNUM_V2;
- else
- qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
- QM_QP_NUN_MASK;
+ bool is_db_isolation;
- if (qm->use_db_isolation)
- qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
- QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
- else
- qm->max_qp_num = qm->ctrl_qp_num;
+ /* The VF's qp_num is assigned by the PF; since v2 the VF can get it from the vft. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver != QM_HW_V1)
+ /* v2 starts to support get vft by mailbox */
+ return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+
+ return 0;
+ }
+
+ is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+ qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
+ qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ QM_FUNC_MAX_QP_CAP, is_db_isolation);
/* check if qp number is valid */
if (qm->qp_num > qm->max_qp_num) {
@@ -5950,6 +6118,39 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}
+static void qm_get_hw_caps(struct hisi_qm *qm)
+{
+ const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
+ qm_cap_info_pf : qm_cap_info_vf;
+ u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
+ ARRAY_SIZE(qm_cap_info_vf);
+ u32 val, i;
+
+ /* The doorbell isolation register is an independent register. */
+ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
+ if (val)
+ set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+
+ if (qm->ver >= QM_HW_V3) {
+ val = readl(qm->io_base + QM_FUNC_CAPS_REG);
+ qm->cap_ver = val & QM_CAPBILITY_VERSION;
+ }
+
+ /* Get the PF/VF common capabilities */
+ for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
+ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
+ if (val)
+ set_bit(qm_cap_info_comm[i].type, &qm->caps);
+ }
+
+ /* Get the PF/VF specific capabilities */
+ for (i = 0; i < size; i++) {
+ val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
+ if (val)
+ set_bit(cap_info[i].type, &qm->caps);
+ }
+}
+
static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5969,16 +6170,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
- if (qm->ver > QM_HW_V2) {
- if (qm->fun_type == QM_HW_PF)
- qm->use_db_isolation = readl(qm->io_base +
- QM_QUE_ISO_EN) & BIT(0);
- else
- qm->use_db_isolation = readl(qm->io_base +
- QM_QUE_ISO_CFG_V) & BIT(0);
- }
-
- if (qm->use_db_isolation) {
+ qm_get_hw_caps(qm);
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
qm->db_interval = QM_QP_DB_INTERVAL;
qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
qm->db_io_base = ioremap(qm->db_phys_base,
@@ -5993,16 +6186,14 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
- if (qm->fun_type == QM_HW_PF) {
- ret = qm_get_qp_num(qm);
- if (ret)
- goto err_db_ioremap;
- }
+ ret = qm_get_qp_num(qm);
+ if (ret)
+ goto err_db_ioremap;
return 0;
err_db_ioremap:
- if (qm->use_db_isolation)
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
iounmap(qm->db_io_base);
err_ioremap:
iounmap(qm->io_base);
@@ -6033,11 +6224,7 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
goto err_get_pci_res;
pci_set_master(pdev);
- if (!qm->ops->get_irq_num) {
- ret = -EOPNOTSUPP;
- goto err_get_pci_res;
- }
- num_vec = qm->ops->get_irq_num(qm);
+ num_vec = qm_get_irq_num(qm);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -6080,6 +6267,7 @@ static int hisi_qm_init_work(struct hisi_qm *qm)
static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
+ u16 sq_depth, cq_depth;
size_t qp_dma_size;
int i, ret;
@@ -6093,13 +6281,14 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
return -ENOMEM;
}
+ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
/* one more page for device or qp statuses */
- qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
for (i = 0; i < qm->qp_num; i++) {
qm->poll_data[i].qm = qm;
- ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
if (ret)
goto err_init_qp_mem;
@@ -6116,15 +6305,18 @@ err_init_qp_mem:
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- int ret, total_func, i;
+ int ret, total_func;
size_t off = 0;
- total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
- qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
- if (!qm->factor)
- return -ENOMEM;
- for (i = 0; i < total_func; i++)
- qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
+ total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
+ qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+ if (!qm->factor)
+ return -ENOMEM;
+
+ /* Only the PF value needs to be initialized */
+ qm->factor[0].func_qos = QM_QOS_MAX_VAL;
+ }
#define QM_INIT_BUF(qm, type, num) do { \
(qm)->type = ((qm)->qdma.va + (off)); \
@@ -6133,20 +6325,21 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
} while (0)
idr_init(&qm->qp_idr);
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
GFP_ATOMIC);
dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
if (!qm->qdma.va) {
- ret = -ENOMEM;
- goto err_alloc_qdma;
+ ret = -ENOMEM;
+ goto err_destroy_idr;
}
- QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, eqe, qm->eq_depth);
+ QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
@@ -6158,8 +6351,10 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
err_alloc_qp_array:
dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
-err_alloc_qdma:
- kfree(qm->factor);
+err_destroy_idr:
+ idr_destroy(&qm->qp_idr);
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ kfree(qm->factor);
return ret;
}
@@ -6202,17 +6397,10 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
return ret;
- ret = qm_irq_register(qm);
+ ret = qm_irqs_register(qm);
if (ret)
goto err_pci_init;
- if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_irq_register;
- }
-
if (qm->fun_type == QM_HW_PF) {
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
@@ -6251,7 +6439,7 @@ err_alloc_uacce:
qm->uacce = NULL;
}
err_irq_register:
- qm_irq_unregister(qm);
+ qm_irqs_unregister(qm);
err_pci_init:
hisi_qm_pci_uninit(qm);
return ret;
@@ -6302,7 +6490,7 @@ void hisi_qm_pm_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
@@ -6321,7 +6509,7 @@ void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_get_noresume(dev);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index d2a0bc93e752..3e57fc04b377 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -17,6 +17,7 @@ struct sec_alg_res {
dma_addr_t a_ivin_dma;
u8 *out_mac;
dma_addr_t out_mac_dma;
+ u16 depth;
};
/* Cipher request of SEC private */
@@ -115,9 +116,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req *req_list[QM_Q_DEPTH];
+ struct sec_req **req_list;
struct idr req_idr;
- struct sec_alg_res res[QM_Q_DEPTH];
+ struct sec_alg_res *res;
struct sec_ctx *ctx;
spinlock_t req_lock;
struct list_head backlog;
@@ -191,8 +192,37 @@ struct sec_dev {
bool iommu_used;
};
+enum sec_cap_type {
+ SEC_QM_NFE_MASK_CAP = 0x0,
+ SEC_QM_RESET_MASK_CAP,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP,
+ SEC_QM_CE_MASK_CAP,
+ SEC_NFE_MASK_CAP,
+ SEC_RESET_MASK_CAP,
+ SEC_OOO_SHUTDOWN_MASK_CAP,
+ SEC_CE_MASK_CAP,
+ SEC_CLUSTER_NUM_CAP,
+ SEC_CORE_TYPE_NUM_CAP,
+ SEC_CORE_NUM_CAP,
+ SEC_CORES_PER_CLUSTER_NUM_CAP,
+ SEC_CORE_ENABLE_BITMAP,
+ SEC_DRV_ALG_BITMAP_LOW,
+ SEC_DRV_ALG_BITMAP_HIGH,
+ SEC_DEV_ALG_BITMAP_LOW,
+ SEC_DEV_ALG_BITMAP_HIGH,
+ SEC_CORE1_ALG_BITMAP_LOW,
+ SEC_CORE1_ALG_BITMAP_HIGH,
+ SEC_CORE2_ALG_BITMAP_LOW,
+ SEC_CORE2_ALG_BITMAP_HIGH,
+ SEC_CORE3_ALG_BITMAP_LOW,
+ SEC_CORE3_ALG_BITMAP_HIGH,
+ SEC_CORE4_ALG_BITMAP_LOW,
+ SEC_CORE4_ALG_BITMAP_HIGH,
+};
+
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 77c9f13cf69a..84ae8ddd1a13 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -59,14 +59,14 @@
#define SEC_ICV_MASK 0x000E
#define SEC_SQE_LEN_RATE_MASK 0x3
-#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
#define SEC_MAX_AAD_LEN 65535
#define SEC_MAX_CCM_AAD_LEN 65279
-#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
#define SEC_PBUF_SZ 512
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
@@ -74,11 +74,11 @@
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
-#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
-#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
- SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
-#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
- SEC_PBUF_LEFT_SZ)
+#define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \
+ SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
+ SEC_PBUF_LEFT_SZ(depth))
#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
@@ -104,6 +104,16 @@
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+struct sec_skcipher {
+ u64 alg_msk;
+ struct skcipher_alg alg;
+};
+
+struct sec_aead {
+ u64 alg_msk;
+ struct aead_alg alg;
+};
+
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -128,9 +138,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
int req_id;
spin_lock_bh(&qp_ctx->req_lock);
-
- req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
- 0, QM_Q_DEPTH, GFP_ATOMIC);
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
dev_err(req->ctx->dev, "alloc req id fail!\n");
@@ -148,7 +156,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+ if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
dev_err(req->ctx->dev, "free request id invalid!\n");
return;
}
@@ -300,14 +308,15 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
&res->c_ivin_dma, GFP_KERNEL);
if (!res->c_ivin)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
}
@@ -318,20 +327,21 @@ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->c_ivin)
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
res->c_ivin, res->c_ivin_dma);
}
static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
&res->a_ivin_dma, GFP_KERNEL);
if (!res->a_ivin)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
}
@@ -342,20 +352,21 @@ static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->a_ivin)
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
res->a_ivin, res->a_ivin_dma);
}
static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
&res->out_mac_dma, GFP_KERNEL);
if (!res->out_mac)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].out_mac_dma = res->out_mac_dma +
i * (SEC_MAX_MAC_LEN << 1);
res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
@@ -367,14 +378,14 @@ static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->out_mac)
- dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
res->out_mac, res->out_mac_dma);
}
static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->pbuf)
- dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
res->pbuf, res->pbuf_dma);
}
@@ -384,10 +395,12 @@ static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
*/
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
+ int size = SEC_PBUF_PAGE_NUM(q_depth);
int pbuf_page_offset;
int i, j, k;
- res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
&res->pbuf_dma, GFP_KERNEL);
if (!res->pbuf)
return -ENOMEM;
@@ -400,11 +413,11 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
* So we need SEC_PBUF_PAGE_NUM numbers of PAGE
* for the SEC_TOTAL_PBUF_SZ
*/
- for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ for (i = 0; i <= size; i++) {
pbuf_page_offset = PAGE_SIZE * i;
for (j = 0; j < SEC_PBUF_NUM; j++) {
k = i * SEC_PBUF_NUM + j;
- if (k == QM_Q_DEPTH)
+ if (k == q_depth)
break;
res[k].pbuf = res->pbuf +
j * SEC_PBUF_PKG + pbuf_page_offset;
@@ -470,36 +483,29 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
sec_free_mac_resource(dev, qp_ctx->res);
}
-static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
- int qp_ctx_id, int alg_type)
+static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
+ u16 q_depth = qp_ctx->qp->sq_depth;
struct device *dev = ctx->dev;
- struct sec_qp_ctx *qp_ctx;
- struct hisi_qp *qp;
int ret = -ENOMEM;
- qp_ctx = &ctx->qp_ctx[qp_ctx_id];
- qp = ctx->qps[qp_ctx_id];
- qp->req_type = 0;
- qp->qp_ctx = qp_ctx;
- qp_ctx->qp = qp;
- qp_ctx->ctx = ctx;
-
- qp->req_cb = sec_req_cb;
+ qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
+ if (!qp_ctx->req_list)
+ return ret;
- spin_lock_init(&qp_ctx->req_lock);
- idr_init(&qp_ctx->req_idr);
- INIT_LIST_HEAD(&qp_ctx->backlog);
+ qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
+ if (!qp_ctx->res)
+ goto err_free_req_list;
+ qp_ctx->res->depth = q_depth;
- qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- SEC_SGL_SGE_NR);
+ qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_destroy_idr;
+ goto err_free_res;
}
- qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- SEC_SGL_SGE_NR);
+ qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_out_pool)) {
dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
@@ -509,34 +515,72 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
if (ret)
goto err_free_c_out_pool;
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0)
- goto err_queue_free;
-
return 0;
-err_queue_free:
- sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_destroy_idr:
- idr_destroy(&qp_ctx->req_idr);
+err_free_res:
+ kfree(qp_ctx->res);
+err_free_req_list:
+ kfree(qp_ctx->req_list);
return ret;
}
-static void sec_release_qp_ctx(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
+static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
struct device *dev = ctx->dev;
- hisi_qm_stop_qp(qp_ctx->qp);
sec_alg_resource_free(ctx, qp_ctx);
-
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+ kfree(qp_ctx->res);
+ kfree(qp_ctx->req_list);
+}
+
+static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
+ int qp_ctx_id, int alg_type)
+{
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret;
+ qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
+ qp->req_type = 0;
+ qp->qp_ctx = qp_ctx;
+ qp_ctx->qp = qp;
+ qp_ctx->ctx = ctx;
+
+ qp->req_cb = sec_req_cb;
+
+ spin_lock_init(&qp_ctx->req_lock);
+ idr_init(&qp_ctx->req_idr);
+ INIT_LIST_HEAD(&qp_ctx->backlog);
+
+ ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
+ if (ret)
+ goto err_destroy_idr;
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0)
+ goto err_resource_free;
+
+ return 0;
+
+err_resource_free:
+ sec_free_qp_ctx_resource(ctx, qp_ctx);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
+ return ret;
+}
+
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ hisi_qm_stop_qp(qp_ctx->qp);
+ sec_free_qp_ctx_resource(ctx, qp_ctx);
idr_destroy(&qp_ctx->req_idr);
}
@@ -559,7 +603,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->pbuf_supported = ctx->sec->iommu_used;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx) {
@@ -1679,7 +1723,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
aead_req->out_mac,
authsize, a_req->cryptlen +
a_req->assoclen);
-
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1966,7 +2009,6 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
-
static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
struct sec_req *sreq)
{
@@ -2126,67 +2168,80 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.min_keysize = sec_min_key_size,\
.max_keysize = sec_max_key_size,\
.ivsize = iv_size,\
-},
+}
#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_skciphers[] = {
- SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, 0)
-
- SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
- DES3_EDE_BLOCK_SIZE, 0)
-
- SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
- DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-};
-
-static struct skcipher_alg sec_skciphers_v3[] = {
- SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+static struct sec_skcipher sec_skciphers[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
+ },
+ {
+ .alg_msk = BIT(1),
+ .alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(2),
+ .alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(3),
+ .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
+ SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(4),
+ .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(5),
+ .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(12),
+ .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(13),
+ .alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(14),
+ .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
+ SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(15),
+ .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(16),
+ .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(23),
+ .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
+ },
+ {
+ .alg_msk = BIT(24),
+ .alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
+ DES3_EDE_BLOCK_SIZE),
+ },
};
static int aead_iv_demension_check(struct aead_request *aead_req)
@@ -2380,90 +2435,135 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.maxauthsize = max_authsize,\
}
-static struct aead_alg sec_aeads[] = {
- SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
- sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+static struct sec_aead sec_aeads[] = {
+ {
+ .alg_msk = BIT(6),
+ .alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(7),
+ .alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(17),
+ .alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(18),
+ .alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(43),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
+ sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+ },
+ {
+ .alg_msk = BIT(44),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
+ sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+ },
+ {
+ .alg_msk = BIT(45),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
+ sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ },
+};
- SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
- sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+static void sec_unregister_skcipher(u64 alg_mask, int end)
+{
+ int i;
- SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
- sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ for (i = 0; i < end; i++)
+ if (sec_skciphers[i].alg_msk & alg_mask)
+ crypto_unregister_skcipher(&sec_skciphers[i].alg);
+}
- SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+static int sec_register_skcipher(u64 alg_mask)
+{
+ int i, ret, count;
- SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+ count = ARRAY_SIZE(sec_skciphers);
-static struct aead_alg sec_aeads_v3[] = {
- SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ for (i = 0; i < count; i++) {
+ if (!(sec_skciphers[i].alg_msk & alg_mask))
+ continue;
- SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+ ret = crypto_register_skcipher(&sec_skciphers[i].alg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ sec_unregister_skcipher(alg_mask, i);
+
+ return ret;
+}
+
+static void sec_unregister_aead(u64 alg_mask, int end)
+{
+ int i;
+
+ for (i = 0; i < end; i++)
+ if (sec_aeads[i].alg_msk & alg_mask)
+ crypto_unregister_aead(&sec_aeads[i].alg);
+}
+
+static int sec_register_aead(u64 alg_mask)
+{
+ int i, ret, count;
+
+ count = ARRAY_SIZE(sec_aeads);
+
+ for (i = 0; i < count; i++) {
+ if (!(sec_aeads[i].alg_msk & alg_mask))
+ continue;
+
+ ret = crypto_register_aead(&sec_aeads[i].alg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ sec_unregister_aead(alg_mask, i);
+
+ return ret;
+}
int sec_register_to_crypto(struct hisi_qm *qm)
{
+ u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
int ret;
- /* To avoid repeat register */
- ret = crypto_register_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ ret = sec_register_skcipher(alg_mask);
if (ret)
return ret;
- if (qm->ver > QM_HW_V2) {
- ret = crypto_register_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
- if (ret)
- goto reg_skcipher_fail;
- }
-
- ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ ret = sec_register_aead(alg_mask);
if (ret)
- goto reg_aead_fail;
- if (qm->ver > QM_HW_V2) {
- ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
- if (ret)
- goto reg_aead_v3_fail;
- }
- return ret;
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
-reg_aead_v3_fail:
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
-reg_aead_fail:
- if (qm->ver > QM_HW_V2)
- crypto_unregister_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
-reg_skcipher_fail:
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(struct hisi_qm *qm)
{
- if (qm->ver > QM_HW_V2)
- crypto_unregister_aeads(sec_aeads_v3,
- ARRAY_SIZE(sec_aeads_v3));
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
- if (qm->ver > QM_HW_V2)
- crypto_unregister_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2c0be91c0b09..3705412bac5f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -27,7 +27,6 @@
#define SEC_BD_ERR_CHK_EN3 0xffffbfff
#define SEC_SQE_SIZE 128
-#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 256
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 2
@@ -42,16 +41,11 @@
#define SEC_ECC_NUM 16
#define SEC_ECC_MASH 0xFF
#define SEC_CORE_INT_DISABLE 0x0
-#define SEC_CORE_INT_ENABLE 0x7c1ff
-#define SEC_CORE_INT_CLEAR 0x7c1ff
-#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
#define SEC_RAS_NFE_REG 0x301058
-#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
-#define SEC_RAS_NFE_ENB_MSK 0x7c177
#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x301100
@@ -119,6 +113,16 @@
#define SEC_DFX_COMMON1_LEN 0x45
#define SEC_DFX_COMMON2_LEN 0xBA
+#define SEC_ALG_BITMAP_SHIFT 32
+
+#define SEC_CIPHER_BITMAP (GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \
+ GENMASK(24, 21))
+#define SEC_DIGEST_BITMAP (GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \
+ GENMASK_ULL(42, 25))
+#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
+ GENMASK_ULL(45, 43))
+#define SEC_DEV_ALG_MAX_LEN 256
+
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -129,6 +133,11 @@ struct sec_dfx_item {
u32 offset;
};
+struct sec_dev_alg {
+ u64 alg_msk;
+ const char *algs;
+};
+
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
@@ -137,6 +146,46 @@ static struct hisi_qm_list sec_devices = {
.unregister_from_crypto = sec_unregister_from_crypto,
};
+static const struct hisi_qm_cap_info sec_basic_info[] = {
+ {SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77},
+ {SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77},
+ {SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+ {SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177},
+ {SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
+ {SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
+ {SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
+ {SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ {SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
+ {SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
+ {SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+};
+
+static const struct sec_dev_alg sec_dev_algs[] = { {
+ .alg_msk = SEC_CIPHER_BITMAP,
+ .algs = "cipher\n",
+ }, {
+ .alg_msk = SEC_DIGEST_BITMAP,
+ .algs = "digest\n",
+ }, {
+ .alg_msk = SEC_AEAD_BITMAP,
+ .algs = "aead\n",
+ },
+};
+
static const struct sec_hw_error sec_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -339,6 +388,16 @@ struct hisi_qp **sec_create_qps(void)
return NULL;
}
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
+{
+ u32 cap_val_h, cap_val_l;
+
+ cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
+ cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
+
+ return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
+}
+
static const struct kernel_param_ops sec_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
@@ -415,7 +474,7 @@ static void sec_open_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -435,7 +494,7 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
@@ -506,7 +565,8 @@ static int sec_engine_init(struct hisi_qm *qm)
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
+ reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
+ writel(reg, qm->io_base + SEC_SAA_EN_REG);
if (qm->ver < QM_HW_V3) {
/* HW V2 enable sm4 extra mode, as ctr/ecb */
@@ -576,7 +636,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + SEC_CONTROL_REG);
if (enable) {
val1 |= SEC_AXI_SHUTDOWN_ENABLE;
- val2 = SEC_RAS_NFE_ENB_MSK;
+ val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= SEC_AXI_SHUTDOWN_DISABLE;
val2 = 0x0;
@@ -590,25 +651,30 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void sec_hw_error_enable(struct hisi_qm *qm)
{
+ u32 ce, nfe;
+
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
+ ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+
/* clear SEC hw error source if having */
- writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable RAS int */
- writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(ce, qm->io_base + SEC_RAS_CE_REG);
writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
- writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
+ writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
sec_master_ooo_ctrl(qm, true);
/* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -939,7 +1005,11 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+ nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
}
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -955,14 +1025,20 @@ static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
+ err_info->fe = SEC_RAS_FE_ENB_MSK;
+ err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = BIT(0);
err_info->acpi_rst = "SRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT;
}
static const struct hisi_qm_err_ini sec_err_ini = {
@@ -1001,11 +1077,41 @@ static int sec_pf_probe_init(struct sec_dev *sec)
return ret;
}
+static int sec_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u64 alg_mask;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
+
+ for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
+ if (alg_mask & sec_dev_algs[i].alg_msk)
+ strcat(algs, sec_dev_algs[i].algs);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
+
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "cipher\ndigest\naead";
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -1028,7 +1134,19 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to init sec qm configures!\n");
+ return ret;
+ }
+
+ ret = sec_set_qm_algs(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set sec algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static void sec_qm_uninit(struct hisi_qm *qm)
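
sec_set_qm_algs() above assembles the uacce "algs" string by walking the device capability bitmap, concatenating newline-separated names, then trimming the final newline. A small sketch of that string-building step, assuming illustrative demo_ names and using strlcat() to keep the copy bounded:

/* Sketch; demo_dev_alg and demo_build_alg_string() are not driver APIs. */
#include <linux/string.h>
#include <linux/types.h>

struct demo_dev_alg {
	u64 msk;
	const char *name;	/* includes the trailing '\n' separator */
};

static void demo_build_alg_string(char *buf, size_t len, u64 alg_mask,
				  const struct demo_dev_alg *tbl, size_t n)
{
	size_t i;
	char *nl;

	buf[0] = '\0';
	for (i = 0; i < n; i++)
		if (alg_mask & tbl[i].msk)
			strlcat(buf, tbl[i].name, len);

	/* drop the trailing '\n' so the list ends cleanly */
	nl = strrchr(buf, '\n');
	if (nl)
		*nl = '\0';
}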
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 3dfd3bac5a33..f2e6da3240ae 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,7 +81,8 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
-int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index ad35434a3fdb..6608971d10cd 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -39,6 +39,9 @@
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
+#define HZIP_ALG_ZLIB GENMASK(1, 0)
+#define HZIP_ALG_GZIP GENMASK(3, 2)
+
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
@@ -123,19 +126,19 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
return -EINVAL;
- return param_set_int(val, kp);
+ return param_set_ushort(val, kp);
}
static const struct kernel_param_ops sgl_sge_nr_ops = {
.set = sgl_sge_nr_set,
- .get = param_get_int,
+ .get = param_get_ushort,
};
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
-static u16 get_extra_field_size(const u8 *start)
+static u32 get_extra_field_size(const u8 *start)
{
return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
}
@@ -167,7 +170,7 @@ static u32 __get_gzip_head_size(const u8 *src)
return size;
}
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
{
char buf[HZIP_GZIP_HEAD_BUF];
@@ -183,7 +186,7 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
int ret;
ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size) {
+ if (unlikely(ret != head_size)) {
pr_err("the head size of buffer is wrong (%d)!\n", ret);
return -ENOMEM;
}
@@ -193,11 +196,11 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
{
- if (!acomp_req->src || !acomp_req->slen)
+ if (unlikely(!acomp_req->src || !acomp_req->slen))
return -EINVAL;
- if (req_type == HZIP_ALG_TYPE_GZIP &&
- acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+ if (unlikely(req_type == HZIP_ALG_TYPE_GZIP &&
+ acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
return -EINVAL;
switch (req_type) {
@@ -230,6 +233,8 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
}
set_bit(req_id, req_q->req_bitmap);
+ write_unlock(&req_q->req_lock);
+
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
@@ -242,8 +247,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache->dskip = 0;
}
- write_unlock(&req_q->req_lock);
-
return req_cache;
}
@@ -254,7 +257,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
- memset(req, 0, sizeof(struct hisi_zip_req));
write_unlock(&req_q->req_lock);
}
@@ -339,7 +341,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_sqe zip_sqe;
int ret;
- if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+ if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
return -EINVAL;
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
@@ -365,7 +367,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
ret = hisi_qp_send(qp, &zip_sqe);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
dev_dbg_ratelimited(dev, "failed to send request!\n");
@@ -417,7 +419,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
atomic64_inc(&dfx->recv_cnt);
status = ops->get_status(sqe);
- if (status != 0 && status != HZIP_NC_ERR) {
+ if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
sqe->produced);
@@ -450,7 +452,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
/* let's output compression head now */
head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0) {
+ if (unlikely(head_size < 0)) {
dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
head_size);
return head_size;
@@ -461,7 +463,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
+ if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
hisi_zip_remove_req(qp_ctx, req);
}
@@ -478,7 +480,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
int head_size, ret;
head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
- if (head_size < 0) {
+ if (unlikely(head_size < 0)) {
dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
head_size);
return head_size;
@@ -489,7 +491,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
+ if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
ret);
hisi_zip_remove_req(qp_ctx, req);
@@ -498,7 +500,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return ret;
}
-static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
int alg_type, int req_type)
{
struct device *dev = &qp->qm->pdev->dev;
@@ -506,7 +508,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
qp->req_type = req_type;
qp->alg_type = alg_type;
- qp->qp_ctx = ctx;
+ qp->qp_ctx = qp_ctx;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
@@ -514,15 +516,15 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
return ret;
}
- ctx->qp = qp;
+ qp_ctx->qp = qp;
return 0;
}
-static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
+static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
- hisi_qm_stop_qp(ctx->qp);
- hisi_qm_free_qps(&ctx->qp, 1);
+ hisi_qm_stop_qp(qp_ctx->qp);
+ hisi_qm_free_qps(&qp_ctx->qp, 1);
}
static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
@@ -594,18 +596,19 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
int i;
- for (i = 1; i >= 0; i--)
+ for (i = 0; i < HZIP_CTX_Q_NUM; i++)
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
+ u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
struct hisi_zip_req_q *req_q;
int i, ret;
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
req_q = &ctx->qp_ctx[i].req_q;
- req_q->size = QM_Q_DEPTH;
+ req_q->size = q_depth;
req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
if (!req_q->req_bitmap) {
@@ -613,7 +616,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
if (i == 0)
return ret;
- goto err_free_loop0;
+ goto err_free_comp_q;
}
rwlock_init(&req_q->req_lock);
@@ -622,19 +625,19 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
if (!req_q->q) {
ret = -ENOMEM;
if (i == 0)
- goto err_free_bitmap;
+ goto err_free_comp_bitmap;
else
- goto err_free_loop1;
+ goto err_free_decomp_bitmap;
}
}
return 0;
-err_free_loop1:
+err_free_decomp_bitmap:
bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
-err_free_loop0:
+err_free_comp_q:
kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
-err_free_bitmap:
+err_free_comp_bitmap:
bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
return ret;
}
@@ -651,6 +654,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
+ u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
struct hisi_zip_qp_ctx *tmp;
struct device *dev;
int i;
@@ -658,7 +662,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
tmp = &ctx->qp_ctx[i];
dev = &tmp->qp->qm->pdev->dev;
- tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1,
+ tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
sgl_sge_nr);
if (IS_ERR(tmp->sgl_pool)) {
if (i == 1)
@@ -755,6 +759,28 @@ static struct acomp_alg hisi_zip_acomp_zlib = {
}
};
+static int hisi_zip_register_zlib(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+ return 0;
+
+ ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hisi_zip_unregister_zlib(struct hisi_qm *qm)
+{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+ return;
+
+ crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
static struct acomp_alg hisi_zip_acomp_gzip = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
@@ -769,27 +795,45 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
}
};
-int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+static int hisi_zip_register_gzip(struct hisi_qm *qm)
{
int ret;
- ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
- if (ret) {
- pr_err("failed to register to zlib (%d)!\n", ret);
- return ret;
- }
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ return 0;
ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
- if (ret) {
- pr_err("failed to register to gzip (%d)!\n", ret);
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
- }
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret);
return ret;
}
-void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+static void hisi_zip_unregister_gzip(struct hisi_qm *qm)
{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ return;
+
crypto_unregister_acomp(&hisi_zip_acomp_gzip);
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+{
+ int ret = 0;
+
+ ret = hisi_zip_register_zlib(qm);
+ if (ret)
+ return ret;
+
+ ret = hisi_zip_register_gzip(qm);
+ if (ret)
+ hisi_zip_unregister_zlib(qm);
+
+ return ret;
+}
+
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+{
+ hisi_zip_unregister_zlib(qm);
+ hisi_zip_unregister_gzip(qm);
}
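
Two of the zip_crypto.c hunks above adjust the request bookkeeping: the request-queue write lock is dropped as soon as the id bit is set (the slot is already reserved), and the memset on free is gone because every field is rewritten on the next allocation. A sketch of that bitmap-based id allocation, with a hypothetical demo_req_q in place of the driver's structure:

#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct demo_req_q {
	unsigned long *bitmap;
	unsigned int size;
	rwlock_t lock;
};

static int demo_alloc_req_id(struct demo_req_q *q)
{
	int id;

	write_lock(&q->lock);
	id = find_first_zero_bit(q->bitmap, q->size);
	if (id >= q->size) {
		write_unlock(&q->lock);
		return -EBUSY;
	}
	set_bit(id, q->bitmap);
	/* the id is now reserved; the rest of the setup can run unlocked */
	write_unlock(&q->lock);

	return id;
}

static void demo_free_req_id(struct demo_req_q *q, int id)
{
	write_lock(&q->lock);
	clear_bit(id, q->bitmap);
	write_unlock(&q->lock);
}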
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c3303d99acac..c863435e8c75 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -20,18 +20,6 @@
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_CLOCK_GATE_CTRL 0x301004
-#define COMP0_ENABLE BIT(0)
-#define COMP1_ENABLE BIT(1)
-#define DECOMP0_ENABLE BIT(2)
-#define DECOMP1_ENABLE BIT(3)
-#define DECOMP2_ENABLE BIT(4)
-#define DECOMP3_ENABLE BIT(5)
-#define DECOMP4_ENABLE BIT(6)
-#define DECOMP5_ENABLE BIT(7)
-#define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
- DECOMP0_ENABLE | DECOMP1_ENABLE | \
- DECOMP2_ENABLE | DECOMP3_ENABLE | \
- DECOMP4_ENABLE | DECOMP5_ENABLE)
#define HZIP_DECOMP_CHECK_ENABLE BIT(16)
#define HZIP_FSM_MAX_CNT 0x301008
@@ -69,20 +57,14 @@
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
-#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
+#define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0
#define HZIP_OOO_SHUTDOWN_SEL 0x30120C
-#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
-#define HZIP_COMP_CORE_NUM 2
-#define HZIP_DECOMP_CORE_NUM 6
-#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
- HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE 128
-#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
@@ -92,6 +74,12 @@
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)
+#define HZIP_DEV_ALG_MAX_LEN 256
+#define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
+#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
+#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
+#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
+
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48
@@ -132,6 +120,26 @@ struct zip_dfx_item {
u32 offset;
};
+struct zip_dev_alg {
+ u32 alg_msk;
+ const char *algs;
+};
+
+static const struct zip_dev_alg zip_dev_algs[] = { {
+ .alg_msk = HZIP_ALG_ZLIB_BIT,
+ .algs = "zlib\n",
+ }, {
+ .alg_msk = HZIP_ALG_GZIP_BIT,
+ .algs = "gzip\n",
+ }, {
+ .alg_msk = HZIP_ALG_DEFLATE_BIT,
+ .algs = "deflate\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+ .algs = "lz77_zstd\n",
+ },
+};
+
static struct hisi_qm_list zip_devices = {
.register_to_crypto = hisi_zip_register_to_crypto,
.unregister_from_crypto = hisi_zip_unregister_from_crypto,
@@ -187,6 +195,58 @@ struct hisi_zip_ctrl {
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
+enum zip_cap_type {
+ ZIP_QM_NFE_MASK_CAP = 0x0,
+ ZIP_QM_RESET_MASK_CAP,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP,
+ ZIP_QM_CE_MASK_CAP,
+ ZIP_NFE_MASK_CAP,
+ ZIP_RESET_MASK_CAP,
+ ZIP_OOO_SHUTDOWN_MASK_CAP,
+ ZIP_CE_MASK_CAP,
+ ZIP_CLUSTER_NUM_CAP,
+ ZIP_CORE_TYPE_NUM_CAP,
+ ZIP_CORE_NUM_CAP,
+ ZIP_CLUSTER_COMP_NUM_CAP,
+ ZIP_CLUSTER_DECOMP_NUM_CAP,
+ ZIP_DECOMP_ENABLE_BITMAP,
+ ZIP_COMP_ENABLE_BITMAP,
+ ZIP_DRV_ALG_BITMAP,
+ ZIP_DEV_ALG_BITMAP,
+ ZIP_CORE1_ALG_BITMAP,
+ ZIP_CORE2_ALG_BITMAP,
+ ZIP_CORE3_ALG_BITMAP,
+ ZIP_CORE4_ALG_BITMAP,
+ ZIP_CORE5_ALG_BITMAP,
+ ZIP_CAP_MAX
+};
+
+static struct hisi_qm_cap_info zip_basic_cap_info[] = {
+ {ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77},
+ {ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77},
+ {ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+ {ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE},
+ {ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE},
+ {ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE},
+ {ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2},
+ {ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5},
+ {ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2},
+ {ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
+ {ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
+ {ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
+ {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF},
+ {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF},
+ {ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+ {ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+ {ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
+};
+
enum {
HZIP_COMP_CORE0,
HZIP_COMP_CORE1,
@@ -343,12 +403,52 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
+{
+ u32 cap_val;
+
+ cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
+ if ((alg & cap_val) == alg)
+ return true;
+
+ return false;
+}
+
+static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u32 alg_mask;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
+
+ for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
+ if (alg_mask & zip_dev_algs[i].alg_msk)
+ strcat(algs, zip_dev_algs[i].algs);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
+
static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -368,7 +468,7 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
@@ -401,6 +501,7 @@ static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
+ u32 dcomp_bm, comp_bm;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -438,8 +539,11 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
- base + HZIP_CLOCK_GATE_CTRL);
+ dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
+ comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
+ writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
@@ -458,7 +562,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (enable) {
val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
- val2 = HZIP_CORE_INT_RAS_NFE_ENABLE;
+ val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -472,6 +577,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
+ u32 nfe, ce;
+
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -479,17 +586,17 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
return;
}
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+
/* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
+ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(HZIP_CORE_INT_RAS_CE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
- writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- /* enable ZIP block master OOO when nfe occurs on Kunpeng930 */
hisi_zip_master_ooo_ctrl(qm, true);
/* enable ZIP hw error interrupts */
@@ -498,10 +605,13 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
+ u32 nfe, ce;
+
/* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
- /* disable ZIP block master OOO when nfe occurs on Kunpeng930 */
hisi_zip_master_ooo_ctrl(qm, false);
}
@@ -586,8 +696,9 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
return len;
tbuf[len] = '\0';
- if (kstrtoul(tbuf, 0, &val))
- return -EFAULT;
+ ret = kstrtoul(tbuf, 0, &val);
+ if (ret)
+ return ret;
ret = hisi_qm_get_dfx_access(qm);
if (ret)
@@ -651,18 +762,23 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
+ u32 zip_core_num, zip_comp_core_num;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
- for (i = 0; i < HZIP_CORE_NUM; i++) {
- if (i < HZIP_COMP_CORE_NUM)
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+ qm->cap_ver);
+
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "comp_core%d", i);
else
scnprintf(buf, sizeof(buf), "decomp_core%d",
- i - HZIP_COMP_CORE_NUM);
+ i - zip_comp_core_num);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -675,7 +791,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
debugfs_create_file("regs", 0444, tmp_d, regset,
- &hisi_zip_regs_fops);
+ &hisi_zip_regs_fops);
}
return 0;
@@ -795,10 +911,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 zip_core_num;
int i, j, idx;
- debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM +
- com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+
+ debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
+ sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
return -ENOMEM;
@@ -807,7 +926,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
debug->last_words[i] = readl_relaxed(io_base);
}
- for (i = 0; i < HZIP_CORE_NUM; i++) {
+ for (i = 0; i < zip_core_num; i++) {
io_base = qm->io_base + core_offsets[i];
for (j = 0; j < core_dfx_regs_num; j++) {
idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
@@ -834,6 +953,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
{
int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
+ u32 zip_core_num, zip_comp_core_num;
struct qm_debug *debug = &qm->debug;
char buf[HZIP_BUF_SIZE];
void __iomem *base;
@@ -847,15 +967,18 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
if (debug->last_words[i] != val)
pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
- hzip_com_dfx_regs[i].name, debug->last_words[i], val);
+ hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}
- for (i = 0; i < HZIP_CORE_NUM; i++) {
- if (i < HZIP_COMP_CORE_NUM)
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+ qm->cap_ver);
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
else
scnprintf(buf, sizeof(buf), "Decomp_core-%d",
- i - HZIP_COMP_CORE_NUM);
+ i - zip_comp_core_num);
base = qm->io_base + core_offsets[i];
pci_info(qm->pdev, "==>%s:\n", buf);
@@ -865,7 +988,8 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
if (debug->last_words[idx] != val)
pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
- hzip_dump_dfx_regs[j].name, debug->last_words[idx], val);
+ hzip_dump_dfx_regs[j].name,
+ debug->last_words[idx], val);
}
}
}
@@ -900,7 +1024,11 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -934,16 +1062,21 @@ static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
+ err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
- err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HZIP_WR_PORT;
err_info->acpi_rst = "ZRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
-
- if (qm->ver >= QM_HW_V3)
- err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
}
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
@@ -976,7 +1109,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
qm->err_ini = &hisi_zip_err_ini;
qm->err_ini->err_info_init(qm);
- hisi_zip_set_user_domain_and_cache(qm);
+ ret = hisi_zip_set_user_domain_and_cache(qm);
+ if (ret)
+ return ret;
+
hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -990,12 +1126,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
qm->pdev = pdev;
qm->ver = pdev->revision;
- if (pdev->revision >= QM_HW_V3)
- qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd";
- else
- qm->algs = "zlib\ngzip";
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1019,7 +1153,19 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to init zip qm configures!\n");
+ return ret;
+ }
+
+ ret = hisi_zip_set_qm_algs(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set zip algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static void hisi_zip_qm_uninit(struct hisi_qm *qm)
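
The zip_basic_cap_info table above replaces hard-coded masks and core counts with per-capability entries that name a register, a field position and per-generation defaults; hisi_qm_get_hw_info() does the actual selection. The sketch below only illustrates the general shape of such a lookup (read, shift, mask, or fall back to a default) — the demo_ structure and the fallback handling are assumptions, not the QM code:

#include <linux/io.h>
#include <linux/types.h>

struct demo_cap_info {
	u32 offset;	/* register holding the capability field */
	u32 shift;	/* field position within the register */
	u32 mask;	/* field width */
	u32 def_val;	/* fallback when the device cannot report it */
};

static u32 demo_get_cap(void __iomem *base, const struct demo_cap_info *cap,
			bool from_hw)
{
	u32 val;

	if (!from_hw)
		return cap->def_val;

	val = readl(base + cap->offset);
	return (val >> cap->shift) & cap->mask;
}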
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d68ef16650d4..32a37e3850c5 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -63,7 +63,6 @@ struct safexcel_cipher_ctx {
u32 hash_alg;
u32 state_sz;
- struct crypto_cipher *hkaes;
struct crypto_aead *fback;
};
@@ -642,10 +641,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
safexcel_complete(priv, ring);
if (src == dst) {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_TO_DEVICE);
+ if (sreq->nr_dst > 0)
+ dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
/*
@@ -737,23 +742,29 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
max(totlen_src, totlen_dst));
return -EINVAL;
}
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_map_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
totlen_src);
return -EINVAL;
}
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+
+ if (sreq->nr_src > 0)
+ dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
totlen_dst);
- dma_unmap_sg(priv->dev, src, sreq->nr_src,
- DMA_TO_DEVICE);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unmap;
}
- dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+
+ if (sreq->nr_dst > 0)
+ dma_map_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
@@ -883,12 +894,18 @@ rdesc_rollback:
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-
+unmap:
if (src == dst) {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_TO_DEVICE);
+ if (sreq->nr_dst > 0)
+ dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
return ret;
@@ -2589,15 +2606,8 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ctx->key_len = len;
/* Compute hash key by encrypting zeroes with cipher key */
- crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- if (ret)
- return ret;
-
memset(hashkey, 0, AES_BLOCK_SIZE);
- crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey);
+ aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey);
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
@@ -2626,15 +2636,11 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->xcm = EIP197_XCM_MODE_GCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
- ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- return PTR_ERR_OR_ZERO(ctx->hkaes);
+ return 0;
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(ctx->hkaes);
safexcel_aead_cra_exit(tfm);
}
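
The safexcel GCM setkey path above now derives the hash key by expanding the AES key with the library implementation and encrypting a zero block, instead of allocating a separate "aes" crypto_cipher. A minimal sketch of that derivation (H = E_K(0^128)) with the lib/crypto AES helpers:

#include <crypto/aes.h>
#include <linux/string.h>

static int demo_derive_ghash_key(const u8 *key, unsigned int keylen,
				 u8 hashkey[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;

	memset(hashkey, 0, AES_BLOCK_SIZE);
	aes_encrypt(&aes, hashkey, hashkey);	/* H = E_K(0) */

	memzero_explicit(&aes, sizeof(aes));	/* don't leave the key schedule around */
	return 0;
}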
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index bc60b5802256..103fc551d2af 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -30,7 +30,7 @@ struct safexcel_ahash_ctx {
bool fb_init_done;
bool fb_do_setkey;
- struct crypto_cipher *kaes;
+ struct crypto_aes_ctx *aes;
struct crypto_ahash *fback;
struct crypto_shash *shpre;
struct shash_desc *shdesc;
@@ -383,7 +383,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
u32 x;
x = ipad[i] ^ ipad[i + 4];
- cache[i] ^= swab(x);
+ cache[i] ^= swab32(x);
}
}
cache_len = AES_BLOCK_SIZE;
@@ -821,10 +821,10 @@ static int safexcel_ahash_final(struct ahash_request *areq)
u32 *result = (void *)areq->result;
/* K3 */
- result[i] = swab(ctx->base.ipad.word[i + 4]);
+ result[i] = swab32(ctx->base.ipad.word[i + 4]);
}
areq->result[0] ^= 0x80; // 10- padding
- crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
+ aes_encrypt(ctx->aes, areq->result, areq->result);
return 0;
} else if (unlikely(req->hmac &&
(req->len == req->block_sz) &&
@@ -2083,37 +2083,26 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct crypto_aes_ctx aes;
u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
int ret, i;
- ret = aes_expandkey(&aes, key, len);
+ ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
/* precompute the XCBC key material */
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes, key, len);
- if (ret)
- return ret;
-
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
- "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
- "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
- "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp,
+ "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE,
+ "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->base.ipad.word[i] = swab(key_tmp[i]);
-
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes,
- (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
- AES_MIN_KEY_SIZE);
+ ctx->base.ipad.word[i] = swab32(key_tmp[i]);
+
+ ret = aes_expandkey(ctx->aes,
+ (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ AES_MIN_KEY_SIZE);
if (ret)
return ret;
@@ -2121,7 +2110,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
ctx->cbcmac = false;
- memzero_explicit(&aes, sizeof(aes));
return 0;
}
@@ -2130,15 +2118,15 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_ahash_cra_init(tfm);
- ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
- return PTR_ERR_OR_ZERO(ctx->kaes);
+ ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
+ return PTR_ERR_OR_ZERO(ctx->aes);
}
static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->kaes);
+ kfree(ctx->aes);
safexcel_ahash_cra_exit(tfm);
}
@@ -2178,31 +2166,23 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct crypto_aes_ctx aes;
__be64 consts[4];
u64 _const[2];
u8 msb_mask, gfmask;
int ret, i;
- ret = aes_expandkey(&aes, key, len);
+ /* precompute the CMAC key material */
+ ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
for (i = 0; i < len / sizeof(u32); i++)
- ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
-
- /* precompute the CMAC key material */
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes, key, len);
- if (ret)
- return ret;
+ ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]);
/* code below borrowed from crypto/cmac.c */
/* encrypt the zero block */
memset(consts, 0, AES_BLOCK_SIZE);
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
+ aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts);
gfmask = 0x87;
_const[0] = be64_to_cpu(consts[1]);
@@ -2234,7 +2214,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
}
ctx->cbcmac = false;
- memzero_explicit(&aes, sizeof(aes));
return 0;
}
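
The CMAC setkey above keeps the subkey derivation "borrowed from crypto/cmac.c": encrypt a zero block to get L, then double it in GF(2^128) with the 0x87 reduction polynomial to get K1 (and K2 by doubling again). A sketch of just that doubling step on two big-endian 64-bit words:

#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_gf128_double(const __be64 in[2], __be64 out[2])
{
	u64 hi = be64_to_cpu(in[0]);
	u64 lo = be64_to_cpu(in[1]);
	u64 carry = hi >> 63;		/* msb decides whether to reduce */

	hi = (hi << 1) | (lo >> 63);
	lo = (lo << 1) ^ (carry ? 0x87 : 0);

	out[0] = cpu_to_be64(hi);
	out[1] = cpu_to_be64(lo);
}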
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 7942b48dd55a..1cd62f9c3e3a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -42,7 +42,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
config CRYPTO_DEV_KEEMBAY_OCS_ECC
tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
select CRYPTO_ECDH
select CRYPTO_ENGINE
@@ -64,7 +64,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_HCU
select CRYPTO_ENGINE
depends on HAS_IOMEM
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
help
Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
Control Unit (HCU) hardware acceleration for use with Crypto API.
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
index b8bdb9f134f3..205eacac4a34 100644
--- a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -403,7 +403,7 @@ union otx_cptx_pf_exe_bist_status {
* big-endian format in memory.
* iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
* 0 = The hardware issues NCB transient load (LDT) towards the cache,
- * which if the line hits and is is dirty will cause the line to be
+ * which if the line hits and is dirty will cause the line to be
* written back before being replaced.
* 1 = The hardware issues NCB LDWB read-and-invalidate command towards
* the cache when fetching the last word of instructions; as a result the
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 40b482198ebc..df9c2b8747e6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -97,7 +97,7 @@ static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
static void set_ucode_filename(struct otx_cpt_ucode *ucode,
const char *filename)
{
- strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
+ strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
@@ -138,7 +138,7 @@ static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
u32 i, val = 0;
u8 nn;
- strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+ strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
@@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev,
struct tar_ucode_info_t *tar_info;
struct otx_cpt_ucode_hdr *ucode_hdr;
int ucode_type, ucode_size;
+ unsigned int code_length;
/*
* If size is less than microcode header size then don't report
@@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev,
if (get_ucode_type(ucode_hdr, &ucode_type))
return 0;
- ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Invalid code_length %u\n", code_length);
+ return -EINVAL;
+ }
+
+ ucode_size = code_length * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", filename);
@@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
{
struct otx_cpt_ucode_hdr *ucode_hdr;
const struct firmware *fw;
+ unsigned int code_length;
int ret;
set_ucode_filename(ucode, ucode_filename);
@@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
ucode->ver_num = ucode_hdr->ver_num;
- ucode->size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Ucode invalid code_length %u\n", code_length);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ ucode->size = code_length * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
@@ -1328,7 +1342,7 @@ static ssize_t ucode_load_store(struct device *dev,
eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
err_msg = "Invalid engine group format";
- strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
+ strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
start = tmp_buf;
has_se = has_ie = has_ae = false;
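
Both octeontx ucode loaders above now range-check the 32-bit code_length taken from the image header before doubling it into a signed size, so a crafted header cannot overflow the allocation size. A sketch of that guard on an untrusted wire length (demo_ names are illustrative):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static int demo_ucode_size(__be32 wire_code_length, int *out_size)
{
	unsigned int code_length = be32_to_cpu(wire_code_length);

	/* code_length * 2 must still fit in a signed int */
	if (!code_length || code_length >= INT_MAX / 2)
		return -EINVAL;

	*out_size = code_length * 2;
	return 0;
}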
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index 36d72e35ebeb..88a41d1ca5f6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -661,7 +661,7 @@ static ssize_t vf_type_show(struct device *dev,
msg = "Invalid";
}
- return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
+ return sysfs_emit(buf, "%s\n", msg);
}
static ssize_t vf_engine_group_show(struct device *dev,
@@ -670,7 +670,7 @@ static ssize_t vf_engine_group_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
+ return sysfs_emit(buf, "%d\n", cptvf->vfgrp);
}
static ssize_t vf_engine_group_store(struct device *dev,
@@ -706,7 +706,7 @@ static ssize_t vf_coalesc_time_wait_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_timewait(cptvf));
}
@@ -716,7 +716,7 @@ static ssize_t vf_coalesc_num_wait_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_numwait(cptvf));
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
index 5663787c7a62..90fdafb7c468 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
@@ -159,12 +159,10 @@ static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_READY;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -174,13 +172,11 @@ int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_QLEN;
mbx.data = cptvf->qsize;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -208,14 +204,12 @@ int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
/* Convey group of the VF */
mbx.data = cptvf->priority;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -224,12 +218,10 @@ int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VF_UP;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -238,10 +230,8 @@ int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VF_DOWN;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index f10050fead16..1577986677f6 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -68,7 +68,7 @@ static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
const char *filename)
{
- strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+ strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
@@ -126,7 +126,7 @@ static int get_ucode_type(struct device *dev,
int i, val = 0;
u8 nn;
- strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+ strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 02cb9e44afd8..75c403f2b1d9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -191,7 +191,6 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
struct pci_dev *pdev = cptvf->pdev;
struct mbox_msghdr *req;
- int ret;
req = (struct mbox_msghdr *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
@@ -204,7 +203,5 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
req->sig = OTX2_MBOX_REQ_SIG;
req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
- ret = otx2_cpt_send_mbox_msg(mbox, pdev);
-
- return ret;
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 3b0bf6fea491..31e24df18877 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1494,7 +1494,7 @@ static void n2_unregister_algs(void)
*
* So we have to back-translate, going through the 'intr' and 'ino'
* property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to to figure out which
+ * 'interrupts' property entries, in order to figure out which
* devino goes to which already-translated IRQ.
*/
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 3793885f928d..c843f4c6f684 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -134,7 +134,6 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
unsigned int cryptlen, u8 *b0)
{
unsigned int l, lp, m = authsize;
- int rc;
memcpy(b0, iv, 16);
@@ -148,9 +147,7 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
if (assoclen)
*b0 |= 64;
- rc = set_msg_len(b0 + 16 - l, cryptlen, l);
-
- return rc;
+ return set_msg_len(b0 + 16 - l, cryptlen, l);
}
static int generate_pat(u8 *iv,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index e61b3e13db3b..1931e5b37f2b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -251,13 +251,13 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
return -ENOMEM;
INIT_LIST_HEAD(&key_val->list);
- strlcpy(key_val->key, key, sizeof(key_val->key));
+ strscpy(key_val->key, key, sizeof(key_val->key));
if (type == ADF_DEC) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"%ld", (*((long *)val)));
} else if (type == ADF_STR) {
- strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+ strscpy(key_val->val, (char *)val, sizeof(key_val->val));
} else if (type == ADF_HEX) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"0x%lx", (unsigned long)val);
@@ -315,7 +315,7 @@ int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
if (!sec)
return -ENOMEM;
- strlcpy(sec->name, name, sizeof(sec->name));
+ strscpy(sec->name, name, sizeof(sec->name));
INIT_LIST_HEAD(&sec->param_head);
down_write(&cfg->lock);
list_add_tail(&sec->list, &cfg->sec_list);
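
The adf_cfg.c changes above are part of the tree-wide strlcpy() to strscpy() conversion; strscpy() returns -E2BIG on truncation rather than the length the copy would have needed, so truncation can be checked directly. A short sketch of that check:

#include <linux/string.h>
#include <linux/errno.h>

static int demo_copy_name(char *dst, size_t dst_len, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_len);

	if (n < 0)		/* truncated: n == -E2BIG */
		return -EINVAL;

	return 0;		/* n characters were copied, NUL-terminated */
}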
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e8ac932bbaab..82b69e1f725b 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -16,6 +16,9 @@
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
+#define ADF_CFG_MAX_SECTION 512
+#define ADF_CFG_MAX_KEY_VAL 256
+
#define DEVICE_NAME "qat_adf_ctl"
static DEFINE_MUTEX(adf_ctl_lock);
@@ -137,10 +140,11 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
struct adf_user_cfg_key_val key_val;
struct adf_user_cfg_key_val *params_head;
struct adf_user_cfg_section section, *section_head;
+ int i, j;
section_head = ctl_data->config_section;
- while (section_head) {
+ for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
if (copy_from_user(&section, (void __user *)section_head,
sizeof(*section_head))) {
dev_err(&GET_DEV(accel_dev),
@@ -156,7 +160,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
params_head = section.params;
- while (params_head) {
+ for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
if (copy_from_user(&key_val, (void __user *)params_head,
sizeof(key_val))) {
dev_err(&GET_DEV(accel_dev),
@@ -363,7 +367,7 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
dev_info.num_logical_accel = hw_data->num_logical_accel;
dev_info.banks_per_accel = hw_data->num_banks
/ hw_data->num_logical_accel;
- strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+ strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
dev_info.instance_id = hw_data->instance_id;
dev_info.type = hw_data->dev_class->type;
dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
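The adf_ctl_drv.c hunks above put an upper bound (ADF_CFG_MAX_SECTION / ADF_CFG_MAX_KEY_VAL) on the walk over the user-supplied section and key-value lists, so a crafted 'next' pointer cycle cannot keep the ioctl spinning in the kernel. A generic sketch of the pattern, assuming a kernel context (struct user_node, MAX_USER_NODES and walk_user_list() are hypothetical):

#include <linux/errno.h>
#include <linux/uaccess.h>

#define MAX_USER_NODES	256	/* hypothetical upper bound */

struct user_node {
	struct user_node __user *next;
	/* ... payload ... */
};

static int walk_user_list(struct user_node __user *head)
{
	struct user_node node;
	int i;

	/*
	 * The list lives in userspace, so its length is caller controlled;
	 * bounding the loop turns a malicious cycle into a clean error
	 * instead of an unbounded kernel-side walk.
	 */
	for (i = 0; head && i < MAX_USER_NODES; i++) {
		if (copy_from_user(&node, head, sizeof(node)))
			return -EFAULT;
		/* ... validate and consume the copied node here ... */
		head = node.next;
	}

	return head ? -EINVAL : 0;
}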
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 43b8f864806b..4fb4b3df5a18 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -107,7 +107,7 @@ do { \
* Timeout is in cycles. Clock speed may vary across products but this
* value should be a few milli-seconds.
*/
-#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
#define ADF_SSMWDTL_OFFSET 0x54
#define ADF_SSMWDTH_OFFSET 0x5C
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e69e5907f595..08bca1c506c0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -96,7 +96,7 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
if (!ring_debug)
return -ENOMEM;
- strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+ strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
snprintf(entry_name, sizeof(entry_name), "ring_%02d",
ring->ring_number);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index 4b36869bf460..69482abdb8b9 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -86,7 +86,8 @@
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
ICP_QAT_CSS_SIGNATURE_LEN(handle))
-#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index fb45fa83841c..cad9c58caab1 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -673,11 +673,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
dma_addr_t blpout = qat_req->buf.bloutp;
size_t sz = qat_req->buf.sz;
size_t sz_out = qat_req->buf.sz_out;
+ int bl_dma_dir;
int i;
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, DMA_BIDIRECTIONAL);
+ bl->bufers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
@@ -691,7 +694,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
for (i = bufless; i < blout->num_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -716,6 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n);
int node = dev_to_node(&GET_DEV(inst->accel_dev));
+ int bufl_dma_dir;
if (unlikely(!n))
return -EINVAL;
@@ -733,6 +737,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
qat_req->buf.sgl_src_valid = true;
}
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -744,7 +750,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
goto err_in;
@@ -787,7 +793,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
goto err_out;
bufers[y].len = sg->length;
@@ -817,7 +823,7 @@ err_out:
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (!qat_req->buf.sgl_dst_valid)
kfree(buflout);
@@ -831,7 +837,7 @@ err_in:
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
if (!qat_req->buf.sgl_src_valid)
kfree(bufl);
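The qat_algs.c changes above pick the DMA direction per buffer: source-only buffers are mapped DMA_TO_DEVICE, destination-only buffers DMA_FROM_DEVICE, and only in-place requests (where source and destination are the same list) keep DMA_BIDIRECTIONAL, avoiding needless cache maintenance on unmap. A minimal sketch of that selection, assuming a kernel context (map_one_buffer() and its arguments are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int map_one_buffer(struct device *dev, struct scatterlist *src,
			  struct scatterlist *dst, dma_addr_t *addr)
{
	/*
	 * In-place requests reuse the same pages for input and output and
	 * must stay DMA_BIDIRECTIONAL; distinct buffers can use the cheaper
	 * single-direction mappings.
	 */
	enum dma_data_direction dir =
		(src != dst) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	*addr = dma_map_single(dev, sg_virt(src), src->length, dir);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;

	return 0;
}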
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 095ed2a404d2..94a26702aeae 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,14 +332,14 @@ static int qat_dh_compute_value(struct kpp_request *req)
qat_req->in.dh.in_tab[n_input_params] = 0;
qat_req->out.dh.out_tab[1] = 0;
/* Mapping in.in.b or in.in_g2.xa is the same */
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
- sizeof(qat_req->in.dh.in.b),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
+ sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
- sizeof(qat_req->out.dh.r),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
+ sizeof(struct qat_dh_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -729,14 +729,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
- sizeof(qat_req->in.rsa.enc.m),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
- sizeof(qat_req->out.rsa.enc.c),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -875,14 +875,14 @@ static int qat_rsa_dec(struct akcipher_request *req)
else
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
- sizeof(qat_req->in.rsa.dec.c),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
- sizeof(qat_req->out.rsa.dec.m),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
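The qat_asym_algs.c hunks above map the whole input/output parameter table (sizeof(struct qat_rsa_input_params) and friends) instead of only the first member, so the firmware's reads of the later table entries stay inside the mapped region. A minimal sketch of the difference, assuming a kernel context (struct fw_input_params and map_param_table() are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct fw_input_params {
	u64 m;
	u64 e;
	u64 n;
};

static dma_addr_t map_param_table(struct device *dev,
				  struct fw_input_params *in)
{
	/*
	 * Mapping only &in->m with sizeof(in->m) would cover just the first
	 * eight bytes; the device consumes the whole table, so the mapping
	 * has to cover sizeof(*in).
	 */
	return dma_map_single(dev, in, sizeof(*in), DMA_TO_DEVICE);
}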
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 0fe5a474aa45..b7f7869ef8b2 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -1367,6 +1367,48 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
}
}
+static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ unsigned int fw_type)
+{
+ char *fw_type_name = fw_type ? "MMP" : "AE";
+ unsigned int css_dword_size = sizeof(u32);
+
+ if (handle->chip_info->fw_auth) {
+ struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+ unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+
+ if ((css_hdr->header_len * css_dword_size) != header_len)
+ goto err;
+ if ((css_hdr->size * css_dword_size) != size)
+ goto err;
+ if (fw_type != css_hdr->fw_type)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ size -= header_len;
+ }
+
+ if (fw_type == CSS_AE_FIRMWARE) {
+ if (size < sizeof(struct icp_qat_simg_ae_mode *) +
+ ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
+ goto err;
+ if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
+ goto err;
+ } else if (fw_type == CSS_MMP_FIRMWARE) {
+ if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
+ goto err;
+ } else {
+ pr_err("QAT: Unsupported firmware type\n");
+ return -EINVAL;
+ }
+ return 0;
+
+err:
+ pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ return -EINVAL;
+}
+
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
char *image, unsigned int size,
struct icp_qat_fw_auth_desc **desc)
@@ -1379,7 +1421,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_simg_ae_mode *simg_ae_mode;
struct icp_firml_dram_desc img_desc;
- if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
+ if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
pr_err("QAT: error, input image size overflow %d\n", size);
return -EINVAL;
}
@@ -1547,6 +1589,11 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
{
struct icp_qat_fw_auth_desc *desc = NULL;
int status = 0;
+ int ret;
+
+ ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
+ if (ret)
+ return ret;
if (handle->chip_info->fw_auth) {
status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
@@ -2018,8 +2065,15 @@ static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
struct icp_qat_fw_auth_desc *desc = NULL;
struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+ int ret;
for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+ ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
+ simg_hdr[i].simg_len,
+ CSS_AE_FIRMWARE);
+ if (ret)
+ return ret;
+
if (qat_uclo_map_auth_fw(handle,
(char *)simg_hdr[i].simg_buf,
(unsigned int)
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 97a530171f07..6eb4d2e35629 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -450,8 +450,8 @@ qce_aead_async_req_handle(struct crypto_async_request *async_req)
if (ret)
return ret;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (dst_nents < 0) {
- ret = dst_nents;
+ if (!dst_nents) {
+ ret = -EIO;
goto error_free;
}
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 59159f5e64e5..37bafd7aeb79 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -97,14 +97,16 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
}
ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
- if (ret < 0)
- return ret;
+ if (!ret)
+ return -EIO;
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
- if (ret < 0)
+ if (!ret) {
+ ret = -EIO;
goto error_unmap_src;
+ }
ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
&rctx->result_sg, 1, qce_ahash_done, async_req);
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 3d27cd5210ef..5b493fdc1e74 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -124,15 +124,15 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
rctx->dst_sg = rctx->dst_tbl.sgl;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (dst_nents < 0) {
- ret = dst_nents;
+ if (!dst_nents) {
+ ret = -EIO;
goto error_free;
}
if (diff_dst) {
src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- if (src_nents < 0) {
- ret = src_nents;
+ if (!src_nents) {
+ ret = -EIO;
goto error_unmap_dst;
}
rctx->src_sg = req->src;
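The qce fixes above (aead.c, sha.c and skcipher.c) replace 'ret < 0' with '!ret' because dma_map_sg() never returns a negative errno: it returns the number of mapped segments, or 0 on failure, so the old checks could never trigger. A minimal sketch of the convention, assuming a kernel context (map_src() is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int map_src(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped;

	/*
	 * dma_map_sg() returns the number of mapped segments (possibly fewer
	 * than nents after coalescing) or 0 on error; negative values are
	 * never returned, so failure must be tested with '!mapped'.
	 */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	return mapped;
}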
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 031b5f701a0a..72dd1a4ebac4 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -9,6 +9,7 @@
#include <linux/crypto.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -201,15 +202,13 @@ static int qcom_rng_remove(struct platform_device *pdev)
return 0;
}
-#if IS_ENABLED(CONFIG_ACPI)
-static const struct acpi_device_id qcom_rng_acpi_match[] = {
+static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
{ .id = "QCOM8160", .driver_data = 1 },
{}
};
MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
-#endif
-static const struct of_device_id qcom_rng_of_match[] = {
+static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
{ .compatible = "qcom,prng", .data = (void *)0},
{ .compatible = "qcom,prng-ee", .data = (void *)1},
{}
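The qcom-rng hunk above drops the #if IS_ENABLED(CONFIG_ACPI) guard and marks both ID tables __maybe_unused, which keeps the tables visible to the build while silencing the unused-variable warning whenever the corresponding match path is compiled out. A minimal sketch, assuming a kernel-module context (my_acpi_match and its ID string are hypothetical):

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* No #ifdef needed: __maybe_unused quiets -Wunused if ACPI matching is off. */
static const struct acpi_device_id __maybe_unused my_acpi_match[] = {
	{ .id = "ABCD0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, my_acpi_match);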
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 457084b344c1..7ab20fb95166 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -26,10 +26,10 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#define SHA_BUFFER_LEN PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -196,7 +196,7 @@ struct sahara_dev {
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
- struct mutex queue_mutex;
+ spinlock_t queue_spinlock;
struct task_struct *kthread;
struct completion dma_completion;
@@ -487,13 +487,13 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
- if (ret != dev->nb_in_sg) {
+ if (!ret) {
dev_err(dev->device, "couldn't map in sg\n");
goto unmap_in;
}
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
- if (ret != dev->nb_out_sg) {
+ if (!ret) {
dev_err(dev->device, "couldn't map out sg\n");
goto unmap_out;
}
@@ -642,9 +642,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
err = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1043,10 +1043,10 @@ static int sahara_queue_manage(void *data)
do {
__set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -1092,9 +1092,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
rctx->first = 1;
}
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
ret = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1449,7 +1449,7 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- mutex_init(&dev->queue_mutex);
+ spin_lock_init(&dev->queue_spinlock);
dev_ptr = dev;
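The sahara.c conversion above swaps the queue mutex for a spinlock taken with spin_lock_bh(), since the enqueue path can be reached from contexts where sleeping on a mutex is not allowed and the critical section is short. A minimal sketch, assuming a kernel context (enqueue_req() and its parameters are hypothetical):

#include <linux/spinlock.h>
#include <crypto/algapi.h>

static int enqueue_req(struct crypto_queue *queue, spinlock_t *lock,
		       struct crypto_async_request *req)
{
	int err;

	/*
	 * spin_lock_bh() disables bottom halves while the queue is touched,
	 * so the same lock is safe against softirq-context users without
	 * ever sleeping the way a mutex could.
	 */
	spin_lock_bh(lock);
	err = crypto_enqueue_request(queue, req);
	spin_unlock_bh(lock);

	return err;
}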
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index acf31cc1dbcc..97086fab698e 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -48,7 +48,7 @@ void hmem_register_device(int target_nid, struct resource *r)
rc = platform_device_add_data(pdev, &info, sizeof(info));
if (rc < 0) {
pr_err("hmem memregion_info allocation failure for %pr\n", &res);
- goto out_pdev;
+ goto out_resource;
}
rc = platform_device_add_resources(pdev, &res, 1);
@@ -66,7 +66,7 @@ void hmem_register_device(int target_nid, struct resource *r)
return;
out_resource:
- put_device(&pdev->dev);
+ platform_device_put(pdev);
out_pdev:
memregion_free(id);
}
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index a37622060fff..4852a2dbdb27 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -11,9 +11,17 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/memory-tiers.h>
#include "dax-private.h"
#include "bus.h"
+/*
+ * Default abstract distance assigned to the NUMA node onlined
+ * by DAX/kmem if the low level platform driver didn't initialize
+ * one for this NUMA node.
+ */
+#define MEMTIER_DEFAULT_DAX_ADISTANCE (MEMTIER_ADISTANCE_DRAM * 5)
+
/* Memory resource name used for add_memory_driver_managed(). */
static const char *kmem_name;
/* Set if any memory will remain added when the driver will be unloaded. */
@@ -41,6 +49,7 @@ struct dax_kmem_data {
struct resource *res[];
};
+static struct memory_dev_type *dax_slowmem_type;
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
@@ -79,11 +88,13 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return -EINVAL;
}
+ init_node_memory_type(numa_node, dax_slowmem_type);
+
+ rc = -ENOMEM;
data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
if (!data)
- return -ENOMEM;
+ goto err_dax_kmem_data;
- rc = -ENOMEM;
data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!data->res_name)
goto err_res_name;
@@ -155,6 +166,8 @@ err_reg_mgid:
kfree(data->res_name);
err_res_name:
kfree(data);
+err_dax_kmem_data:
+ clear_node_memory_type(numa_node, dax_slowmem_type);
return rc;
}
@@ -162,6 +175,7 @@ err_res_name:
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
int i, success = 0;
+ int node = dev_dax->target_node;
struct device *dev = &dev_dax->dev;
struct dax_kmem_data *data = dev_get_drvdata(dev);
@@ -198,6 +212,14 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
kfree(data->res_name);
kfree(data);
dev_set_drvdata(dev, NULL);
+ /*
+ * Clear the memtype association on successful unplug.
+ * If not, we have memory blocks left which can be
+ * offlined/onlined later. We need to keep memory_dev_type
+ * for that. This implies this reference will be around
+ * till next reboot.
+ */
+ clear_node_memory_type(node, dax_slowmem_type);
}
}
#else
@@ -228,9 +250,22 @@ static int __init dax_kmem_init(void)
if (!kmem_name)
return -ENOMEM;
+ dax_slowmem_type = alloc_memory_type(MEMTIER_DEFAULT_DAX_ADISTANCE);
+ if (IS_ERR(dax_slowmem_type)) {
+ rc = PTR_ERR(dax_slowmem_type);
+ goto err_dax_slowmem_type;
+ }
+
rc = dax_driver_register(&device_dax_kmem_driver);
if (rc)
- kfree_const(kmem_name);
+ goto error_dax_driver;
+
+ return rc;
+
+error_dax_driver:
+ destroy_memory_type(dax_slowmem_type);
+err_dax_slowmem_type:
+ kfree_const(kmem_name);
return rc;
}
@@ -239,6 +274,7 @@ static void __exit dax_kmem_exit(void)
dax_driver_unregister(&device_dax_kmem_driver);
if (!any_hotremove_failed)
kfree_const(kmem_name);
+ destroy_memory_type(dax_slowmem_type);
}
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 9b5e2a5eb0ae..da4438f3188c 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -363,7 +363,7 @@ static void dax_free_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
if (inode->i_rdev)
- ida_simple_remove(&dax_minor_ida, iminor(inode));
+ ida_free(&dax_minor_ida, iminor(inode));
kmem_cache_free(dax_cache, dax_dev);
}
@@ -445,7 +445,7 @@ struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
if (WARN_ON_ONCE(ops && !ops->zero_page_range))
return ERR_PTR(-EINVAL);
- minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
+ minor = ida_alloc_max(&dax_minor_ida, MINORMASK, GFP_KERNEL);
if (minor < 0)
return ERR_PTR(-ENOMEM);
@@ -459,7 +459,7 @@ struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
return dax_dev;
err_dev:
- ida_simple_remove(&dax_minor_ida, minor);
+ ida_free(&dax_minor_ida, minor);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
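The dax/super.c hunks above move from the deprecated ida_simple_get()/ida_simple_remove() wrappers to ida_alloc_max()/ida_free(), which take the inclusive upper bound directly instead of the old exclusive 'end' argument (hence MINORMASK rather than MINORMASK+1). A minimal sketch, assuming a kernel context (minor_ida, get_minor() and put_minor() are hypothetical):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>

static DEFINE_IDA(minor_ida);

static int get_minor(void)
{
	/* Lowest free ID in [0, MINORMASK], or a negative errno. */
	return ida_alloc_max(&minor_ida, MINORMASK, GFP_KERNEL);
}

static void put_minor(int minor)
{
	ida_free(&minor_ida, minor);
}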
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a06d2a7627aa..7524b62a8870 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -180,7 +180,7 @@ config DMA_SUN6I
config DW_AXI_DMAC
tristate "Synopsys DesignWare AXI DMA support"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 487a01aa207d..eea8bd33b4b7 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2367,7 +2367,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
INIT_LIST_HEAD(&dmadev->channels);
/*
- * Register as many many memcpy as we have physical channels,
+ * Register as many memcpy as we have physical channels,
* we won't always be able to use all but the code will have
* to cope with that situation.
*/
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index d1f74a3aa999..317ca76ccafd 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -12,8 +12,9 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
-#include <linux/interrupt.h>
+#include <linux/reset.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include "dmaengine.h"
@@ -95,7 +96,9 @@ struct admac_data {
struct dma_device dma;
struct device *dev;
__iomem void *base;
+ struct reset_control *rstc;
+ int irq;
int irq_index;
int nchannels;
struct admac_chan channels[];
@@ -724,18 +727,17 @@ static int admac_probe(struct platform_device *pdev)
if (irq < 0)
return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
-
- err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
- 0, dev_name(&pdev->dev), ad);
- if (err)
- return dev_err_probe(&pdev->dev, err,
- "unable to register interrupt\n");
+ ad->irq = irq;
ad->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ad->base))
return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
"unable to obtain MMIO resource\n");
+ ad->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (IS_ERR(ad->rstc))
+ return PTR_ERR(ad->rstc);
+
dma = &ad->dma;
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
@@ -774,17 +776,38 @@ static int admac_probe(struct platform_device *pdev)
tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
}
- err = dma_async_device_register(&ad->dma);
+ err = reset_control_reset(ad->rstc);
if (err)
- return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+ return dev_err_probe(&pdev->dev, err,
+ "unable to trigger reset\n");
+
+ err = request_irq(irq, admac_interrupt, 0, dev_name(&pdev->dev), ad);
+ if (err) {
+ dev_err_probe(&pdev->dev, err,
+ "unable to register interrupt\n");
+ goto free_reset;
+ }
+
+ err = dma_async_device_register(&ad->dma);
+ if (err) {
+ dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+ goto free_irq;
+ }
err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
if (err) {
dma_async_device_unregister(&ad->dma);
- return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+ dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+ goto free_irq;
}
return 0;
+
+free_irq:
+ free_irq(ad->irq, ad);
+free_reset:
+ reset_control_rearm(ad->rstc);
+ return err;
}
static int admac_remove(struct platform_device *pdev)
@@ -793,6 +816,8 @@ static int admac_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&ad->dma);
+ free_irq(ad->irq, ad);
+ reset_control_rearm(ad->rstc);
return 0;
}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b102d8eb5d83..d6c9781cd46a 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1470,10 +1470,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
bool initd;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE)
- return ret;
-
- if (!txstate)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&atchan->lock, flags);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 9fe2ae794316..ffe621695e47 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -312,7 +312,7 @@ static unsigned long dmatest_random(void)
{
unsigned long buf;
- prandom_bytes(&buf, sizeof(buf));
+ get_random_bytes(&buf, sizeof(buf));
return buf;
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 07f756479663..c54b24ff5206 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -682,15 +681,12 @@ static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->status != EDMA_ST_IDLE)
return -EBUSY;
- pm_runtime_get(chan->dw->chip->dev);
-
return 0;
}
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
- struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
int ret;
while (time_before(jiffies, timeout)) {
@@ -703,8 +699,6 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
cpu_relax();
}
-
- pm_runtime_put(chan->dw->chip->dev);
}
static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
@@ -977,9 +971,6 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (err)
goto err_irq_free;
- /* Power management */
- pm_runtime_enable(dev);
-
/* Turn debugfs on */
dw_edma_v0_core_debugfs_on(dw);
@@ -1009,9 +1000,6 @@ int dw_edma_remove(struct dw_edma_chip *chip)
for (i = (dw->nr_irqs - 1); i >= 0; i--)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
- /* Power management */
- pm_runtime_disable(dev);
-
/* Deregister eDMA device */
dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index 43817ced3a3e..c1350a36fddd 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2019 HiSilicon Limited. */
+/* Copyright(c) 2019-2022 HiSilicon Limited. */
+
#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
@@ -9,32 +10,87 @@
#include <linux/spinlock.h>
#include "virt-dma.h"
-#define HISI_DMA_SQ_BASE_L 0x0
-#define HISI_DMA_SQ_BASE_H 0x4
-#define HISI_DMA_SQ_DEPTH 0x8
-#define HISI_DMA_SQ_TAIL_PTR 0xc
-#define HISI_DMA_CQ_BASE_L 0x10
-#define HISI_DMA_CQ_BASE_H 0x14
-#define HISI_DMA_CQ_DEPTH 0x18
-#define HISI_DMA_CQ_HEAD_PTR 0x1c
-#define HISI_DMA_CTRL0 0x20
-#define HISI_DMA_CTRL0_QUEUE_EN_S 0
-#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4
-#define HISI_DMA_CTRL1 0x24
-#define HISI_DMA_CTRL1_QUEUE_RESET_S 0
-#define HISI_DMA_Q_FSM_STS 0x30
-#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0)
-#define HISI_DMA_INT_STS 0x40
-#define HISI_DMA_INT_STS_MASK GENMASK(12, 0)
-#define HISI_DMA_INT_MSK 0x44
-#define HISI_DMA_MODE 0x217c
-#define HISI_DMA_OFFSET 0x100
-
-#define HISI_DMA_MSI_NUM 32
-#define HISI_DMA_CHAN_NUM 30
-#define HISI_DMA_Q_DEPTH_VAL 1024
-
-#define PCI_BAR_2 2
+/* HiSilicon DMA register common field define */
+#define HISI_DMA_Q_SQ_BASE_L 0x0
+#define HISI_DMA_Q_SQ_BASE_H 0x4
+#define HISI_DMA_Q_SQ_DEPTH 0x8
+#define HISI_DMA_Q_SQ_TAIL_PTR 0xc
+#define HISI_DMA_Q_CQ_BASE_L 0x10
+#define HISI_DMA_Q_CQ_BASE_H 0x14
+#define HISI_DMA_Q_CQ_DEPTH 0x18
+#define HISI_DMA_Q_CQ_HEAD_PTR 0x1c
+#define HISI_DMA_Q_CTRL0 0x20
+#define HISI_DMA_Q_CTRL0_QUEUE_EN BIT(0)
+#define HISI_DMA_Q_CTRL0_QUEUE_PAUSE BIT(4)
+#define HISI_DMA_Q_CTRL1 0x24
+#define HISI_DMA_Q_CTRL1_QUEUE_RESET BIT(0)
+#define HISI_DMA_Q_FSM_STS 0x30
+#define HISI_DMA_Q_FSM_STS_MASK GENMASK(3, 0)
+#define HISI_DMA_Q_ERR_INT_NUM0 0x84
+#define HISI_DMA_Q_ERR_INT_NUM1 0x88
+#define HISI_DMA_Q_ERR_INT_NUM2 0x8c
+
+/* HiSilicon IP08 DMA register and field define */
+#define HISI_DMA_HIP08_MODE 0x217C
+#define HISI_DMA_HIP08_Q_BASE 0x0
+#define HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN BIT(2)
+#define HISI_DMA_HIP08_Q_INT_STS 0x40
+#define HISI_DMA_HIP08_Q_INT_MSK 0x44
+#define HISI_DMA_HIP08_Q_INT_STS_MASK GENMASK(14, 0)
+#define HISI_DMA_HIP08_Q_ERR_INT_NUM3 0x90
+#define HISI_DMA_HIP08_Q_ERR_INT_NUM4 0x94
+#define HISI_DMA_HIP08_Q_ERR_INT_NUM5 0x98
+#define HISI_DMA_HIP08_Q_ERR_INT_NUM6 0x48
+#define HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT BIT(24)
+
+/* HiSilicon IP09 DMA register and field define */
+#define HISI_DMA_HIP09_DMA_FLR_DISABLE 0xA00
+#define HISI_DMA_HIP09_DMA_FLR_DISABLE_B BIT(0)
+#define HISI_DMA_HIP09_Q_BASE 0x2000
+#define HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN GENMASK(31, 28)
+#define HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT BIT(26)
+#define HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT BIT(27)
+#define HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE BIT(2)
+#define HISI_DMA_HIP09_Q_INT_STS 0x40
+#define HISI_DMA_HIP09_Q_INT_MSK 0x44
+#define HISI_DMA_HIP09_Q_INT_STS_MASK 0x1
+#define HISI_DMA_HIP09_Q_ERR_INT_STS 0x48
+#define HISI_DMA_HIP09_Q_ERR_INT_MSK 0x4C
+#define HISI_DMA_HIP09_Q_ERR_INT_STS_MASK GENMASK(18, 1)
+#define HISI_DMA_HIP09_PORT_CFG_REG(port_id) (0x800 + \
+ (port_id) * 0x20)
+#define HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B BIT(16)
+
+#define HISI_DMA_HIP09_MAX_PORT_NUM 16
+
+#define HISI_DMA_HIP08_MSI_NUM 32
+#define HISI_DMA_HIP08_CHAN_NUM 30
+#define HISI_DMA_HIP09_MSI_NUM 4
+#define HISI_DMA_HIP09_CHAN_NUM 4
+#define HISI_DMA_REVISION_HIP08B 0x21
+#define HISI_DMA_REVISION_HIP09A 0x30
+
+#define HISI_DMA_Q_OFFSET 0x100
+#define HISI_DMA_Q_DEPTH_VAL 1024
+
+#define PCI_BAR_2 2
+
+#define HISI_DMA_POLL_Q_STS_DELAY_US 10
+#define HISI_DMA_POLL_Q_STS_TIME_OUT_US 1000
+
+#define HISI_DMA_MAX_DIR_NAME_LEN 128
+
+/*
+ * The HIP08B (HiSilicon IP08) and HIP09A (HiSilicon IP09) are DMA iEPs; they
+ * have the same pci device id but different pci revisions.
+ * Unfortunately, they have different register layouts, so two layout
+ * enumerations are defined.
+ */
+enum hisi_dma_reg_layout {
+ HISI_DMA_REG_LAYOUT_INVALID = 0,
+ HISI_DMA_REG_LAYOUT_HIP08,
+ HISI_DMA_REG_LAYOUT_HIP09
+};
enum hisi_dma_mode {
EP = 0,
@@ -105,9 +161,162 @@ struct hisi_dma_dev {
struct dma_device dma_dev;
u32 chan_num;
u32 chan_depth;
+ enum hisi_dma_reg_layout reg_layout;
+ void __iomem *queue_base; /* queue region start of register */
struct hisi_dma_chan chan[];
};
+#ifdef CONFIG_DEBUG_FS
+
+static const struct debugfs_reg32 hisi_dma_comm_chan_regs[] = {
+ {"DMA_QUEUE_SQ_DEPTH ", 0x0008ull},
+ {"DMA_QUEUE_SQ_TAIL_PTR ", 0x000Cull},
+ {"DMA_QUEUE_CQ_DEPTH ", 0x0018ull},
+ {"DMA_QUEUE_CQ_HEAD_PTR ", 0x001Cull},
+ {"DMA_QUEUE_CTRL0 ", 0x0020ull},
+ {"DMA_QUEUE_CTRL1 ", 0x0024ull},
+ {"DMA_QUEUE_FSM_STS ", 0x0030ull},
+ {"DMA_QUEUE_SQ_STS ", 0x0034ull},
+ {"DMA_QUEUE_CQ_TAIL_PTR ", 0x003Cull},
+ {"DMA_QUEUE_INT_STS ", 0x0040ull},
+ {"DMA_QUEUE_INT_MSK ", 0x0044ull},
+ {"DMA_QUEUE_INT_RO ", 0x006Cull},
+};
+
+static const struct debugfs_reg32 hisi_dma_hip08_chan_regs[] = {
+ {"DMA_QUEUE_BYTE_CNT ", 0x0038ull},
+ {"DMA_ERR_INT_NUM6 ", 0x0048ull},
+ {"DMA_QUEUE_DESP0 ", 0x0050ull},
+ {"DMA_QUEUE_DESP1 ", 0x0054ull},
+ {"DMA_QUEUE_DESP2 ", 0x0058ull},
+ {"DMA_QUEUE_DESP3 ", 0x005Cull},
+ {"DMA_QUEUE_DESP4 ", 0x0074ull},
+ {"DMA_QUEUE_DESP5 ", 0x0078ull},
+ {"DMA_QUEUE_DESP6 ", 0x007Cull},
+ {"DMA_QUEUE_DESP7 ", 0x0080ull},
+ {"DMA_ERR_INT_NUM0 ", 0x0084ull},
+ {"DMA_ERR_INT_NUM1 ", 0x0088ull},
+ {"DMA_ERR_INT_NUM2 ", 0x008Cull},
+ {"DMA_ERR_INT_NUM3 ", 0x0090ull},
+ {"DMA_ERR_INT_NUM4 ", 0x0094ull},
+ {"DMA_ERR_INT_NUM5 ", 0x0098ull},
+ {"DMA_QUEUE_SQ_STS2 ", 0x00A4ull},
+};
+
+static const struct debugfs_reg32 hisi_dma_hip09_chan_regs[] = {
+ {"DMA_QUEUE_ERR_INT_STS ", 0x0048ull},
+ {"DMA_QUEUE_ERR_INT_MSK ", 0x004Cull},
+ {"DFX_SQ_READ_ERR_PTR ", 0x0068ull},
+ {"DFX_DMA_ERR_INT_NUM0 ", 0x0084ull},
+ {"DFX_DMA_ERR_INT_NUM1 ", 0x0088ull},
+ {"DFX_DMA_ERR_INT_NUM2 ", 0x008Cull},
+ {"DFX_DMA_QUEUE_SQ_STS2 ", 0x00A4ull},
+};
+
+static const struct debugfs_reg32 hisi_dma_hip08_comm_regs[] = {
+ {"DMA_ECC_ERR_ADDR ", 0x2004ull},
+ {"DMA_ECC_ECC_CNT ", 0x2014ull},
+ {"COMMON_AND_CH_ERR_STS ", 0x2030ull},
+ {"LOCAL_CPL_ID_STS_0 ", 0x20E0ull},
+ {"LOCAL_CPL_ID_STS_1 ", 0x20E4ull},
+ {"LOCAL_CPL_ID_STS_2 ", 0x20E8ull},
+ {"LOCAL_CPL_ID_STS_3 ", 0x20ECull},
+ {"LOCAL_TLP_NUM ", 0x2158ull},
+ {"SQCQ_TLP_NUM ", 0x2164ull},
+ {"CPL_NUM ", 0x2168ull},
+ {"INF_BACK_PRESS_STS ", 0x2170ull},
+ {"DMA_CH_RAS_LEVEL ", 0x2184ull},
+ {"DMA_CM_RAS_LEVEL ", 0x2188ull},
+ {"DMA_CH_ERR_STS ", 0x2190ull},
+ {"DMA_CH_DONE_STS ", 0x2194ull},
+ {"DMA_SQ_TAG_STS_0 ", 0x21A0ull},
+ {"DMA_SQ_TAG_STS_1 ", 0x21A4ull},
+ {"DMA_SQ_TAG_STS_2 ", 0x21A8ull},
+ {"DMA_SQ_TAG_STS_3 ", 0x21ACull},
+ {"LOCAL_P_ID_STS_0 ", 0x21B0ull},
+ {"LOCAL_P_ID_STS_1 ", 0x21B4ull},
+ {"LOCAL_P_ID_STS_2 ", 0x21B8ull},
+ {"LOCAL_P_ID_STS_3 ", 0x21BCull},
+ {"DMA_PREBUFF_INFO_0 ", 0x2200ull},
+ {"DMA_CM_TABLE_INFO_0 ", 0x2220ull},
+ {"DMA_CM_CE_RO ", 0x2244ull},
+ {"DMA_CM_NFE_RO ", 0x2248ull},
+ {"DMA_CM_FE_RO ", 0x224Cull},
+};
+
+static const struct debugfs_reg32 hisi_dma_hip09_comm_regs[] = {
+ {"COMMON_AND_CH_ERR_STS ", 0x0030ull},
+ {"DMA_PORT_IDLE_STS ", 0x0150ull},
+ {"DMA_CH_RAS_LEVEL ", 0x0184ull},
+ {"DMA_CM_RAS_LEVEL ", 0x0188ull},
+ {"DMA_CM_CE_RO ", 0x0244ull},
+ {"DMA_CM_NFE_RO ", 0x0248ull},
+ {"DMA_CM_FE_RO ", 0x024Cull},
+ {"DFX_INF_BACK_PRESS_STS0 ", 0x1A40ull},
+ {"DFX_INF_BACK_PRESS_STS1 ", 0x1A44ull},
+ {"DFX_INF_BACK_PRESS_STS2 ", 0x1A48ull},
+ {"DFX_DMA_WRR_DISABLE ", 0x1A4Cull},
+ {"DFX_PA_REQ_TLP_NUM ", 0x1C00ull},
+ {"DFX_PA_BACK_TLP_NUM ", 0x1C04ull},
+ {"DFX_PA_RETRY_TLP_NUM ", 0x1C08ull},
+ {"DFX_LOCAL_NP_TLP_NUM ", 0x1C0Cull},
+ {"DFX_LOCAL_CPL_HEAD_TLP_NUM ", 0x1C10ull},
+ {"DFX_LOCAL_CPL_DATA_TLP_NUM ", 0x1C14ull},
+ {"DFX_LOCAL_CPL_EXT_DATA_TLP_NUM ", 0x1C18ull},
+ {"DFX_LOCAL_P_HEAD_TLP_NUM ", 0x1C1Cull},
+ {"DFX_LOCAL_P_ACK_TLP_NUM ", 0x1C20ull},
+ {"DFX_BUF_ALOC_PORT_REQ_NUM ", 0x1C24ull},
+ {"DFX_BUF_ALOC_PORT_RESULT_NUM ", 0x1C28ull},
+ {"DFX_BUF_FAIL_SIZE_NUM ", 0x1C2Cull},
+ {"DFX_BUF_ALOC_SIZE_NUM ", 0x1C30ull},
+ {"DFX_BUF_NP_RELEASE_SIZE_NUM ", 0x1C34ull},
+ {"DFX_BUF_P_RELEASE_SIZE_NUM ", 0x1C38ull},
+ {"DFX_BUF_PORT_RELEASE_SIZE_NUM ", 0x1C3Cull},
+ {"DFX_DMA_PREBUF_MEM0_ECC_ERR_ADDR ", 0x1CA8ull},
+ {"DFX_DMA_PREBUF_MEM0_ECC_CNT ", 0x1CACull},
+ {"DFX_DMA_LOC_NP_OSTB_ECC_ERR_ADDR ", 0x1CB0ull},
+ {"DFX_DMA_LOC_NP_OSTB_ECC_CNT ", 0x1CB4ull},
+ {"DFX_DMA_PREBUF_MEM1_ECC_ERR_ADDR ", 0x1CC0ull},
+ {"DFX_DMA_PREBUF_MEM1_ECC_CNT ", 0x1CC4ull},
+ {"DMA_CH_DONE_STS ", 0x02E0ull},
+ {"DMA_CH_ERR_STS ", 0x0320ull},
+};
+#endif /* CONFIG_DEBUG_FS*/
+
+static enum hisi_dma_reg_layout hisi_dma_get_reg_layout(struct pci_dev *pdev)
+{
+ if (pdev->revision == HISI_DMA_REVISION_HIP08B)
+ return HISI_DMA_REG_LAYOUT_HIP08;
+ else if (pdev->revision >= HISI_DMA_REVISION_HIP09A)
+ return HISI_DMA_REG_LAYOUT_HIP09;
+
+ return HISI_DMA_REG_LAYOUT_INVALID;
+}
+
+static u32 hisi_dma_get_chan_num(struct pci_dev *pdev)
+{
+ if (pdev->revision == HISI_DMA_REVISION_HIP08B)
+ return HISI_DMA_HIP08_CHAN_NUM;
+
+ return HISI_DMA_HIP09_CHAN_NUM;
+}
+
+static u32 hisi_dma_get_msi_num(struct pci_dev *pdev)
+{
+ if (pdev->revision == HISI_DMA_REVISION_HIP08B)
+ return HISI_DMA_HIP08_MSI_NUM;
+
+ return HISI_DMA_HIP09_MSI_NUM;
+}
+
+static u32 hisi_dma_get_queue_base(struct pci_dev *pdev)
+{
+ if (pdev->revision == HISI_DMA_REVISION_HIP08B)
+ return HISI_DMA_HIP08_Q_BASE;
+
+ return HISI_DMA_HIP09_Q_BASE;
+}
+
static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
{
return container_of(c, struct hisi_dma_chan, vc.chan);
@@ -121,7 +330,7 @@ static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
u32 val)
{
- writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
+ writel_relaxed(val, base + reg + index * HISI_DMA_Q_OFFSET);
}
static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
@@ -129,70 +338,103 @@ static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
u32 tmp;
tmp = readl_relaxed(addr);
- tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
+ tmp = val ? tmp | pos : tmp & ~pos;
writel_relaxed(tmp, addr);
}
static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
bool pause)
{
- void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
- HISI_DMA_OFFSET;
+ void __iomem *addr;
- hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
+ addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 +
+ index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_PAUSE, pause);
}
static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
bool enable)
{
- void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
- HISI_DMA_OFFSET;
+ void __iomem *addr;
- hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
+ addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 +
+ index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_EN, enable);
}
static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
- hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
- HISI_DMA_INT_STS_MASK);
+ void __iomem *q_base = hdma_dev->queue_base;
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK,
+ qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK);
+ else {
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK,
+ qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK,
+ qp_index,
+ HISI_DMA_HIP09_Q_ERR_INT_STS_MASK);
+ }
}
static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
- void __iomem *base = hdma_dev->base;
-
- hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
- HISI_DMA_INT_STS_MASK);
- hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
+ void __iomem *q_base = hdma_dev->queue_base;
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_STS,
+ qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK,
+ qp_index, 0);
+ } else {
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_STS,
+ qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_STS,
+ qp_index,
+ HISI_DMA_HIP09_Q_ERR_INT_STS_MASK);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK,
+ qp_index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK,
+ qp_index, 0);
+ }
}
static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
{
- void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
- HISI_DMA_OFFSET;
+ void __iomem *addr;
- hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
+ addr = hdma_dev->queue_base +
+ HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL1_QUEUE_RESET, 1);
}
static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
{
- hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
- hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+ void __iomem *q_base = hdma_dev->queue_base;
+
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0);
}
-static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan,
+ bool disable)
{
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
u32 index = chan->qp_num, tmp;
+ void __iomem *addr;
int ret;
hisi_dma_pause_dma(hdma_dev, index, true);
hisi_dma_enable_dma(hdma_dev, index, false);
hisi_dma_mask_irq(hdma_dev, index);
- ret = readl_relaxed_poll_timeout(hdma_dev->base +
- HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
- FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
+ addr = hdma_dev->queue_base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_Q_OFFSET;
+
+ ret = readl_relaxed_poll_timeout(addr, tmp,
+ FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) != RUN,
+ HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US);
if (ret) {
dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
WARN_ON(1);
@@ -201,12 +443,15 @@ static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
hisi_dma_do_reset(hdma_dev, index);
hisi_dma_reset_qp_point(hdma_dev, index);
hisi_dma_pause_dma(hdma_dev, index, false);
- hisi_dma_enable_dma(hdma_dev, index, true);
- hisi_dma_unmask_irq(hdma_dev, index);
- ret = readl_relaxed_poll_timeout(hdma_dev->base +
- HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
- FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
+ if (!disable) {
+ hisi_dma_enable_dma(hdma_dev, index, true);
+ hisi_dma_unmask_irq(hdma_dev, index);
+ }
+
+ ret = readl_relaxed_poll_timeout(addr, tmp,
+ FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) == IDLE,
+ HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US);
if (ret) {
dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
WARN_ON(1);
@@ -218,7 +463,7 @@ static void hisi_dma_free_chan_resources(struct dma_chan *c)
struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
- hisi_dma_reset_hw_chan(chan);
+ hisi_dma_reset_or_disable_hw_chan(chan, false);
vchan_free_chan_resources(&chan->vc);
memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
@@ -267,7 +512,6 @@ static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
vd = vchan_next_desc(&chan->vc);
if (!vd) {
- dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
chan->desc = NULL;
return;
}
@@ -288,8 +532,8 @@ static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;
/* update sq_tail to trigger a new task */
- hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
- chan->sq_tail);
+ hisi_dma_chan_write(hdma_dev->queue_base, HISI_DMA_Q_SQ_TAIL_PTR,
+ chan->qp_num, chan->sq_tail);
}
static void hisi_dma_issue_pending(struct dma_chan *c)
@@ -299,7 +543,7 @@ static void hisi_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vc.lock, flags);
- if (vchan_issue_pending(&chan->vc))
+ if (vchan_issue_pending(&chan->vc) && !chan->desc)
hisi_dma_start_transfer(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -363,26 +607,86 @@ static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
{
struct hisi_dma_chan *chan = &hdma_dev->chan[index];
+ void __iomem *q_base = hdma_dev->queue_base;
u32 hw_depth = hdma_dev->chan_depth - 1;
- void __iomem *base = hdma_dev->base;
+ void __iomem *addr;
+ u32 tmp;
/* set sq, cq base */
- hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_L, index,
lower_32_bits(chan->sq_dma));
- hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_H, index,
upper_32_bits(chan->sq_dma));
- hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_L, index,
lower_32_bits(chan->cq_dma));
- hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_H, index,
upper_32_bits(chan->cq_dma));
/* set sq, cq depth */
- hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
- hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_DEPTH, index, hw_depth);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_DEPTH, index, hw_depth);
/* init sq tail and cq head */
- hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
- hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0);
+
+ /* init error interrupt stats */
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM0, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM1, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM2, index, 0);
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM3,
+ index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM4,
+ index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM5,
+ index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM6,
+ index, 0);
+ /*
+ * init SQ/CQ direction selecting register.
+ * "0" is to local side and "1" is to remote side.
+ */
+ addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT, 0);
+
+ /*
+ * 0 - Continue to next descriptor if error occurs.
+ * 1 - Abort the DMA queue if error occurs.
+ */
+ hisi_dma_update_bit(addr,
+ HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN, 0);
+ } else {
+ addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET;
+
+ /*
+ * init SQ/CQ direction selecting register.
+ * "0" is to local side and "1" is to remote side.
+ */
+ hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT, 0);
+ hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT, 0);
+
+ /*
+ * 0 - Continue to next descriptor if error occurs.
+ * 1 - Abort the DMA queue if error occurs.
+ */
+
+ tmp = readl_relaxed(addr);
+ tmp &= ~HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN;
+ writel_relaxed(tmp, addr);
+
+ /*
+ * 0 - dma should process FLR with CPU.
+ * 1 - dma does not process FLR, only cpu processes FLR.
+ */
+ addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE +
+ index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_HIP09_DMA_FLR_DISABLE_B, 0);
+
+ addr = q_base + HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE, 1);
+ }
}
static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
@@ -394,7 +698,7 @@ static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
- hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+ hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true);
}
static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
@@ -426,24 +730,23 @@ static irqreturn_t hisi_dma_irq(int irq, void *data)
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
struct hisi_dma_desc *desc;
struct hisi_dma_cqe *cqe;
+ void __iomem *q_base;
spin_lock(&chan->vc.lock);
desc = chan->desc;
cqe = chan->cq + chan->cq_head;
+ q_base = hdma_dev->queue_base;
if (desc) {
+ chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR,
+ chan->qp_num, chan->cq_head);
if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) {
- chan->cq_head = (chan->cq_head + 1) %
- hdma_dev->chan_depth;
- hisi_dma_chan_write(hdma_dev->base,
- HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
- chan->cq_head);
vchan_cookie_complete(&desc->vd);
+ hisi_dma_start_transfer(chan);
} else {
dev_err(&hdma_dev->pdev->dev, "task error!\n");
}
-
- chan->desc = NULL;
}
spin_unlock(&chan->vc.lock);
@@ -497,16 +800,169 @@ static void hisi_dma_disable_hw_channels(void *data)
static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
enum hisi_dma_mode mode)
{
- writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ writel_relaxed(mode == RC ? 1 : 0,
+ hdma_dev->base + HISI_DMA_HIP08_MODE);
}
+static void hisi_dma_init_hw(struct hisi_dma_dev *hdma_dev)
+{
+ void __iomem *addr;
+ int i;
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) {
+ for (i = 0; i < HISI_DMA_HIP09_MAX_PORT_NUM; i++) {
+ addr = hdma_dev->base + HISI_DMA_HIP09_PORT_CFG_REG(i);
+ hisi_dma_update_bit(addr,
+ HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B, 1);
+ }
+ }
+}
+
+static void hisi_dma_init_dma_dev(struct hisi_dma_dev *hdma_dev)
+{
+ struct dma_device *dma_dev;
+
+ dma_dev = &hdma_dev->dma_dev;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
+ dma_dev->device_tx_status = hisi_dma_tx_status;
+ dma_dev->device_issue_pending = hisi_dma_issue_pending;
+ dma_dev->device_terminate_all = hisi_dma_terminate_all;
+ dma_dev->device_synchronize = hisi_dma_synchronize;
+ dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+ dma_dev->dev = &hdma_dev->pdev->dev;
+ INIT_LIST_HEAD(&dma_dev->channels);
+}
+
+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+static struct debugfs_reg32 *hisi_dma_get_ch_regs(struct hisi_dma_dev *hdma_dev,
+ u32 *regs_sz)
+{
+ struct device *dev = &hdma_dev->pdev->dev;
+ struct debugfs_reg32 *regs;
+ u32 regs_sz_comm;
+
+ regs_sz_comm = ARRAY_SIZE(hisi_dma_comm_chan_regs);
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ *regs_sz = regs_sz_comm + ARRAY_SIZE(hisi_dma_hip08_chan_regs);
+ else
+ *regs_sz = regs_sz_comm + ARRAY_SIZE(hisi_dma_hip09_chan_regs);
+
+ regs = devm_kcalloc(dev, *regs_sz, sizeof(struct debugfs_reg32),
+ GFP_KERNEL);
+ if (!regs)
+ return NULL;
+ memcpy(regs, hisi_dma_comm_chan_regs, sizeof(hisi_dma_comm_chan_regs));
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ memcpy(regs + regs_sz_comm, hisi_dma_hip08_chan_regs,
+ sizeof(hisi_dma_hip08_chan_regs));
+ else
+ memcpy(regs + regs_sz_comm, hisi_dma_hip09_chan_regs,
+ sizeof(hisi_dma_hip09_chan_regs));
+
+ return regs;
+}
+
+static int hisi_dma_create_chan_dir(struct hisi_dma_dev *hdma_dev)
+{
+ char dir_name[HISI_DMA_MAX_DIR_NAME_LEN];
+ struct debugfs_regset32 *regsets;
+ struct debugfs_reg32 *regs;
+ struct dentry *chan_dir;
+ struct device *dev;
+ u32 regs_sz;
+ int ret;
+ int i;
+
+ dev = &hdma_dev->pdev->dev;
+
+ regsets = devm_kcalloc(dev, hdma_dev->chan_num,
+ sizeof(*regsets), GFP_KERNEL);
+ if (!regsets)
+ return -ENOMEM;
+
+ regs = hisi_dma_get_ch_regs(hdma_dev, &regs_sz);
+ if (!regs)
+ return -ENOMEM;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ regsets[i].regs = regs;
+ regsets[i].nregs = regs_sz;
+ regsets[i].base = hdma_dev->queue_base + i * HISI_DMA_Q_OFFSET;
+ regsets[i].dev = dev;
+
+ memset(dir_name, 0, HISI_DMA_MAX_DIR_NAME_LEN);
+ ret = sprintf(dir_name, "channel%d", i);
+ if (ret < 0)
+ return ret;
+
+ chan_dir = debugfs_create_dir(dir_name,
+ hdma_dev->dma_dev.dbg_dev_root);
+ debugfs_create_regset32("regs", 0444, chan_dir, &regsets[i]);
+ }
+
+ return 0;
+}
+
+static void hisi_dma_create_debugfs(struct hisi_dma_dev *hdma_dev)
+{
+ struct debugfs_regset32 *regset;
+ struct device *dev;
+ int ret;
+
+ dev = &hdma_dev->pdev->dev;
+
+ if (hdma_dev->dma_dev.dbg_dev_root == NULL)
+ return;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return;
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
+ regset->regs = hisi_dma_hip08_comm_regs;
+ regset->nregs = ARRAY_SIZE(hisi_dma_hip08_comm_regs);
+ } else {
+ regset->regs = hisi_dma_hip09_comm_regs;
+ regset->nregs = ARRAY_SIZE(hisi_dma_hip09_comm_regs);
+ }
+ regset->base = hdma_dev->base;
+ regset->dev = dev;
+
+ debugfs_create_regset32("regs", 0444,
+ hdma_dev->dma_dev.dbg_dev_root, regset);
+
+ ret = hisi_dma_create_chan_dir(hdma_dev);
+ if (ret < 0)
+ dev_info(&hdma_dev->pdev->dev, "fail to create debugfs for channels!\n");
+}
+#else
+static void hisi_dma_create_debugfs(struct hisi_dma_dev *hdma_dev) { }
+#endif /* CONFIG_DEBUG_FS*/
+/* --- debugfs implementation --- */
+
static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ enum hisi_dma_reg_layout reg_layout;
struct device *dev = &pdev->dev;
struct hisi_dma_dev *hdma_dev;
struct dma_device *dma_dev;
+ u32 chan_num;
+ u32 msi_num;
int ret;
+ reg_layout = hisi_dma_get_reg_layout(pdev);
+ if (reg_layout == HISI_DMA_REG_LAYOUT_INVALID) {
+ dev_err(dev, "unsupported device!\n");
+ return -EINVAL;
+ }
+
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(dev, "failed to enable device mem!\n");
@@ -523,40 +979,37 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- hdma_dev = devm_kzalloc(dev, struct_size(hdma_dev, chan, HISI_DMA_CHAN_NUM), GFP_KERNEL);
+ chan_num = hisi_dma_get_chan_num(pdev);
+ hdma_dev = devm_kzalloc(dev, struct_size(hdma_dev, chan, chan_num),
+ GFP_KERNEL);
if (!hdma_dev)
return -EINVAL;
hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
hdma_dev->pdev = pdev;
- hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
+ hdma_dev->chan_num = chan_num;
+ hdma_dev->reg_layout = reg_layout;
+ hdma_dev->queue_base = hdma_dev->base + hisi_dma_get_queue_base(pdev);
pci_set_drvdata(pdev, hdma_dev);
pci_set_master(pdev);
+ msi_num = hisi_dma_get_msi_num(pdev);
+
/* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
- ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
- PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, msi_num, msi_num, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to allocate MSI vectors!\n");
return ret;
}
- dma_dev = &hdma_dev->dma_dev;
- dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
- dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
- dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
- dma_dev->device_tx_status = hisi_dma_tx_status;
- dma_dev->device_issue_pending = hisi_dma_issue_pending;
- dma_dev->device_terminate_all = hisi_dma_terminate_all;
- dma_dev->device_synchronize = hisi_dma_synchronize;
- dma_dev->directions = BIT(DMA_MEM_TO_MEM);
- dma_dev->dev = dev;
- INIT_LIST_HEAD(&dma_dev->channels);
+ hisi_dma_init_dma_dev(hdma_dev);
hisi_dma_set_mode(hdma_dev, RC);
+ hisi_dma_init_hw(hdma_dev);
+
ret = hisi_dma_enable_hw_channels(hdma_dev);
if (ret < 0) {
dev_err(dev, "failed to enable hw channel!\n");
@@ -568,11 +1021,16 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ dma_dev = &hdma_dev->dma_dev;
ret = dmaenginem_async_device_register(dma_dev);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "failed to register device!\n");
+ return ret;
+ }
+
+ hisi_dma_create_debugfs(hdma_dev);
- return ret;
+ return 0;
}
static const struct pci_device_id hisi_dma_pci_tbl[] = {
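Among the hisi_dma changes above, hisi_dma_update_bit() now takes a register mask built with BIT()/GENMASK() rather than a bit position, so one read-modify-write helper serves both the single-bit HIP08 fields and the multi-bit HIP09 fields. A minimal sketch of that helper shape, assuming a kernel context (the register field names are hypothetical):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_CTRL0_QUEUE_EN	BIT(0)
#define MY_CTRL0_ERR_ABORT_EN	GENMASK(31, 28)

static void update_field(void __iomem *addr, u32 mask, bool set)
{
	u32 tmp;

	/* Read-modify-write: set or clear every bit covered by 'mask'. */
	tmp = readl_relaxed(addr);
	tmp = set ? (tmp | mask) : (tmp & ~mask);
	writel_relaxed(tmp, addr);
}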
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 92caae55aece..af5a2e252c25 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -16,12 +16,20 @@
* port 3, and so on.
*/
+#include <linux/bits.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/percpu-defs.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
#include "hsu.h"
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 9e5956345748..3bca577b98a1 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -10,7 +10,11 @@
#ifndef __DMA_HSU_H__
#define __DMA_HSU_H__
-#include <linux/spinlock.h>
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/io.h>
+#include <linux/types.h>
+
#include <linux/dma/hsu.h>
#include "../virt-dma.h"
@@ -36,11 +40,11 @@
/* Bits in HSU_CH_SR */
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
-#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define HSU_CH_SR_DESCTO_ANY GENMASK(11, 8)
#define HSU_CH_SR_CHE BIT(15)
#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
-#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
-#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
+#define HSU_CH_SR_DESCE_ANY GENMASK(19, 16)
+#define HSU_CH_SR_CDESC_ANY GENMASK(31, 30)
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index 6a2df3dd78d0..0fcc0c0c22fc 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -26,29 +27,32 @@
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
- u32 dmaisr;
- u32 status;
+ unsigned long dmaisr;
unsigned short i;
+ u32 status;
int ret = 0;
int err;
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
- for (i = 0; i < chip->hsu->nr_channels; i++) {
- if (dmaisr & 0x1) {
- err = hsu_dma_get_status(chip, i, &status);
- if (err > 0)
- ret |= 1;
- else if (err == 0)
- ret |= hsu_dma_do_irq(chip, i, status);
- }
- dmaisr >>= 1;
+ for_each_set_bit(i, &dmaisr, chip->hsu->nr_channels) {
+ err = hsu_dma_get_status(chip, i, &status);
+ if (err > 0)
+ ret |= 1;
+ else if (err == 0)
+ ret |= hsu_dma_do_irq(chip, i, status);
}
return IRQ_RETVAL(ret);
}
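The rewritten handler walks only the set bits of DMAISR. A plain-C stand-in for the for_each_set_bit() traversal used above (illustrative only; the kernel helper also handles bitmaps longer than one word):

#include <stdio.h>

int main(void)
{
	unsigned long dmaisr = 0x29;	/* channels 0, 3 and 5 raised an interrupt */
	unsigned int nr_channels = 8;
	unsigned int i;

	/* equivalent of: for_each_set_bit(i, &dmaisr, nr_channels) */
	for (i = 0; i < nr_channels; i++) {
		if (!(dmaisr & (1ul << i)))
			continue;
		printf("handle channel %u\n", i);	/* prints 0, 3, 5 */
	}
	return 0;
}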
+static void hsu_pci_dma_remove(void *chip)
+{
+ hsu_dma_remove(chip);
+}
+
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct hsu_dma_chip *chip;
int ret;
@@ -87,9 +91,13 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
+ ret = devm_add_action_or_reset(dev, hsu_pci_dma_remove, chip);
if (ret)
- goto err_register_irq;
+ return ret;
+
+ ret = devm_request_irq(dev, chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
+ if (ret)
+ return ret;
/*
* On Intel Tangier B0 and Anniedale the interrupt line, disregarding
@@ -105,18 +113,6 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, chip);
return 0;
-
-err_register_irq:
- hsu_dma_remove(chip);
- return ret;
-}
-
-static void hsu_pci_remove(struct pci_dev *pdev)
-{
- struct hsu_dma_chip *chip = pci_get_drvdata(pdev);
-
- free_irq(chip->irq, chip);
- hsu_dma_remove(chip);
}
static const struct pci_device_id hsu_pci_id_table[] = {
@@ -130,7 +126,6 @@ static struct pci_driver hsu_pci_driver = {
.name = "hsu_dma_pci",
.id_table = hsu_pci_id_table,
.probe = hsu_pci_probe,
- .remove = hsu_pci_remove,
};
module_pci_driver(hsu_pci_driver);
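Dropping hsu_pci_remove() relies on devm teardown ordering: devres entries are released in reverse order of registration, so the IRQ requested after devm_add_action_or_reset() is freed first and hsu_pci_dma_remove() runs afterwards, preserving the old free_irq()/hsu_dma_remove() sequence. A kernel-style sketch of the pattern with placeholder names (not compilable outside a kernel tree):

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static void demo_teardown(void *data)
{
	/* runs last: the devm-managed IRQ below has already been freed */
}

static int demo_probe(struct device *dev, int irq, void *ctx)
{
	int ret;

	/* registered first, therefore released last */
	ret = devm_add_action_or_reset(dev, demo_teardown, ctx);
	if (ret)
		return ret;

	/* registered second, therefore freed first on unbind or probe failure */
	return devm_request_irq(dev, irq, demo_irq, 0, "demo", ctx);
}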
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 5a8cc52c1abf..2c1e6f6daa62 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -196,6 +196,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
}
wq->state = IDXD_WQ_ENABLED;
+ set_bit(wq->id, idxd->wq_enable_map);
dev_dbg(dev, "WQ %d enabled\n", wq->id);
return 0;
}
@@ -223,6 +224,7 @@ int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
if (reset_config)
idxd_wq_disable_cleanup(wq);
+ clear_bit(wq->id, idxd->wq_enable_map);
wq->state = IDXD_WQ_DISABLED;
dev_dbg(dev, "WQ %d disabled\n", wq->id);
return 0;
@@ -258,7 +260,6 @@ void idxd_wq_reset(struct idxd_wq *wq)
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
idxd_wq_disable_cleanup(wq);
- wq->state = IDXD_WQ_DISABLED;
}
int idxd_wq_map_portal(struct idxd_wq *wq)
@@ -378,17 +379,20 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
lockdep_assert_held(&wq->wq_lock);
+ wq->state = IDXD_WQ_DISABLED;
memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
wq->threshold = 0;
wq->priority = 0;
- wq->ats_dis = 0;
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+ clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+ if (wq->opcap_bmap)
+ bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
@@ -705,6 +709,8 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
group->tc_a = -1;
group->tc_b = -1;
}
+ group->desc_progress_limit = 0;
+ group->batch_progress_limit = 0;
}
}
@@ -761,10 +767,10 @@ static void idxd_group_config_write(struct idxd_group *group)
/* setup GRPFLAGS */
grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
- iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
- dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
group->id, grpcfg_offset,
- ioread32(idxd->reg_base + grpcfg_offset));
+ ioread64(idxd->reg_base + grpcfg_offset));
}
static int idxd_groups_config_write(struct idxd_device *idxd)
@@ -807,7 +813,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
u32 wq_offset;
- int i;
+ int i, n;
if (!wq->group)
return 0;
@@ -859,12 +865,23 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->bof = 1;
if (idxd->hw.wq_cap.wq_ats_support)
- wq->wqcfg->wq_ats_disable = wq->ats_dis;
+ wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
/* bytes 12-15 */
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
+ /* bytes 32-63 */
+ if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
+ memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
+ for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
+ int pos = n % BITS_PER_LONG_LONG;
+ int idx = n / BITS_PER_LONG_LONG;
+
+ wq->wqcfg->op_config[idx] |= BIT(pos);
+ }
+ }
+
dev_dbg(dev, "WQ %d CFGs\n", wq->id);
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
@@ -914,6 +931,9 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
else
group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
+
+ group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
+ group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
}
}
@@ -1096,8 +1116,8 @@ static void idxd_group_load_config(struct idxd_group *group)
}
grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
- group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
- dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
group->id, grpcfg_offset, group->grpcfg.flags.bits);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index fed0dfc1eaa8..1196ab342f01 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -11,6 +11,7 @@
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/ioasid.h>
+#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
@@ -95,6 +96,8 @@ struct idxd_group {
u8 rdbufs_reserved;
int tc_a;
int tc_b;
+ int desc_progress_limit;
+ int batch_progress_limit;
};
struct idxd_pmu {
@@ -132,6 +135,7 @@ enum idxd_wq_state {
enum idxd_wq_flag {
WQ_FLAG_DEDICATED = 0,
WQ_FLAG_BLOCK_ON_FAULT,
+ WQ_FLAG_ATS_DISABLE,
};
enum idxd_wq_type {
@@ -194,6 +198,8 @@ struct idxd_wq {
enum idxd_wq_state state;
unsigned long flags;
union wqcfg *wqcfg;
+ unsigned long *opcap_bmap;
+
struct dsa_hw_desc **hw_descs;
int num_descs;
union {
@@ -208,7 +214,6 @@ struct idxd_wq {
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
- bool ats_dis;
};
struct idxd_engine {
@@ -299,6 +304,7 @@ struct idxd_device {
int rdbuf_limit;
int nr_rdbufs; /* non-reserved read buffers */
unsigned int wqcfg_size;
+ unsigned long *wq_enable_map;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
@@ -308,6 +314,8 @@ struct idxd_device {
struct work_struct work;
struct idxd_pmu *idxd_pmu;
+
+ unsigned long *opcap_bmap;
};
/* IDXD software descriptor */
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index aa3478257ddb..2b18d512cbfc 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -151,6 +151,12 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
if (!idxd->wqs)
return -ENOMEM;
+ idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->wq_enable_map) {
+ kfree(idxd->wqs);
+ return -ENOMEM;
+ }
+
for (i = 0; i < idxd->max_wqs; i++) {
wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
if (!wq) {
@@ -185,6 +191,16 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
rc = -ENOMEM;
goto err;
}
+
+ if (idxd->hw.wq_cap.op_config) {
+ wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ if (!wq->opcap_bmap) {
+ put_device(conf_dev);
+ rc = -ENOMEM;
+ goto err;
+ }
+ bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+ }
idxd->wqs[i] = wq;
}
@@ -369,6 +385,19 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
+static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
+{
+ int i, j, nr;
+
+ for (i = 0, nr = 0; i < count; i++) {
+ for (j = 0; j < BITS_PER_LONG_LONG; j++) {
+ if (val[i] & BIT(j))
+ set_bit(nr, bmap);
+ nr++;
+ }
+ }
+}
+
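multi_u64_to_bmap() linearises an array of u64 capability words into one bitmap, mapping word i, bit j to bit number i * 64 + j. A userspace check of that mapping; demo_set_bit()/demo_test_bit() are plain-C stand-ins, not the kernel helpers.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define DEMO_NBITS	256
#define LONG_BITS	(8 * sizeof(unsigned long))

static void demo_set_bit(int nr, unsigned long *bmap)
{
	bmap[nr / LONG_BITS] |= 1ul << (nr % LONG_BITS);
}

static int demo_test_bit(int nr, const unsigned long *bmap)
{
	return !!(bmap[nr / LONG_BITS] & (1ul << (nr % LONG_BITS)));
}

int main(void)
{
	uint64_t opcap[4] = { 0x5, 0x1, 0, 0 };	/* operations 0, 2 and 64 supported */
	unsigned long bmap[DEMO_NBITS / LONG_BITS];
	int i, j, nr;

	memset(bmap, 0, sizeof(bmap));
	for (i = 0, nr = 0; i < 4; i++)
		for (j = 0; j < 64; j++, nr++)
			if (opcap[i] & (1ull << j))
				demo_set_bit(nr, bmap);

	assert(demo_test_bit(0, bmap) && demo_test_bit(2, bmap) && demo_test_bit(64, bmap));
	assert(!demo_test_bit(1, bmap) && !demo_test_bit(63, bmap));
	return 0;
}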
static void idxd_read_caps(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -427,6 +456,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
IDXD_OPCAP_OFFSET + i * sizeof(u64));
dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
}
+ multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
}
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
@@ -448,6 +478,12 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
if (idxd->id < 0)
return NULL;
+ idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->opcap_bmap) {
+ ida_free(&idxd_ida, idxd->id);
+ return NULL;
+ }
+
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 743ead5ebc57..aa314ebec587 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -17,12 +17,6 @@ enum irq_work_type {
IRQ_WORK_PROCESS_FAULT,
};
-struct idxd_fault {
- struct work_struct work;
- u64 addr;
- struct idxd_device *idxd;
-};
-
struct idxd_resubmit {
struct work_struct work;
struct idxd_desc *desc;
@@ -49,11 +43,12 @@ static void idxd_device_reinit(struct work_struct *work)
goto out;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
+ if (test_bit(i, idxd->wq_enable_map)) {
+ struct idxd_wq *wq = idxd->wqs[i];
- if (wq->state == IDXD_WQ_ENABLED) {
rc = idxd_wq_enable(wq);
if (rc < 0) {
+ clear_bit(i, idxd->wq_enable_map);
dev_warn(dev, "Unable to re-enable wq %s\n",
dev_name(wq_confdev(wq)));
}
@@ -324,13 +319,11 @@ halt:
idxd->state = IDXD_DEV_HALTED;
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
- spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
- spin_unlock(&idxd->dev_lock);
return -ENXIO;
}
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 02449aa9c454..fe3b8d04f9db 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -54,7 +54,8 @@ union wq_cap_reg {
u64 priority:1;
u64 occupancy:1;
u64 occupancy_int:1;
- u64 rsvd3:10;
+ u64 op_config:1;
+ u64 rsvd3:9;
};
u64 bits;
} __packed;
@@ -67,7 +68,8 @@ union group_cap_reg {
u64 total_rdbufs:8; /* formerly total_tokens */
u64 rdbuf_ctrl:1; /* formerly token_en */
u64 rdbuf_limit:1; /* formerly token_limit */
- u64 rsvd:46;
+ u64 progress_limit:1; /* descriptor and batch descriptor */
+ u64 rsvd:45;
};
u64 bits;
} __packed;
@@ -90,6 +92,8 @@ struct opcap {
u64 bits[4];
};
+#define IDXD_MAX_OPCAP_BITS 256U
+
#define IDXD_OPCAP_OFFSET 0x40
#define IDXD_TABLE_OFFSET 0x60
@@ -285,16 +289,20 @@ union msix_perm {
union group_flags {
struct {
- u32 tc_a:3;
- u32 tc_b:3;
- u32 rsvd:1;
- u32 use_rdbuf_limit:1;
- u32 rdbufs_reserved:8;
- u32 rsvd2:4;
- u32 rdbufs_allowed:8;
- u32 rsvd3:4;
+ u64 tc_a:3;
+ u64 tc_b:3;
+ u64 rsvd:1;
+ u64 use_rdbuf_limit:1;
+ u64 rdbufs_reserved:8;
+ u64 rsvd2:4;
+ u64 rdbufs_allowed:8;
+ u64 rsvd3:4;
+ u64 desc_progress_limit:2;
+ u64 rsvd4:2;
+ u64 batch_progress_limit:2;
+ u64 rsvd5:26;
};
- u32 bits;
+ u64 bits;
} __packed;
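The widened group_flags union keeps the GRPFLGCFG layout readable: the named bitfields alias the raw 64-bit value read/written with ioread64()/iowrite64() in device.c. A userspace sketch of where the new fields land, assuming the LSB-first bitfield allocation used by GCC/Clang on little-endian Linux; the union mirrors the one above.

#include <assert.h>
#include <stdint.h>

union demo_group_flags {
	struct {
		uint64_t tc_a:3;
		uint64_t tc_b:3;
		uint64_t rsvd:1;
		uint64_t use_rdbuf_limit:1;
		uint64_t rdbufs_reserved:8;
		uint64_t rsvd2:4;
		uint64_t rdbufs_allowed:8;
		uint64_t rsvd3:4;
		uint64_t desc_progress_limit:2;
		uint64_t rsvd4:2;
		uint64_t batch_progress_limit:2;
		uint64_t rsvd5:26;
	};
	uint64_t bits;
};

int main(void)
{
	union demo_group_flags f = { .bits = 0 };

	f.desc_progress_limit = 2;	/* bits 33:32 */
	f.batch_progress_limit = 1;	/* bits 37:36 */
	assert(f.bits == ((2ull << 32) | (1ull << 36)));
	return 0;
}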
struct grpcfg {
@@ -348,8 +356,11 @@ union wqcfg {
/* bytes 28-31 */
u32 rsvd8;
+
+ /* bytes 32-63 */
+ u64 op_config[4];
};
- u32 bits[8];
+ u32 bits[16];
} __packed;
#define WQCFG_PASID_IDX 2
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3f262a57441b..bdaccf9e0436 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -443,6 +443,67 @@ static struct device_attribute dev_attr_group_traffic_class_b =
__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
group_traffic_class_b_store);
+static ssize_t group_desc_progress_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
+}
+
+static ssize_t group_desc_progress_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+ int val, rc;
+
+ rc = kstrtoint(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (val & ~GENMASK(1, 0))
+ return -EINVAL;
+
+ group->desc_progress_limit = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_desc_progress_limit =
+ __ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
+ group_desc_progress_limit_store);
+
+static ssize_t group_batch_progress_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
+}
+
+static ssize_t group_batch_progress_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+ int val, rc;
+
+ rc = kstrtoint(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (val & ~GENMASK(1, 0))
+ return -EINVAL;
+
+ group->batch_progress_limit = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_batch_progress_limit =
+ __ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
+ group_batch_progress_limit_store);
static struct attribute *idxd_group_attributes[] = {
&dev_attr_group_work_queues.attr,
&dev_attr_group_engines.attr,
@@ -454,11 +515,35 @@ static struct attribute *idxd_group_attributes[] = {
&dev_attr_group_read_buffers_reserved.attr,
&dev_attr_group_traffic_class_a.attr,
&dev_attr_group_traffic_class_b.attr,
+ &dev_attr_group_desc_progress_limit.attr,
+ &dev_attr_group_batch_progress_limit.attr,
NULL,
};
+static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
+ struct idxd_device *idxd)
+{
+ return (attr == &dev_attr_group_desc_progress_limit.attr ||
+ attr == &dev_attr_group_batch_progress_limit.attr) &&
+ !idxd->hw.group_cap.progress_limit;
+}
+
+static umode_t idxd_group_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct idxd_group *group = confdev_to_group(dev);
+ struct idxd_device *idxd = group->idxd;
+
+ if (idxd_group_attr_progress_limit_invisible(attr, idxd))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group idxd_group_attribute_group = {
.attrs = idxd_group_attributes,
+ .is_visible = idxd_group_attr_visible,
};
static const struct attribute_group *idxd_group_attribute_groups[] = {
@@ -973,7 +1058,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *
{
struct idxd_wq *wq = confdev_to_wq(dev);
- return sysfs_emit(buf, "%u\n", wq->ats_dis);
+ return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
}
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
@@ -994,7 +1079,10 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
if (rc < 0)
return rc;
- wq->ats_dis = ats_dis;
+ if (ats_dis)
+ set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
+ else
+ clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
return count;
}
@@ -1055,6 +1143,68 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
static struct device_attribute dev_attr_wq_enqcmds_retries =
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+static ssize_t wq_op_config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
+}
+
+static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
+{
+ int bit;
+
+ /*
+ * The OPCAP is defined as 256 bits, each bit representing one operation the
+ * device supports. Iterate through the set bits of the input mask and return
+ * an error if the mask has a bit set that is not also set in the device's
+ * OPCAP.
+ */
+ for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
+ if (!test_bit(bit, idxd->opcap_bmap))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+ unsigned long *opmask;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ if (!opmask)
+ return -ENOMEM;
+
+ rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
+ if (rc < 0)
+ goto err;
+
+ rc = idxd_verify_supported_opcap(idxd, opmask);
+ if (rc < 0)
+ goto err;
+
+ bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);
+
+ bitmap_free(opmask);
+ return count;
+
+err:
+ bitmap_free(opmask);
+ return rc;
+}
+
+static struct device_attribute dev_attr_wq_op_config =
+ __ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1072,11 +1222,33 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_ats_disable.attr,
&dev_attr_wq_occupancy.attr,
&dev_attr_wq_enqcmds_retries.attr,
+ &dev_attr_wq_op_config.attr,
NULL,
};
+static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
+ struct idxd_device *idxd)
+{
+ return attr == &dev_attr_wq_op_config.attr &&
+ !idxd->hw.wq_cap.op_config;
+}
+
+static umode_t idxd_wq_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd_wq_attr_op_config_invisible(attr, idxd))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group idxd_wq_attribute_group = {
.attrs = idxd_wq_attributes,
+ .is_visible = idxd_wq_attr_visible,
};
static const struct attribute_group *idxd_wq_attribute_groups[] = {
@@ -1088,6 +1260,7 @@ static void idxd_conf_wq_release(struct device *dev)
{
struct idxd_wq *wq = confdev_to_wq(dev);
+ bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
kfree(wq);
}
@@ -1177,14 +1350,8 @@ static ssize_t op_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- int i, rc = 0;
-
- for (i = 0; i < 4; i++)
- rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
- rc--;
- rc += sysfs_emit_at(buf, rc, "\n");
- return rc;
+ return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);
@@ -1405,9 +1572,11 @@ static void idxd_conf_device_release(struct device *dev)
struct idxd_device *idxd = confdev_to_idxd(dev);
kfree(idxd->groups);
+ bitmap_free(idxd->wq_enable_map);
kfree(idxd->wqs);
kfree(idxd->engines);
ida_free(&idxd_ida, idxd->id);
+ bitmap_free(idxd->opcap_bmap);
kfree(idxd);
}
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 37ff4ec7db76..e2070df6cad2 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -656,7 +656,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
if (active - i == 0) {
dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
__func__);
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
/* microsecond delay by sysfs variable per pending descriptor */
@@ -682,7 +682,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (chanerr &
(IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
}
@@ -879,7 +879,7 @@ static void check_active(struct ioatdma_chan *ioat_chan)
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
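The mod_timer() -> mod_timer_pending() changes above only matter when the timer may already have been stopped: mod_timer_pending() re-arms a timer only if it is still pending, whereas mod_timer() would (re)activate it unconditionally. A kernel-style sketch of the difference, roughly equivalent and ignoring races:

/* old behaviour: always (re)activates the timer, even if it had been stopped */
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);

/* new behaviour: a no-op when the timer is no longer pending */
mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);

/* mod_timer_pending() behaves roughly like: */
if (timer_pending(&ioat_chan->timer))
	mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);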
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 140cfe3782fb..35e06b382603 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -196,10 +196,8 @@ extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
-extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
-extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;
static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 994fc4d2aca4..dc147cc2436e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -670,7 +670,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
return mxs_chan->status;
}
-static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
+static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
@@ -741,7 +741,7 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
ofdma->of_node);
}
-static int __init mxs_dma_probe(struct platform_device *pdev)
+static int mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct mxs_dma_type *dma_type;
@@ -839,10 +839,7 @@ static struct platform_driver mxs_dma_driver = {
.name = "mxs-dma",
.of_match_table = mxs_dma_dt_ids,
},
+ .probe = mxs_dma_probe,
};
-static int __init mxs_dma_module_init(void)
-{
- return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
-}
-subsys_initcall(mxs_dma_module_init);
+builtin_platform_driver(mxs_dma_driver);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 09915a5cba3e..0d9257fbdfb0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2752,7 +2752,6 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
return NULL;
pch->cyclic = true;
- desc->txd.flags = flags;
return &desc->txd;
}
@@ -2804,8 +2803,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->bytes_requested = len;
- desc->txd.flags = flags;
-
return &desc->txd;
}
@@ -2889,7 +2886,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
/* Return the last desc in the chain */
- desc->txd.flags = flg;
return &desc->txd;
}
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 8f0c9c4e2efd..3f56514bbef8 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1150,9 +1150,9 @@ static void gpi_ev_tasklet(unsigned long data)
{
struct gpii *gpii = (struct gpii *)data;
- read_lock_bh(&gpii->pm_lock);
+ read_lock(&gpii->pm_lock);
if (!REG_ACCESS_VALID(gpii->pm_state)) {
- read_unlock_bh(&gpii->pm_lock);
+ read_unlock(&gpii->pm_lock);
dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
TO_GPI_PM_STR(gpii->pm_state));
return;
@@ -1163,7 +1163,7 @@ static void gpi_ev_tasklet(unsigned long data)
/* enable IEOB, switching back to interrupts */
gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
- read_unlock_bh(&gpii->pm_lock);
+ read_unlock(&gpii->pm_lock);
}
/* marks all pending events for the channel as stale */
@@ -2288,6 +2288,7 @@ static int gpi_probe(struct platform_device *pdev)
static const struct of_device_id gpi_of_match[] = {
{ .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
{ .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
+ { .compatible = "qcom,sm6350-gpi-dma", .data = (void *)0x10000 },
{ .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index facdacf8aede..d56caf1681ff 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -379,13 +379,13 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
if (blk_size < 0) {
dev_err(adev->dev, "invalid burst value: %d\n",
burst);
- return ERR_PTR(-EINVAL);
+ return NULL;
}
crci = achan->crci & 0xf;
if (!crci || achan->crci > 0x1f) {
dev_err(adev->dev, "invalid crci value\n");
- return ERR_PTR(-EINVAL);
+ return NULL;
}
}
@@ -403,8 +403,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
}
async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
- if (!async_desc)
- return ERR_PTR(-ENOMEM);
+ if (!async_desc) {
+ dev_err(adev->dev, "not enough memory for async_desc struct\n");
+ return NULL;
+ }
async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
async_desc->crci = crci;
@@ -414,8 +416,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
sizeof(*cple) + 2 * ADM_DESC_ALIGN;
async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
- if (!async_desc->cpl)
+ if (!async_desc->cpl) {
+ dev_err(adev->dev, "not enough memory for cpl struct\n");
goto free;
+ }
async_desc->adev = adev;
@@ -437,8 +441,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
async_desc->dma_len,
DMA_TO_DEVICE);
- if (dma_mapping_error(adev->dev, async_desc->dma_addr))
+ if (dma_mapping_error(adev->dev, async_desc->dma_addr)) {
+ dev_err(adev->dev, "dma mapping error for cpl\n");
goto free;
+ }
cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);
@@ -454,7 +460,7 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
free:
kfree(async_desc);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
/**
@@ -494,7 +500,7 @@ static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
spin_lock_irqsave(&achan->vc.lock, flag);
memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
- if (cfg->peripheral_size == sizeof(config))
+ if (cfg->peripheral_size == sizeof(*config))
achan->crci = config->crci;
spin_unlock_irqrestore(&achan->vc.lock, flag);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index f6ed7e889781..a09eeb545f7d 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1094,7 +1094,7 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
INIT_LIST_HEAD(&dmadev->channels);
/*
- * Register as many many memcpy as we have physical channels,
+ * Register as many memcpy as we have physical channels,
* we won't always be able to use all but the code will have
* to cope with that situation.
*/
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 4f8b8498c5c6..6b524eb6bcf3 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -405,10 +405,8 @@ static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
chan = &pdma->chans[i];
irq = platform_get_irq(pdev, i * 2);
- if (irq < 0) {
- dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
+ if (irq < 0)
return -EINVAL;
- }
r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
dev_name(&pdev->dev), (void *)chan);
@@ -420,10 +418,8 @@ static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
chan->txirq = irq;
irq = platform_get_irq(pdev, (i * 2) + 1);
- if (irq < 0) {
- dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
+ if (irq < 0)
return -EINVAL;
- }
r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
dev_name(&pdev->dev), (void *)chan);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 13d12d660cc2..641d689d17ff 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -103,8 +103,8 @@ struct rcar_dmac_desc_page {
struct list_head node;
union {
- struct rcar_dmac_desc descs[0];
- struct rcar_dmac_xfer_chunk chunks[0];
+ DECLARE_FLEX_ARRAY(struct rcar_dmac_desc, descs);
+ DECLARE_FLEX_ARRAY(struct rcar_dmac_xfer_chunk, chunks);
};
};
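DECLARE_FLEX_ARRAY() is used here because ISO C does not allow a bare flexible array member inside a union, and the kernel is moving away from the zero-length 'name[0]' form. A userspace sketch of the trick, simplified from the macro in include/linux/stddef.h; the empty struct is a GCC/Clang extension and the type names are illustrative.

#include <assert.h>
#include <stddef.h>

/* simplified stand-in for the kernel's DECLARE_FLEX_ARRAY() */
#define DEMO_FLEX_ARRAY(TYPE, NAME)		\
	struct {				\
		struct { } __empty_##NAME;	\
		TYPE NAME[];			\
	}

struct demo_desc  { int a; };
struct demo_chunk { long b; };

struct demo_page {
	int node;
	union {
		DEMO_FLEX_ARRAY(struct demo_desc, descs);
		DEMO_FLEX_ARRAY(struct demo_chunk, chunks);
	};
};

int main(void)
{
	/* both flexible arrays start right after the fixed part of the page */
	assert(offsetof(struct demo_page, descs) == offsetof(struct demo_page, chunks));
	return 0;
}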
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index adb25a11c70f..4891a1767e5a 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -9,6 +9,7 @@
* Pierre-Yves Mordret <pierre-yves.mordret@st.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
@@ -32,8 +33,10 @@
#define STM32_DMA_LISR 0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR 0x0004 /* DMA High Int Status Reg */
+#define STM32_DMA_ISR(n) (((n) & 4) ? STM32_DMA_HISR : STM32_DMA_LISR)
#define STM32_DMA_LIFCR 0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR 0x000c /* DMA High Int Flag Clear Reg */
+#define STM32_DMA_IFCR(n) (((n) & 4) ? STM32_DMA_HIFCR : STM32_DMA_LIFCR)
#define STM32_DMA_TCI BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI BIT(3) /* Transfer Error Interrupt */
@@ -43,23 +46,22 @@
| STM32_DMA_TEI \
| STM32_DMA_DMEI \
| STM32_DMA_FEI)
+/*
+ * If (chan->id % 4) is 2 or 3, left shift the mask by 16 bits;
+ * if (chan->id % 4) is 1 or 3, additionally left shift the mask by 6 bits.
+ */
+#define STM32_DMA_FLAGS_SHIFT(n) ({ typeof(n) (_n) = (n); \
+ (((_n) & 2) << 3) | (((_n) & 1) * 6); })
/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x) (0x0010 + 0x18 * (x)) /* x = 0..7 */
-#define STM32_DMA_SCR_REQ(n) ((n & 0x7) << 25)
+#define STM32_DMA_SCR_REQ_MASK GENMASK(27, 25)
#define STM32_DMA_SCR_MBURST_MASK GENMASK(24, 23)
-#define STM32_DMA_SCR_MBURST(n) ((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK GENMASK(22, 21)
-#define STM32_DMA_SCR_PBURST(n) ((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK GENMASK(17, 16)
-#define STM32_DMA_SCR_PL(n) ((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK GENMASK(14, 13)
-#define STM32_DMA_SCR_MSIZE(n) ((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK GENMASK(12, 11)
-#define STM32_DMA_SCR_PSIZE(n) ((n & 0x3) << 11)
-#define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6)
-#define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6)
#define STM32_DMA_SCR_TRBUFF BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */
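STM32_DMA_FLAGS_SHIFT() encodes the per-channel flag offset inside the LISR/HISR (and LIFCR/HIFCR) registers described in the comment above it. A self-contained check of the values it yields for channels 0..7; the expression is copied under a demo name and relies on GCC/Clang statement expressions.

#include <assert.h>

/* same expression as STM32_DMA_FLAGS_SHIFT() in the patch */
#define DEMO_FLAGS_SHIFT(n)	({ typeof(n) _n = (n); \
				   (((_n) & 2) << 3) | (((_n) & 1) * 6); })

int main(void)
{
	/* channels 0..3 live in LISR/LIFCR, 4..7 in HISR/HIFCR;
	 * the bit offset repeats every four channels
	 */
	assert(DEMO_FLAGS_SHIFT(0) == 0);
	assert(DEMO_FLAGS_SHIFT(1) == 6);
	assert(DEMO_FLAGS_SHIFT(2) == 16);
	assert(DEMO_FLAGS_SHIFT(3) == 22);
	assert(DEMO_FLAGS_SHIFT(4) == 0);	/* same offsets, high registers */
	assert(DEMO_FLAGS_SHIFT(7) == 22);
	return 0;
}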
@@ -96,7 +98,6 @@
/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x) (0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK GENMASK(1, 0)
-#define STM32_DMA_SFCR_FTH(n) (n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK (STM32_DMA_SFCR_FEIE \
@@ -137,11 +138,9 @@
/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
-#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK BIT(2)
-#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
#define STM32_DMA_ALT_ACK_MODE_MASK BIT(4)
-#define STM32_DMA_ALT_ACK_MODE_GET(n) (((n) & STM32_DMA_ALT_ACK_MODE_MASK) >> 4)
+#define STM32_DMA_MDMA_STREAM_ID_MASK GENMASK(19, 16)
enum stm32_dma_width {
STM32_DMA_BYTE,
@@ -195,6 +194,19 @@ struct stm32_dma_desc {
struct stm32_dma_sg_req sg_req[];
};
+/**
+ * struct stm32_dma_mdma_config - STM32 DMA MDMA configuration
+ * @stream_id: DMA request to trigger STM32 MDMA transfer
+ * @ifcr: DMA interrupt flag clear register address,
+ * used by STM32 MDMA to clear DMA Transfer Complete flag
+ * @tcf: DMA Transfer Complete flag
+ */
+struct stm32_dma_mdma_config {
+ u32 stream_id;
+ u32 ifcr;
+ u32 tcf;
+};
+
struct stm32_dma_chan {
struct virt_dma_chan vchan;
bool config_init;
@@ -209,6 +221,8 @@ struct stm32_dma_chan {
u32 mem_burst;
u32 mem_width;
enum dma_status status;
+ bool trig_mdma;
+ struct stm32_dma_mdma_config mdma_config;
};
struct stm32_dma_device {
@@ -388,6 +402,13 @@ static int stm32_dma_slave_config(struct dma_chan *c,
memcpy(&chan->dma_sconfig, config, sizeof(*config));
+ /* Check if user is requesting DMA to trigger STM32 MDMA */
+ if (config->peripheral_size) {
+ config->peripheral_config = &chan->mdma_config;
+ config->peripheral_size = sizeof(chan->mdma_config);
+ chan->trig_mdma = true;
+ }
+
chan->config_init = true;
return 0;
@@ -401,17 +422,10 @@ static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
/*
* Read "flags" from DMA_xISR register corresponding to the selected
* DMA channel at the correct bit offset inside that register.
- *
- * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
- * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
*/
- if (chan->id & 4)
- dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
- else
- dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);
-
- flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
+ dma_isr = stm32_dma_read(dmadev, STM32_DMA_ISR(chan->id));
+ flags = dma_isr >> STM32_DMA_FLAGS_SHIFT(chan->id);
return flags & STM32_DMA_MASKI;
}
@@ -424,17 +438,11 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
/*
* Write "flags" to the DMA_xIFCR register corresponding to the selected
* DMA channel at the correct bit offset inside that register.
- *
- * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
- * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
*/
flags &= STM32_DMA_MASKI;
- dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
+ dma_ifcr = flags << STM32_DMA_FLAGS_SHIFT(chan->id);
- if (chan->id & 4)
- stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
- else
- stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
+ stm32_dma_write(dmadev, STM32_DMA_IFCR(chan->id), dma_ifcr);
}
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
@@ -576,6 +584,10 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
sg_req = &chan->desc->sg_req[chan->next_sg];
reg = &sg_req->chan_reg;
+ /* When DMA triggers STM32 MDMA, DMA Transfer Complete is managed by STM32 MDMA */
+ if (chan->trig_mdma && chan->dma_sconfig.direction != DMA_MEM_TO_DEV)
+ reg->dma_scr &= ~STM32_DMA_SCR_TCIE;
+
reg->dma_scr &= ~STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
@@ -725,6 +737,8 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
if (chan->desc->cyclic) {
vchan_cyclic_callback(&chan->desc->vdesc);
+ if (chan->trig_mdma)
+ return;
stm32_dma_sg_inc(chan);
/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
@@ -861,7 +875,8 @@ static int stm32_dma_resume(struct dma_chan *c)
sg_req = &chan->desc->sg_req[chan->next_sg - 1];
ndtr = sg_req->chan_reg.dma_sndtr;
- offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr);
+ offset = (ndtr - chan_reg.dma_sndtr);
+ offset <<= FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, chan_reg.dma_scr);
spar = sg_req->chan_reg.dma_spar;
sm0ar = sg_req->chan_reg.dma_sm0ar;
sm1ar = sg_req->chan_reg.dma_sm1ar;
@@ -973,16 +988,16 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
if (src_burst_size < 0)
return src_burst_size;
- dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
- STM32_DMA_SCR_PSIZE(dst_bus_width) |
- STM32_DMA_SCR_MSIZE(src_bus_width) |
- STM32_DMA_SCR_PBURST(dst_burst_size) |
- STM32_DMA_SCR_MBURST(src_burst_size);
+ dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_DEV) |
+ FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, dst_bus_width) |
+ FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, src_bus_width) |
+ FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dst_burst_size) |
+ FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, src_burst_size);
/* Set FIFO threshold */
chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
- chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
+ chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);
/* Set peripheral address */
chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
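The FIELD_PREP()/FIELD_GET() conversions in this and the following hunks keep the register values identical; the shift is simply derived from the mask. A minimal userspace check of the equivalence; the two macros are simplified from linux/bitfield.h and use the GCC __builtin_ctz() builtin.

#include <assert.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define DEMO_SCR_PSIZE_MASK	GENMASK(12, 11)

int main(void)
{
	unsigned int psize = 2;		/* 32-bit peripheral data size */
	unsigned int scr = FIELD_PREP(DEMO_SCR_PSIZE_MASK, psize);

	assert(scr == (psize << 11));	/* same value as the old (n & 0x3) << 11 */
	assert(FIELD_GET(DEMO_SCR_PSIZE_MASK, scr) == psize);
	return 0;
}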
@@ -1030,16 +1045,16 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
if (dst_burst_size < 0)
return dst_burst_size;
- dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
- STM32_DMA_SCR_PSIZE(src_bus_width) |
- STM32_DMA_SCR_MSIZE(dst_bus_width) |
- STM32_DMA_SCR_PBURST(src_burst_size) |
- STM32_DMA_SCR_MBURST(dst_burst_size);
+ dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_DEV_TO_MEM) |
+ FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, src_bus_width) |
+ FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, dst_bus_width) |
+ FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, src_burst_size) |
+ FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dst_burst_size);
/* Set FIFO threshold */
chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
- chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
+ chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);
/* Set peripheral address */
chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
@@ -1099,6 +1114,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
else
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+ /* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
+ if (chan->trig_mdma && sg_len > 1)
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+
for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
sg_dma_len(sg),
@@ -1120,6 +1139,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
+ if (chan->trig_mdma)
+ desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
}
@@ -1207,8 +1228,11 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
+ if (chan->trig_mdma)
+ desc->sg_req[i].chan_reg.dma_sm1ar += period_len;
desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
- buf_addr += period_len;
+ if (!chan->trig_mdma)
+ buf_addr += period_len;
}
desc->num_sgs = num_periods;
@@ -1247,16 +1271,15 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
desc->sg_req[i].chan_reg.dma_scr =
- STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
- STM32_DMA_SCR_PBURST(dma_burst) |
- STM32_DMA_SCR_MBURST(dma_burst) |
+ FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_MEM) |
+ FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dma_burst) |
+ FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dma_burst) |
STM32_DMA_SCR_MINC |
STM32_DMA_SCR_PINC |
STM32_DMA_SCR_TCIE |
STM32_DMA_SCR_TEIE;
desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
- desc->sg_req[i].chan_reg.dma_sfcr |=
- STM32_DMA_SFCR_FTH(threshold);
+ desc->sg_req[i].chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, threshold);
desc->sg_req[i].chan_reg.dma_spar = src + offset;
desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
@@ -1275,7 +1298,7 @@ static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
- width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
+ width = FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, dma_scr);
ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
return ndtr << width;
@@ -1481,16 +1504,17 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
stm32_dma_clear_reg(&chan->chan_reg);
chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
- chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);
+ chan->chan_reg.dma_scr |= FIELD_PREP(STM32_DMA_SCR_REQ_MASK, cfg->request_line);
/* Enable Interrupts */
chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
- chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
- if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
+ chan->threshold = FIELD_GET(STM32_DMA_THRESHOLD_FTR_MASK, cfg->features);
+ if (FIELD_GET(STM32_DMA_DIRECT_MODE_MASK, cfg->features))
chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
- if (STM32_DMA_ALT_ACK_MODE_GET(cfg->features))
+ if (FIELD_GET(STM32_DMA_ALT_ACK_MODE_MASK, cfg->features))
chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
+ chan->mdma_config.stream_id = FIELD_GET(STM32_DMA_MDMA_STREAM_ID_MASK, cfg->features);
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
@@ -1630,6 +1654,12 @@ static int stm32_dma_probe(struct platform_device *pdev)
chan->id = i;
chan->vchan.desc_free = stm32_dma_desc_free;
vchan_init(&chan->vchan, dd);
+
+ chan->mdma_config.ifcr = res->start;
+ chan->mdma_config.ifcr += STM32_DMA_IFCR(chan->id);
+
+ chan->mdma_config.tcf = STM32_DMA_TCI;
+ chan->mdma_config.tcf <<= STM32_DMA_FLAGS_SHIFT(chan->id);
}
ret = dma_async_device_register(dd);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index eee0c5aa5fb5..ee3cbbf51006 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -39,13 +39,13 @@ struct stm32_dmamux_data {
u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
spinlock_t lock; /* Protects register access */
- unsigned long *dma_inuse; /* Used DMA channel */
+ DECLARE_BITMAP(dma_inuse, STM32_DMAMUX_MAX_DMA_REQUESTS); /* Used DMA channel */
u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
* in suspend
*/
u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
* [0] holds number of DMA Masters.
- * To be kept at very end end of this structure
+ * To be kept at very end of this structure
*/
};
@@ -147,7 +147,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
mux->request = dma_spec->args[0];
/* craft DMA spec */
- dma_spec->args[3] = dma_spec->args[2];
+ dma_spec->args[3] = dma_spec->args[2] | mux->chan_id << 16;
dma_spec->args[2] = dma_spec->args[1];
dma_spec->args[1] = 0;
dma_spec->args[0] = mux->chan_id - min;
@@ -229,12 +229,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
stm32_dmamux->dma_requests = dma_req;
stm32_dmamux->dma_reqs[0] = count;
- stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
- BITS_TO_LONGS(dma_req),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!stm32_dmamux->dma_inuse)
- return -ENOMEM;
if (device_property_read_u32(&pdev->dev, "dma-requests",
&stm32_dmamux->dmamux_requests)) {
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index b11927ed4367..e28acbcb53f4 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -199,6 +199,7 @@ struct stm32_mdma_chan_config {
u32 transfer_config;
u32 mask_addr;
u32 mask_data;
+ bool m2m_hw; /* True when MDMA is triggered by STM32 DMA */
};
struct stm32_mdma_hwdesc {
@@ -227,6 +228,12 @@ struct stm32_mdma_desc {
struct stm32_mdma_desc_node node[];
};
+struct stm32_mdma_dma_config {
+ u32 request; /* STM32 DMA channel stream id, triggering MDMA */
+ u32 cmar; /* STM32 DMA interrupt flag clear register address */
+ u32 cmdr; /* STM32 DMA Transfer Complete flag */
+};
+
struct stm32_mdma_chan {
struct virt_dma_chan vchan;
struct dma_pool *desc_pool;
@@ -539,13 +546,23 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
dst_addr = chan->dma_config.dst_addr;
/* Set device data size */
+ if (chan_config->m2m_hw)
+ dst_addr_width = stm32_mdma_get_max_width(dst_addr, buf_len,
+ STM32_MDMA_MAX_BUF_LEN);
dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
if (dst_bus_width < 0)
return dst_bus_width;
ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
+ if (chan_config->m2m_hw) {
+ ctcr &= ~STM32_MDMA_CTCR_DINCOS_MASK;
+ ctcr |= STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+ }
/* Set device burst value */
+ if (chan_config->m2m_hw)
+ dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
+
dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
dst_maxburst,
dst_addr_width);
@@ -588,13 +605,24 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
src_addr = chan->dma_config.src_addr;
/* Set device data size */
+ if (chan_config->m2m_hw)
+ src_addr_width = stm32_mdma_get_max_width(src_addr, buf_len,
+ STM32_MDMA_MAX_BUF_LEN);
+
src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
if (src_bus_width < 0)
return src_bus_width;
ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
+ if (chan_config->m2m_hw) {
+ ctcr &= ~STM32_MDMA_CTCR_SINCOS_MASK;
+ ctcr |= STM32_MDMA_CTCR_SINCOS(src_bus_width);
+ }
/* Set device burst value */
+ if (chan_config->m2m_hw)
+ src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
+
src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
src_maxburst,
src_addr_width);
@@ -702,11 +730,15 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct dma_slave_config *dma_config = &chan->dma_config;
+ struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct scatterlist *sg;
dma_addr_t src_addr, dst_addr;
- u32 ccr, ctcr, ctbr;
+ u32 m2m_hw_period, ccr, ctcr, ctbr;
int i, ret = 0;
+ if (chan_config->m2m_hw)
+ m2m_hw_period = sg_dma_len(sgl);
+
for_each_sg(sgl, sg, sg_len, i) {
if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
dev_err(chan2dev(chan), "Invalid block len\n");
@@ -716,6 +748,8 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = dma_config->dst_addr;
+ if (chan_config->m2m_hw && (i & 1))
+ dst_addr += m2m_hw_period;
ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
&ctcr, &ctbr, src_addr,
sg_dma_len(sg));
@@ -723,6 +757,8 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
src_addr);
} else {
src_addr = dma_config->src_addr;
+ if (chan_config->m2m_hw && (i & 1))
+ src_addr += m2m_hw_period;
dst_addr = sg_dma_address(sg);
ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
&ctcr, &ctbr, dst_addr,
@@ -755,6 +791,7 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct stm32_mdma_desc *desc;
int i, ret;
@@ -777,6 +814,21 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
if (ret < 0)
goto xfer_setup_err;
+ /*
+ * In the case of an M2M HW transfer triggered by STM32 DMA, the MDMA must not
+ * clear the transfer complete flag itself, so that the CPU can rearm the
+ * STM32 DMA with the next sg element and update some data in the dmaengine
+ * framework.
+ */
+ if (chan_config->m2m_hw && direction == DMA_MEM_TO_DEV) {
+ struct stm32_mdma_hwdesc *hwdesc;
+
+ for (i = 0; i < sg_len; i++) {
+ hwdesc = desc->node[i].hwdesc;
+ hwdesc->cmar = 0;
+ hwdesc->cmdr = 0;
+ }
+ }
+
desc->cyclic = false;
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -798,6 +850,7 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct dma_slave_config *dma_config = &chan->dma_config;
+ struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct stm32_mdma_desc *desc;
dma_addr_t src_addr, dst_addr;
u32 ccr, ctcr, ctbr, count;
@@ -858,8 +911,12 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
if (direction == DMA_MEM_TO_DEV) {
src_addr = buf_addr + i * period_len;
dst_addr = dma_config->dst_addr;
+ if (chan_config->m2m_hw && (i & 1))
+ dst_addr += period_len;
} else {
src_addr = dma_config->src_addr;
+ if (chan_config->m2m_hw && (i & 1))
+ src_addr += period_len;
dst_addr = buf_addr + i * period_len;
}
@@ -1244,6 +1301,17 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
memcpy(&chan->dma_config, config, sizeof(*config));
+ /* Check if user is requesting STM32 DMA to trigger MDMA */
+ if (config->peripheral_size) {
+ struct stm32_mdma_dma_config *mdma_config;
+
+ mdma_config = (struct stm32_mdma_dma_config *)chan->dma_config.peripheral_config;
+ chan->chan_config.request = mdma_config->request;
+ chan->chan_config.mask_addr = mdma_config->cmar;
+ chan->chan_config.mask_data = mdma_config->cmdr;
+ chan->chan_config.m2m_hw = true;
+ }
+
return 0;
}
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 4cbca80ee16e..fa06d7e6d8e3 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -352,12 +352,6 @@ static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
edma_modify(ecc, offset + (i << 2), and, or);
}
-static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
- unsigned or)
-{
- edma_or(ecc, offset + (i << 2), or);
-}
-
static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
unsigned or)
{
@@ -370,11 +364,6 @@ static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}
-static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
-{
- return edma_read(ecc, EDMA_SHADOW0 + offset);
-}
-
static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
int offset, int i)
{
@@ -393,36 +382,12 @@ static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}
-static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
- int param_no)
-{
- return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
-}
-
-static inline void edma_param_write(struct edma_cc *ecc, int offset,
- int param_no, unsigned val)
-{
- edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
-}
-
static inline void edma_param_modify(struct edma_cc *ecc, int offset,
int param_no, unsigned and, unsigned or)
{
edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}
-static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
- unsigned and)
-{
- edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
-}
-
-static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
- unsigned or)
-{
- edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
-}
-
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@@ -743,11 +708,6 @@ static void edma_free_channel(struct edma_chan *echan)
edma_setup_interrupt(echan, false);
}
-static inline struct edma_cc *to_edma_cc(struct dma_device *d)
-{
- return container_of(d, struct edma_cc, dma_slave);
-}
-
static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
return container_of(c, struct edma_chan, vchan.chan);
diff --git a/drivers/dma/ti/k3-psil-j7200.c b/drivers/dma/ti/k3-psil-j7200.c
index 5ea63ea74822..e3feff869991 100644
--- a/drivers/dma/ti/k3-psil-j7200.c
+++ b/drivers/dma/ti/k3-psil-j7200.c
@@ -143,6 +143,57 @@ static struct psil_ep j7200_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j7200_dst_ep_map[] = {
+ /* PDMA_MCASP - McASP0-2 */
+ PSIL_PDMA_MCASP(0xc400),
+ PSIL_PDMA_MCASP(0xc401),
+ PSIL_PDMA_MCASP(0xc402),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc600),
+ PSIL_PDMA_XY_PKT(0xc601),
+ PSIL_PDMA_XY_PKT(0xc602),
+ PSIL_PDMA_XY_PKT(0xc603),
+ PSIL_PDMA_XY_PKT(0xc604),
+ PSIL_PDMA_XY_PKT(0xc605),
+ PSIL_PDMA_XY_PKT(0xc606),
+ PSIL_PDMA_XY_PKT(0xc607),
+ PSIL_PDMA_XY_PKT(0xc608),
+ PSIL_PDMA_XY_PKT(0xc609),
+ PSIL_PDMA_XY_PKT(0xc60a),
+ PSIL_PDMA_XY_PKT(0xc60b),
+ PSIL_PDMA_XY_PKT(0xc60c),
+ PSIL_PDMA_XY_PKT(0xc60d),
+ PSIL_PDMA_XY_PKT(0xc60e),
+ PSIL_PDMA_XY_PKT(0xc60f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0xc610),
+ PSIL_PDMA_XY_PKT(0xc611),
+ PSIL_PDMA_XY_PKT(0xc612),
+ PSIL_PDMA_XY_PKT(0xc613),
+ PSIL_PDMA_XY_PKT(0xc614),
+ PSIL_PDMA_XY_PKT(0xc615),
+ PSIL_PDMA_XY_PKT(0xc616),
+ PSIL_PDMA_XY_PKT(0xc617),
+ PSIL_PDMA_XY_PKT(0xc618),
+ PSIL_PDMA_XY_PKT(0xc619),
+ PSIL_PDMA_XY_PKT(0xc61a),
+ PSIL_PDMA_XY_PKT(0xc61b),
+ PSIL_PDMA_XY_PKT(0xc61c),
+ PSIL_PDMA_XY_PKT(0xc61d),
+ PSIL_PDMA_XY_PKT(0xc61e),
+ PSIL_PDMA_XY_PKT(0xc61f),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0xc700),
+ PSIL_PDMA_XY_PKT(0xc701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0xc702),
+ PSIL_PDMA_XY_PKT(0xc703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0xc704),
+ PSIL_PDMA_XY_PKT(0xc705),
+ PSIL_PDMA_XY_PKT(0xc706),
+ PSIL_PDMA_XY_PKT(0xc707),
+ PSIL_PDMA_XY_PKT(0xc708),
+ PSIL_PDMA_XY_PKT(0xc709),
/* CPSW5 */
PSIL_ETHERNET(0xca00),
PSIL_ETHERNET(0xca01),
@@ -161,6 +212,22 @@ static struct psil_ep j7200_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
+ /* MCU_PDMA_MISC_G0 - SPI0 */
+ PSIL_PDMA_XY_PKT(0xf100),
+ PSIL_PDMA_XY_PKT(0xf101),
+ PSIL_PDMA_XY_PKT(0xf102),
+ PSIL_PDMA_XY_PKT(0xf103),
+ /* MCU_PDMA_MISC_G1 - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0xf200),
+ PSIL_PDMA_XY_PKT(0xf201),
+ PSIL_PDMA_XY_PKT(0xf202),
+ PSIL_PDMA_XY_PKT(0xf203),
+ PSIL_PDMA_XY_PKT(0xf204),
+ PSIL_PDMA_XY_PKT(0xf205),
+ PSIL_PDMA_XY_PKT(0xf206),
+ PSIL_PDMA_XY_PKT(0xf207),
+ /* MCU_PDMA_MISC_G2 - UART0 */
+ PSIL_PDMA_XY_PKT(0xf300),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
index 34e3fc565a37..e7c83d668bb6 100644
--- a/drivers/dma/ti/k3-psil-j721e.c
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -266,6 +266,69 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xc205),
PSIL_ETHERNET(0xc206),
PSIL_ETHERNET(0xc207),
+ /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+ PSIL_PDMA_MCASP(0xc400),
+ PSIL_PDMA_MCASP(0xc401),
+ PSIL_PDMA_MCASP(0xc402),
+ /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ PSIL_PDMA_MCASP(0xc503),
+ PSIL_PDMA_MCASP(0xc504),
+ PSIL_PDMA_MCASP(0xc505),
+ PSIL_PDMA_MCASP(0xc506),
+ PSIL_PDMA_MCASP(0xc507),
+ PSIL_PDMA_MCASP(0xc508),
+ /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+ PSIL_PDMA_XY_PKT(0xc600),
+ PSIL_PDMA_XY_PKT(0xc601),
+ PSIL_PDMA_XY_PKT(0xc602),
+ PSIL_PDMA_XY_PKT(0xc603),
+ PSIL_PDMA_XY_PKT(0xc604),
+ PSIL_PDMA_XY_PKT(0xc605),
+ PSIL_PDMA_XY_PKT(0xc606),
+ PSIL_PDMA_XY_PKT(0xc607),
+ /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+ PSIL_PDMA_XY_PKT(0xc60c),
+ PSIL_PDMA_XY_PKT(0xc60d),
+ PSIL_PDMA_XY_PKT(0xc60e),
+ PSIL_PDMA_XY_PKT(0xc60f),
+ PSIL_PDMA_XY_PKT(0xc610),
+ PSIL_PDMA_XY_PKT(0xc611),
+ PSIL_PDMA_XY_PKT(0xc612),
+ PSIL_PDMA_XY_PKT(0xc613),
+ /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+ PSIL_PDMA_XY_PKT(0xc618),
+ PSIL_PDMA_XY_PKT(0xc619),
+ PSIL_PDMA_XY_PKT(0xc61a),
+ PSIL_PDMA_XY_PKT(0xc61b),
+ PSIL_PDMA_XY_PKT(0xc61c),
+ PSIL_PDMA_XY_PKT(0xc61d),
+ PSIL_PDMA_XY_PKT(0xc61e),
+ PSIL_PDMA_XY_PKT(0xc61f),
+ /* PDMA11 (PDMA_MISC_G3) */
+ PSIL_PDMA_XY_PKT(0xc624),
+ PSIL_PDMA_XY_PKT(0xc625),
+ PSIL_PDMA_XY_PKT(0xc626),
+ PSIL_PDMA_XY_PKT(0xc627),
+ PSIL_PDMA_XY_PKT(0xc628),
+ PSIL_PDMA_XY_PKT(0xc629),
+ PSIL_PDMA_XY_PKT(0xc630),
+ PSIL_PDMA_XY_PKT(0xc63a),
+ /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+ PSIL_PDMA_XY_PKT(0xc700),
+ PSIL_PDMA_XY_PKT(0xc701),
+ /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+ PSIL_PDMA_XY_PKT(0xc702),
+ PSIL_PDMA_XY_PKT(0xc703),
+ /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+ PSIL_PDMA_XY_PKT(0xc704),
+ PSIL_PDMA_XY_PKT(0xc705),
+ PSIL_PDMA_XY_PKT(0xc706),
+ PSIL_PDMA_XY_PKT(0xc707),
+ PSIL_PDMA_XY_PKT(0xc708),
+ PSIL_PDMA_XY_PKT(0xc709),
/* CPSW9 */
PSIL_ETHERNET(0xca00),
PSIL_ETHERNET(0xca01),
@@ -284,6 +347,22 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0xf100),
+ PSIL_PDMA_XY_PKT(0xf101),
+ PSIL_PDMA_XY_PKT(0xf102),
+ PSIL_PDMA_XY_PKT(0xf103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0xf200),
+ PSIL_PDMA_XY_PKT(0xf201),
+ PSIL_PDMA_XY_PKT(0xf202),
+ PSIL_PDMA_XY_PKT(0xf203),
+ PSIL_PDMA_XY_PKT(0xf204),
+ PSIL_PDMA_XY_PKT(0xf205),
+ PSIL_PDMA_XY_PKT(0xf206),
+ PSIL_PDMA_XY_PKT(0xf207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0xf300),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 2f0d2c68c93c..7b5081989b3d 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -263,6 +263,7 @@ struct udma_chan_config {
enum udma_tp_level channel_tpl; /* Channel Throughput Level */
u32 tr_trigger_type;
+ unsigned long tx_flags;
/* PKDMA mapped channel */
int mapped_channel_id;
@@ -300,8 +301,6 @@ struct udma_chan {
struct udma_tx_drain tx_drain;
- u32 bcnt; /* number of bytes completed since the start of the channel */
-
/* Channel configuration parameters */
struct udma_chan_config config;
@@ -757,6 +756,20 @@ static void udma_reset_rings(struct udma_chan *uc)
}
}
+static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
+{
+ if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ } else {
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ if (!uc->bchan)
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
+}
+
static void udma_reset_counters(struct udma_chan *uc)
{
u32 val;
@@ -790,8 +803,6 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
-
- uc->bcnt = 0;
}
static int udma_reset_chan(struct udma_chan *uc, bool hard)
@@ -1045,9 +1056,14 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
u32 peer_bcnt, bcnt;
- /* Only TX towards PDMA is affected */
+ /*
+ * Only TX towards PDMA is affected.
+ * If DMA_PREP_INTERRUPT is not set by the consumer, skip the transfer
+ * completion calculation; the consumer must ensure that there is no
+ * stale data left in the DMA fabric in this case.
+ */
if (uc->config.ep_type == PSIL_EP_NATIVE ||
- uc->config.dir != DMA_MEM_TO_DEV)
+ uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
return true;
peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
@@ -1115,7 +1131,7 @@ static void udma_check_tx_completion(struct work_struct *work)
if (uc->desc) {
struct udma_desc *d = uc->desc;
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
break;
@@ -1168,7 +1184,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
if (udma_is_desc_really_done(uc, d)) {
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
} else {
@@ -1204,7 +1220,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
/* TODO: figure out the real amount of data */
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
}
@@ -3408,6 +3424,8 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!burst)
burst = 1;
+ uc->config.tx_flags = tx_flags;
+
if (uc->config.pkt_mode)
d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
context);
@@ -3809,7 +3827,6 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
}
- bcnt -= uc->bcnt;
if (bcnt && !(bcnt % uc->desc->residue))
residue = 0;
else
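
The k3-udma hunks above drop the software-accumulated uc->bcnt offset in favour of udma_decrement_byte_counters(), which writes the completed byte count back into the real-time counter registers so that a later residue query reads only the in-flight amount. The stand-alone C model below is illustrative only (not driver code; the write-to-decrement behaviour of the RT counters is simplified into a plain subtraction):

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_bcnt;			/* models UDMA_CHAN_RT_BCNT_REG */

static void hw_moves_data(uint32_t bytes)
{
	hw_bcnt += bytes;			/* hardware counts transferred bytes */
}

/*
 * Models udma_decrement_byte_counters(): completed bytes are subtracted from
 * the hardware counter instead of being remembered in a uc->bcnt field.
 */
static void descriptor_completed(uint32_t bytes)
{
	hw_bcnt -= bytes;
}

int main(void)
{
	hw_moves_data(4096);		/* descriptor 1 (4 KiB) fully transferred */
	descriptor_completed(4096);	/* counters rewound, no software offset kept */
	hw_moves_data(1024);		/* descriptor 2 (4 KiB) partly done */

	/* residue query reads the counter directly, as udma_tx_status() now does */
	printf("in-flight: %u bytes, residue: %u bytes\n", hw_bcnt, 4096 - hw_bcnt);
	return 0;
}
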
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 3f4ee3954384..21472a5d7636 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -796,6 +796,17 @@ static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
}
/**
+ * zynqmp_dma_synchronize - Synchronizes the termination of transfers to the current context.
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_synchronize(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ tasklet_kill(&chan->tasklet);
+}
+
+/**
* zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
* @dchan: DMA channel
* @dma_dst: Destination buffer address
@@ -1057,6 +1068,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p = &zdev->common;
p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p->device_terminate_all = zynqmp_dma_device_terminate_all;
+ p->device_synchronize = zynqmp_dma_synchronize;
p->device_issue_pending = zynqmp_dma_issue_pending;
p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
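
With device_synchronize wired up in the hunk above, dmaengine consumers of the ZynqMP DMA channel can wait for the completion tasklet to finish after terminating a transfer, instead of racing with callbacks that may still reference freed buffers. A hedged consumer-side sketch (generic dmaengine API usage, not code from this patch):

#include <linux/dmaengine.h>

/* Illustrative only: how a client benefits from the new callback. */
static void stop_channel_safely(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);	/* invokes device_terminate_all() */
	dmaengine_synchronize(chan);		/* now backed by zynqmp_dma_synchronize() */
	/* safe to free buffers referenced by completion callbacks from here on */
}
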
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 17562cf1fe97..456602d373b7 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -473,7 +473,7 @@ config EDAC_ALTERA_SDMMC
config EDAC_SIFIVE
bool "Sifive platform EDAC driver"
- depends on EDAC=y && SIFIVE_L2
+ depends on EDAC=y && SIFIVE_CCACHE
help
Support for error detection and correction on the SiFive SoCs.
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index ee800aec7d47..b844e2626fd5 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -2,7 +2,7 @@
/*
* SiFive Platform EDAC Driver
*
- * Copyright (C) 2018-2019 SiFive, Inc.
+ * Copyright (C) 2018-2022 SiFive, Inc.
*
* This driver is partially based on octeon_edac-pc.c
*
@@ -10,7 +10,7 @@
#include <linux/edac.h>
#include <linux/platform_device.h>
#include "edac_module.h"
-#include <soc/sifive/sifive_l2_cache.h>
+#include <soc/sifive/sifive_ccache.h>
#define DRVNAME "sifive_edac"
@@ -32,9 +32,9 @@ int ecc_err_event(struct notifier_block *this, unsigned long event, void *ptr)
p = container_of(this, struct sifive_edac_priv, notifier);
- if (event == SIFIVE_L2_ERR_TYPE_UE)
+ if (event == SIFIVE_CCACHE_ERR_TYPE_UE)
edac_device_handle_ue(p->dci, 0, 0, msg);
- else if (event == SIFIVE_L2_ERR_TYPE_CE)
+ else if (event == SIFIVE_CCACHE_ERR_TYPE_CE)
edac_device_handle_ce(p->dci, 0, 0, msg);
return NOTIFY_OK;
@@ -67,7 +67,7 @@ static int ecc_register(struct platform_device *pdev)
goto err;
}
- register_sifive_l2_error_notifier(&p->notifier);
+ register_sifive_ccache_error_notifier(&p->notifier);
return 0;
@@ -81,7 +81,7 @@ static int ecc_unregister(struct platform_device *pdev)
{
struct sifive_edac_priv *p = platform_get_drvdata(pdev);
- unregister_sifive_l2_error_notifier(&p->notifier);
+ unregister_sifive_ccache_error_notifier(&p->notifier);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(p->dci);
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index dca7cecb37e3..290186e44e6b 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -183,7 +183,7 @@ config EXTCON_USBC_CROS_EC
config EXTCON_USBC_TUSB320
tristate "TI TUSB320 USB-C extcon support"
- depends on I2C
+ depends on I2C && TYPEC
select REGMAP_I2C
help
Say Y here to enable support for USB Type C cable detection extcon
diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
index 6ba3d89b106d..41041ff0fadb 100644
--- a/drivers/extcon/extcon-usbc-tusb320.c
+++ b/drivers/extcon/extcon-usbc-tusb320.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* drivers/extcon/extcon-tusb320.c - TUSB320 extcon driver
*
* Copyright (C) 2020 National Instruments Corporation
* Author: Michael Auchter <michael.auchter@ni.com>
*/
+#include <linux/bitfield.h>
#include <linux/extcon-provider.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -13,6 +14,24 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/usb/typec.h>
+
+#define TUSB320_REG8 0x8
+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6)
+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB 0x0
+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A 0x1
+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A 0x2
+#define TUSB320_REG8_CURRENT_MODE_DETECT GENMASK(5, 4)
+#define TUSB320_REG8_CURRENT_MODE_DETECT_DEF 0x0
+#define TUSB320_REG8_CURRENT_MODE_DETECT_MED 0x1
+#define TUSB320_REG8_CURRENT_MODE_DETECT_ACC 0x2
+#define TUSB320_REG8_CURRENT_MODE_DETECT_HI 0x3
+#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 2)
+#define TUSB320_REG8_ACCESSORY_CONNECTED_NONE 0x0
+#define TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO 0x4
+#define TUSB320_REG8_ACCESSORY_CONNECTED_ACC 0x5
+#define TUSB320_REG8_ACCESSORY_CONNECTED_DEBUG 0x6
+#define TUSB320_REG8_ACTIVE_CABLE_DETECTION BIT(0)
#define TUSB320_REG9 0x9
#define TUSB320_REG9_ATTACHED_STATE_SHIFT 6
@@ -55,6 +74,10 @@ struct tusb320_priv {
struct extcon_dev *edev;
struct tusb320_ops *ops;
enum tusb320_attached_state state;
+ struct typec_port *port;
+ struct typec_capability cap;
+ enum typec_port_type port_type;
+ enum typec_pwr_opmode pwr_opmode;
};
static const char * const tusb_attached_states[] = {
@@ -184,19 +207,47 @@ static struct tusb320_ops tusb320l_ops = {
.get_revision = tusb320l_get_revision,
};
-static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+static int tusb320_set_adv_pwr_mode(struct tusb320_priv *priv)
{
- struct tusb320_priv *priv = dev_id;
- int state, polarity;
- unsigned reg;
+ u8 mode;
+
+ if (priv->pwr_opmode == TYPEC_PWR_MODE_USB)
+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB;
+ else if (priv->pwr_opmode == TYPEC_PWR_MODE_1_5A)
+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A;
+ else if (priv->pwr_opmode == TYPEC_PWR_MODE_3_0A)
+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A;
+ else /* No other mode is supported. */
+ return -EINVAL;
- if (regmap_read(priv->regmap, TUSB320_REG9, &reg)) {
- dev_err(priv->dev, "error during i2c read!\n");
- return IRQ_NONE;
- }
+ return regmap_write_bits(priv->regmap, TUSB320_REG8,
+ TUSB320_REG8_CURRENT_MODE_ADVERTISE,
+ FIELD_PREP(TUSB320_REG8_CURRENT_MODE_ADVERTISE,
+ mode));
+}
- if (!(reg & TUSB320_REG9_INTERRUPT_STATUS))
- return IRQ_NONE;
+static int tusb320_port_type_set(struct typec_port *port,
+ enum typec_port_type type)
+{
+ struct tusb320_priv *priv = typec_get_drvdata(port);
+
+ if (type == TYPEC_PORT_SRC)
+ return priv->ops->set_mode(priv, TUSB320_MODE_DFP);
+ else if (type == TYPEC_PORT_SNK)
+ return priv->ops->set_mode(priv, TUSB320_MODE_UFP);
+ else if (type == TYPEC_PORT_DRP)
+ return priv->ops->set_mode(priv, TUSB320_MODE_DRP);
+ else
+ return priv->ops->set_mode(priv, TUSB320_MODE_PORT);
+}
+
+static const struct typec_operations tusb320_typec_ops = {
+ .port_type_set = tusb320_port_type_set,
+};
+
+static void tusb320_extcon_irq_handler(struct tusb320_priv *priv, u8 reg)
+{
+ int state, polarity;
state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
TUSB320_REG9_ATTACHED_STATE_MASK;
@@ -219,6 +270,64 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
extcon_sync(priv->edev, EXTCON_USB_HOST);
priv->state = state;
+}
+
+static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9)
+{
+ struct typec_port *port = priv->port;
+ struct device *dev = priv->dev;
+ u8 mode, role, state;
+ int ret, reg8;
+ bool ori;
+
+ ori = reg9 & TUSB320_REG9_CABLE_DIRECTION;
+ typec_set_orientation(port, ori ? TYPEC_ORIENTATION_REVERSE :
+ TYPEC_ORIENTATION_NORMAL);
+
+ state = (reg9 >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
+ TUSB320_REG9_ATTACHED_STATE_MASK;
+ if (state == TUSB320_ATTACHED_STATE_DFP)
+ role = TYPEC_SOURCE;
+ else
+ role = TYPEC_SINK;
+
+ typec_set_vconn_role(port, role);
+ typec_set_pwr_role(port, role);
+ typec_set_data_role(port, role == TYPEC_SOURCE ?
+ TYPEC_HOST : TYPEC_DEVICE);
+
+ ret = regmap_read(priv->regmap, TUSB320_REG8, &reg8);
+ if (ret) {
+ dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret);
+ return;
+ }
+
+ mode = FIELD_GET(TUSB320_REG8_CURRENT_MODE_DETECT, reg8);
+ if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_DEF)
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
+ else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_MED)
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_1_5A);
+ else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_HI)
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_3_0A);
+ else /* Charge through accessory */
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
+}
+
+static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+{
+ struct tusb320_priv *priv = dev_id;
+ unsigned int reg;
+
+ if (regmap_read(priv->regmap, TUSB320_REG9, &reg)) {
+ dev_err(priv->dev, "error during i2c read!\n");
+ return IRQ_NONE;
+ }
+
+ if (!(reg & TUSB320_REG9_INTERRUPT_STATUS))
+ return IRQ_NONE;
+
+ tusb320_extcon_irq_handler(priv, reg);
+ tusb320_typec_irq_handler(priv, reg);
regmap_write(priv->regmap, TUSB320_REG9, reg);
@@ -230,8 +339,84 @@ static const struct regmap_config tusb320_regmap_config = {
.val_bits = 8,
};
-static int tusb320_extcon_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tusb320_extcon_probe(struct tusb320_priv *priv)
+{
+ int ret;
+
+ priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
+ if (IS_ERR(priv->edev)) {
+ dev_err(priv->dev, "failed to allocate extcon device\n");
+ return PTR_ERR(priv->edev);
+ }
+
+ ret = devm_extcon_dev_register(priv->dev, priv->edev);
+ if (ret < 0) {
+ dev_err(priv->dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ extcon_set_property_capability(priv->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(priv->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+
+ return 0;
+}
+
+static int tusb320_typec_probe(struct i2c_client *client,
+ struct tusb320_priv *priv)
+{
+ struct fwnode_handle *connector;
+ const char *cap_str;
+ int ret;
+
+ /* The Type-C connector is optional, for backward compatibility. */
+ connector = device_get_named_child_node(&client->dev, "connector");
+ if (!connector)
+ return 0;
+
+ /* Type-C connector found. */
+ ret = typec_get_fw_cap(&priv->cap, connector);
+ if (ret)
+ return ret;
+
+ priv->port_type = priv->cap.type;
+
+ /* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */
+ ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
+ if (ret)
+ return ret;
+
+ ret = typec_find_pwr_opmode(cap_str);
+ if (ret < 0)
+ return ret;
+ if (ret == TYPEC_PWR_MODE_PD)
+ return -EINVAL;
+
+ priv->pwr_opmode = ret;
+
+ /* Initialize the hardware with the devicetree settings. */
+ ret = tusb320_set_adv_pwr_mode(priv);
+ if (ret)
+ return ret;
+
+ priv->cap.revision = USB_TYPEC_REV_1_1;
+ priv->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO;
+ priv->cap.accessory[1] = TYPEC_ACCESSORY_DEBUG;
+ priv->cap.orientation_aware = true;
+ priv->cap.driver_data = priv;
+ priv->cap.ops = &tusb320_typec_ops;
+ priv->cap.fwnode = connector;
+
+ priv->port = typec_register_port(&client->dev, &priv->cap);
+ if (IS_ERR(priv->port))
+ return PTR_ERR(priv->port);
+
+ return 0;
+}
+
+static int tusb320_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct tusb320_priv *priv;
const void *match_data;
@@ -257,12 +442,6 @@ static int tusb320_extcon_probe(struct i2c_client *client,
priv->ops = (struct tusb320_ops*)match_data;
- priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
- if (IS_ERR(priv->edev)) {
- dev_err(priv->dev, "failed to allocate extcon device\n");
- return PTR_ERR(priv->edev);
- }
-
if (priv->ops->get_revision) {
ret = priv->ops->get_revision(priv, &revision);
if (ret)
@@ -272,16 +451,13 @@ static int tusb320_extcon_probe(struct i2c_client *client,
dev_info(priv->dev, "chip revision %d\n", revision);
}
- ret = devm_extcon_dev_register(priv->dev, priv->edev);
- if (ret < 0) {
- dev_err(priv->dev, "failed to register extcon device\n");
+ ret = tusb320_extcon_probe(priv);
+ if (ret)
return ret;
- }
- extcon_set_property_capability(priv->edev, EXTCON_USB,
- EXTCON_PROP_USB_TYPEC_POLARITY);
- extcon_set_property_capability(priv->edev, EXTCON_USB_HOST,
- EXTCON_PROP_USB_TYPEC_POLARITY);
+ ret = tusb320_typec_probe(client, priv);
+ if (ret)
+ return ret;
/* update initial state */
tusb320_irq_handler(client->irq, priv);
@@ -313,7 +489,7 @@ static const struct of_device_id tusb320_extcon_dt_match[] = {
MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
static struct i2c_driver tusb320_extcon_driver = {
- .probe = tusb320_extcon_probe,
+ .probe = tusb320_probe,
.driver = {
.name = "extcon-tusb320",
.of_match_table = tusb320_extcon_dt_match,
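
The extcon-usbc-tusb320 rework above relies on GENMASK()/FIELD_GET()/FIELD_PREP() from <linux/bitfield.h> to read the detected current mode out of register 0x8 and to program the advertised mode. As a rough stand-alone model of what those helpers compute (a plain-C re-creation for illustration, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)		(((v) & (m)) >> __builtin_ctz(m))
#define FIELD_PREP(m, v)	(((v) << __builtin_ctz(m)) & (m))

#define REG8_CURRENT_MODE_ADVERTISE	GENMASK(7, 6)	/* masks as in the patch */
#define REG8_CURRENT_MODE_DETECT	GENMASK(5, 4)

int main(void)
{
	uint32_t reg8 = 0x30;	/* bits 5:4 = 0b11, i.e. "high current" detected */

	printf("detected mode: %u\n", FIELD_GET(REG8_CURRENT_MODE_DETECT, reg8));
	printf("advertise 3.0A bits: 0x%x\n",
	       FIELD_PREP(REG8_CURRENT_MODE_ADVERTISE, 0x2));
	return 0;
}
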
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 0eb6b617f709..015c95a825d3 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -567,8 +567,13 @@ static int __init dmi_present(const u8 *buf)
{
u32 smbios_ver;
+ /*
+ * The size of this structure is 31 bytes, but we also accept value
+ * 30 due to a mistake in SMBIOS specification version 2.1.
+ */
if (memcmp(buf, "_SM_", 4) == 0 &&
- buf[5] < 32 && dmi_checksum(buf, buf[5])) {
+ buf[5] >= 30 && buf[5] <= 32 &&
+ dmi_checksum(buf, buf[5])) {
smbios_ver = get_unaligned_be16(buf + 6);
smbios_entry_point_size = buf[5];
memcpy(smbios_entry_point, buf, smbios_entry_point_size);
@@ -629,7 +634,8 @@ static int __init dmi_present(const u8 *buf)
static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
- buf[6] < 32 && dmi_checksum(buf, buf[6])) {
+ buf[6] >= 24 && buf[6] <= 32 &&
+ dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
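
The dmi_scan change above tightens the SMBIOS entry-point validation: the 32-bit "_SM_" anchor must report a length of 30 to 32 bytes (30 is tolerated because of a known slip in the SMBIOS 2.1 specification) and the 64-bit "_SM3_" anchor 24 to 32 bytes, with the usual requirement that the bytes over that length sum to zero. A stand-alone sketch of the accepted ranges (illustrative; not the kernel's dmi_present()/dmi_checksum() code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static bool checksum_ok(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *buf++;
	return sum == 0;		/* SMBIOS: all entry-point bytes sum to zero */
}

static bool smbios2_entry_ok(const uint8_t *buf)
{
	return !memcmp(buf, "_SM_", 4) &&
	       buf[5] >= 30 && buf[5] <= 32 && checksum_ok(buf, buf[5]);
}

static bool smbios3_entry_ok(const uint8_t *buf)
{
	return !memcmp(buf, "_SM3_", 5) &&
	       buf[6] >= 24 && buf[6] <= 32 && checksum_ok(buf, buf[6]);
}
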
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 6cb7384ad2ac..5b79a4a4a88d 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -105,9 +105,50 @@ config EFI_RUNTIME_WRAPPERS
config EFI_GENERIC_STUB
bool
+config EFI_ZBOOT
+ bool "Enable the generic EFI decompressor"
+ depends on EFI_GENERIC_STUB && !ARM
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
+ select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_XZ
+ select HAVE_KERNEL_ZSTD
+ help
+ Create the bootable image as an EFI application that carries the
+ actual kernel image in compressed form, and decompresses it into
+ memory before executing it via LoadImage/StartImage EFI boot service
+ calls. For compatibility with non-EFI loaders, the payload can be
+ decompressed and executed by the loader as well, provided that the
+ loader implements the decompression algorithm and that non-EFI boot
+ is supported by the encapsulated image. (The compression algorithm
+ used is described in the zboot image header)
+
+config EFI_ZBOOT_SIGNED
+ def_bool y
+ depends on EFI_ZBOOT_SIGNING_CERT != ""
+ depends on EFI_ZBOOT_SIGNING_KEY != ""
+
+config EFI_ZBOOT_SIGNING
+ bool "Sign the EFI decompressor for UEFI secure boot"
+ depends on EFI_ZBOOT
+ help
+ Use the 'sbsign' command line tool (which must exist on the host
+ path) to sign both the EFI decompressor PE/COFF image and the
+ encapsulated PE/COFF image, which is subsequently compressed and
+ wrapped by the former image.
+
+config EFI_ZBOOT_SIGNING_CERT
+ string "Certificate to use for signing the compressed EFI boot image"
+ depends on EFI_ZBOOT_SIGNING
+
+config EFI_ZBOOT_SIGNING_KEY
+ string "Private key to use for signing the compressed EFI boot image"
+ depends on EFI_ZBOOT_SIGNING
+
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
- depends on EFI_GENERIC_STUB && !RISCV
+ depends on EFI_GENERIC_STUB && !RISCV && !LOONGARCH
default y
help
Select this config option to add support for the dtb= command
@@ -124,7 +165,7 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
bool "Enable the command line initrd loader" if !X86
depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
default y if X86
- depends on !RISCV
+ depends on !RISCV && !LOONGARCH
help
Select this config option to add support for the initrd= command
line parameter, allowing an initrd that resides on the same volume
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index 3928dbff76d0..2fd770b499a3 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -51,34 +51,10 @@ static phys_addr_t __init efi_to_phys(unsigned long addr)
return addr;
}
-static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
-static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR;
-
-static const efi_config_table_type_t arch_tables[] __initconst = {
- {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
- {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
- {}
-};
+extern __weak const efi_config_table_type_t efi_arch_tables[];
static void __init init_screen_info(void)
{
- struct screen_info *si;
-
- if (IS_ENABLED(CONFIG_ARM) &&
- screen_info_table != EFI_INVALID_TABLE_ADDR) {
- si = early_memremap_ro(screen_info_table, sizeof(*si));
- if (!si) {
- pr_err("Could not map screen_info config table\n");
- return;
- }
- screen_info = *si;
- early_memunmap(si, sizeof(*si));
-
- /* dummycon on ARM needs non-zero values for columns/lines */
- screen_info.orig_video_cols = 80;
- screen_info.orig_video_lines = 25;
- }
-
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
memblock_is_map_memory(screen_info.lfb_base))
memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
@@ -119,8 +95,7 @@ static int __init uefi_init(u64 efi_system_table)
goto out;
}
retval = efi_config_parse_tables(config_tables, systab->nr_tables,
- IS_ENABLED(CONFIG_ARM) ? arch_tables
- : NULL);
+ efi_arch_tables);
early_memunmap(config_tables, table_size);
out:
@@ -248,36 +223,4 @@ void __init efi_init(void)
PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
init_screen_info();
-
-#ifdef CONFIG_ARM
- /* ARM does not permit early mappings to persist across paging_init() */
- efi_memmap_unmap();
-
- if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
- struct efi_arm_entry_state *state;
- bool dump_state = true;
-
- state = early_memremap_ro(cpu_state_table,
- sizeof(struct efi_arm_entry_state));
- if (state == NULL) {
- pr_warn("Unable to map CPU entry state table.\n");
- return;
- }
-
- if ((state->sctlr_before_ebs & 1) == 0)
- pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
- else if ((state->sctlr_after_ebs & 1) == 0)
- pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
- else
- dump_state = false;
-
- if (dump_state || efi_enabled(EFI_DBG)) {
- pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs);
- pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs);
- pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs);
- pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs);
- }
- early_memunmap(state, sizeof(struct efi_arm_entry_state));
- }
-#endif
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e4080ad96089..9624735f1575 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -21,6 +21,7 @@
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
+#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
@@ -55,9 +56,10 @@ EXPORT_SYMBOL(efi);
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
struct mm_struct efi_mm = {
- .mm_rb = RB_ROOT,
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
@@ -532,6 +534,7 @@ static const efi_config_table_type_t common_tables[] __initconst = {
{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
{LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
+ {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
@@ -674,6 +677,18 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
}
}
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
+ initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
+ struct linux_efi_initrd *tbl;
+
+ tbl = early_memremap(initrd, sizeof(*tbl));
+ if (tbl) {
+ phys_initrd_start = tbl->base;
+ phys_initrd_size = tbl->size;
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+ }
+
return 0;
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index b43fdb319fd4..b1601aad7e1a 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -26,8 +26,10 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
$(call cc-option,-mno-single-pic-base)
cflags-$(CONFIG_RISCV) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fpic
+cflags-$(CONFIG_LOONGARCH) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fpie
-cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt
+cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
-include $(srctree)/include/linux/hidden.h \
@@ -55,6 +57,7 @@ GCOV_PROFILE := n
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
@@ -66,21 +69,32 @@ lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \
alignedmem.o relocate.o vsprintf.o
-# include the stub's generic dependencies from lib/ when building for ARM/arm64
-efi-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+# include the stub's libfdt dependencies from lib/ when needed
+libfdt-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c \
+ fdt_empty_tree.c fdt_sw.c
+
+lib-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdt.o \
+ $(patsubst %.c,lib-%.o,$(libfdt-deps))
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o fdt.o string.o \
- $(patsubst %.c,lib-%.o,$(efi-deps-y))
+lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_RISCV) += riscv-stub.o
+lib-$(CONFIG_LOONGARCH) += loongarch-stub.o
+
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o
+lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
+
+extra-y := $(lib-y)
+lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
+
# Even when -mbranch-protection=none is set, Clang will generate a
# .note.gnu.property for code-less object files (like lib/ctype.c),
# so work around this by explicitly removing the unwanted section.
@@ -120,9 +134,6 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
# a verification pass to see if any absolute relocations exist in any of the
# object files.
#
-extra-y := $(lib-y)
-lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
-
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
@@ -134,6 +145,12 @@ STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+# For LoongArch, keep all the symbols in .init section and make sure that no
+# absolute symbols references exist.
+STUBCOPY_FLAGS-$(CONFIG_LOONGARCH) += --prefix-alloc-sections=.init \
+ --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_LOONGARCH) := R_LARCH_MARK_LA
+
$(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,stubcopy)
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
new file mode 100644
index 000000000000..35f234ad8738
--- /dev/null
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# to be include'd by arch/$(ARCH)/boot/Makefile after setting
+# EFI_ZBOOT_PAYLOAD, EFI_ZBOOT_BFD_TARGET and EFI_ZBOOT_MACH_TYPE
+
+comp-type-$(CONFIG_KERNEL_GZIP) := gzip
+comp-type-$(CONFIG_KERNEL_LZ4) := lz4
+comp-type-$(CONFIG_KERNEL_LZMA) := lzma
+comp-type-$(CONFIG_KERNEL_LZO) := lzo
+comp-type-$(CONFIG_KERNEL_XZ) := xzkern
+comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22
+
+# in GZIP, the appended le32 carrying the uncompressed size is part of the
+# format, but in other cases, we just append it at the end for convenience,
+# causing the original tools to complain when checking image integrity.
+# So disregard it when calculating the payload size in the zimage header.
+zboot-method-y := $(comp-type-y)_with_size
+zboot-size-len-y := 4
+
+zboot-method-$(CONFIG_KERNEL_GZIP) := gzip
+zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
+
+quiet_cmd_sbsign = SBSIGN $@
+ cmd_sbsign = sbsign --out $@ $< \
+ --key $(CONFIG_EFI_ZBOOT_SIGNING_KEY) \
+ --cert $(CONFIG_EFI_ZBOOT_SIGNING_CERT)
+
+$(obj)/$(EFI_ZBOOT_PAYLOAD).signed: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
+ $(call if_changed,sbsign)
+
+ZBOOT_PAYLOAD-y := $(EFI_ZBOOT_PAYLOAD)
+ZBOOT_PAYLOAD-$(CONFIG_EFI_ZBOOT_SIGNED) := $(EFI_ZBOOT_PAYLOAD).signed
+
+$(obj)/vmlinuz: $(obj)/$(ZBOOT_PAYLOAD-y) FORCE
+ $(call if_changed,$(zboot-method-y))
+
+OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) \
+ --rename-section .data=.gzdata,load,alloc,readonly,contents
+$(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
+ $(call if_changed,objcopy)
+
+AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \
+ -DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \
+ -DZBOOT_SIZE_LEN=$(zboot-size-len-y) \
+ -DCOMP_TYPE="\"$(comp-type-y)\""
+
+$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
+ $(call if_changed_rule,as_o_S)
+
+ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
+$(obj)/vmlinuz.efi.elf: $(obj)/vmlinuz.o $(ZBOOT_DEPS) FORCE
+ $(call if_changed,ld)
+
+ZBOOT_EFI-y := vmlinuz.efi
+ZBOOT_EFI-$(CONFIG_EFI_ZBOOT_SIGNED) := vmlinuz.efi.unsigned
+
+OBJCOPYFLAGS_$(ZBOOT_EFI-y) := -O binary
+$(obj)/$(ZBOOT_EFI-y): $(obj)/vmlinuz.efi.elf FORCE
+ $(call if_changed,objcopy)
+
+targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
+
+ifneq ($(CONFIG_EFI_ZBOOT_SIGNED),)
+$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.unsigned FORCE
+ $(call if_changed,sbsign)
+endif
+
+targets += $(EFI_ZBOOT_PAYLOAD).signed vmlinuz.efi.unsigned
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 60973e84d7ab..259e4b852d63 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -19,6 +19,14 @@ efi_status_t check_platform_features(void)
{
u64 tg;
+ /*
+ * If we have 48 bits of VA space for TTBR0 mappings, we can map the
+ * UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is
+ * unnecessary.
+ */
+ if (VA_BITS_MIN >= 48)
+ efi_novamap = true;
+
/* UEFI mandates support for 4 KB granularity, no need to check */
if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
return EFI_SUCCESS;
@@ -42,26 +50,17 @@ efi_status_t check_platform_features(void)
*/
static bool check_image_region(u64 base, u64 size)
{
- unsigned long map_size, desc_size, buff_size;
- efi_memory_desc_t *memory_map;
- struct efi_boot_memmap map;
+ struct efi_boot_memmap *map;
efi_status_t status;
bool ret = false;
int map_offset;
- map.map = &memory_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = NULL;
- map.key_ptr = NULL;
- map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&map);
+ status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
return false;
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
/*
@@ -74,7 +73,7 @@ static bool check_image_region(u64 base, u64 size)
}
}
- efi_bs_call(free_pool, memory_map);
+ efi_bs_call(free_pool, map);
return ret;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 3d972061c1b0..0c493521b25b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -218,7 +218,7 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_noinitrd = true;
} else if (!strcmp(param, "efi") && val) {
efi_nochunk = parse_option_str(val, "nochunk");
- efi_novamap = parse_option_str(val, "novamap");
+ efi_novamap |= parse_option_str(val, "novamap");
efi_nosoftreserve = IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
parse_option_str(val, "nosoftreserve");
@@ -310,7 +310,7 @@ bool efi_load_option_unpack(efi_load_option_unpacked_t *dest,
*
* Detect this case and extract OptionalData.
*/
-void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size)
+void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size)
{
const efi_load_option_t *load_option = *load_options;
efi_load_option_unpacked_t load_option_unpacked;
@@ -334,6 +334,85 @@ void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_si
*load_options_size = load_option_unpacked.optional_data_size;
}
+enum efistub_event {
+ EFISTUB_EVT_INITRD,
+ EFISTUB_EVT_LOAD_OPTIONS,
+ EFISTUB_EVT_COUNT,
+};
+
+#define STR_WITH_SIZE(s) sizeof(s), s
+
+static const struct {
+ u32 pcr_index;
+ u32 event_id;
+ u32 event_data_len;
+ u8 event_data[52];
+} events[] = {
+ [EFISTUB_EVT_INITRD] = {
+ 9,
+ INITRD_EVENT_TAG_ID,
+ STR_WITH_SIZE("Linux initrd")
+ },
+ [EFISTUB_EVT_LOAD_OPTIONS] = {
+ 9,
+ LOAD_OPTIONS_EVENT_TAG_ID,
+ STR_WITH_SIZE("LOADED_IMAGE::LoadOptions")
+ },
+};
+
+static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
+ unsigned long load_size,
+ enum efistub_event event)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_tcg2_protocol_t *tcg2 = NULL;
+ efi_status_t status;
+
+ efi_bs_call(locate_protocol, &tcg2_guid, NULL, (void **)&tcg2);
+ if (tcg2) {
+ struct efi_measured_event {
+ efi_tcg2_event_t event_data;
+ efi_tcg2_tagged_event_t tagged_event;
+ u8 tagged_event_data[];
+ } *evt;
+ int size = sizeof(*evt) + events[event].event_data_len;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&evt);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ evt->event_data = (struct efi_tcg2_event){
+ .event_size = size,
+ .event_header.header_size = sizeof(evt->event_data.event_header),
+ .event_header.header_version = EFI_TCG2_EVENT_HEADER_VERSION,
+ .event_header.pcr_index = events[event].pcr_index,
+ .event_header.event_type = EV_EVENT_TAG,
+ };
+
+ evt->tagged_event = (struct efi_tcg2_tagged_event){
+ .tagged_event_id = events[event].event_id,
+ .tagged_event_data_size = events[event].event_data_len,
+ };
+
+ memcpy(evt->tagged_event_data, events[event].event_data,
+ events[event].event_data_len);
+
+ status = efi_call_proto(tcg2, hash_log_extend_event, 0,
+ load_addr, load_size, &evt->event_data);
+ efi_bs_call(free_pool, evt);
+
+ if (status != EFI_SUCCESS)
+ goto fail;
+ return EFI_SUCCESS;
+ }
+
+ return EFI_UNSUPPORTED;
+fail:
+ efi_warn("Failed to measure data for event %d: 0x%lx\n", event, status);
+ return status;
+}
+
/*
* Convert the unicode UEFI command line to ASCII to pass to kernel.
* Size of memory allocated return in *cmd_line_len.
@@ -341,21 +420,26 @@ void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_si
*/
char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
{
- const u16 *s2;
- unsigned long cmdline_addr = 0;
- int options_chars = efi_table_attr(image, load_options_size);
- const u16 *options = efi_table_attr(image, load_options);
+ const efi_char16_t *options = efi_table_attr(image, load_options);
+ u32 options_size = efi_table_attr(image, load_options_size);
int options_bytes = 0, safe_options_bytes = 0; /* UTF-8 bytes */
+ unsigned long cmdline_addr = 0;
+ const efi_char16_t *s2;
bool in_quote = false;
efi_status_t status;
+ u32 options_chars;
+
+ if (options_size > 0)
+ efi_measure_tagged_event((unsigned long)options, options_size,
+ EFISTUB_EVT_LOAD_OPTIONS);
- efi_apply_loadoptions_quirk((const void **)&options, &options_chars);
- options_chars /= sizeof(*options);
+ efi_apply_loadoptions_quirk((const void **)&options, &options_size);
+ options_chars = options_size / sizeof(efi_char16_t);
if (options) {
s2 = options;
while (options_bytes < COMMAND_LINE_SIZE && options_chars--) {
- u16 c = *s2++;
+ efi_char16_t c = *s2++;
if (c < 0x80) {
if (c == L'\0' || c == L'\n')
@@ -419,7 +503,6 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
/**
* efi_exit_boot_services() - Exit boot services
* @handle: handle of the exiting image
- * @map: pointer to receive the memory map
* @priv: argument to be passed to @priv_func
* @priv_func: function to process the memory map before exiting boot services
*
@@ -432,26 +515,26 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
*
* Return: status code
*/
-efi_status_t efi_exit_boot_services(void *handle,
- struct efi_boot_memmap *map,
- void *priv,
+efi_status_t efi_exit_boot_services(void *handle, void *priv,
efi_exit_boot_map_processing priv_func)
{
+ struct efi_boot_memmap *map;
efi_status_t status;
- status = efi_get_memory_map(map);
-
+ status = efi_get_memory_map(&map, true);
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
status = priv_func(map, priv);
- if (status != EFI_SUCCESS)
- goto free_map;
+ if (status != EFI_SUCCESS) {
+ efi_bs_call(free_pool, map);
+ return status;
+ }
if (efi_disable_pci_dma)
efi_pci_disable_bridge_busmaster();
- status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
+ status = efi_bs_call(exit_boot_services, handle, map->map_key);
if (status == EFI_INVALID_PARAMETER) {
/*
@@ -467,35 +550,26 @@ efi_status_t efi_exit_boot_services(void *handle,
* buffer should account for any changes in the map so the call
* to get_memory_map() is expected to succeed here.
*/
- *map->map_size = *map->buff_size;
+ map->map_size = map->buff_size;
status = efi_bs_call(get_memory_map,
- map->map_size,
- *map->map,
- map->key_ptr,
- map->desc_size,
- map->desc_ver);
+ &map->map_size,
+ &map->map,
+ &map->map_key,
+ &map->desc_size,
+ &map->desc_ver);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
status = priv_func(map, priv);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
- status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
+ status = efi_bs_call(exit_boot_services, handle, map->map_key);
}
- /* exit_boot_services() was called, thus cannot free */
- if (status != EFI_SUCCESS)
- goto fail;
-
- return EFI_SUCCESS;
-
-free_map:
- efi_bs_call(free_pool, *map->map);
-fail:
return status;
}
@@ -560,20 +634,16 @@ static const struct {
* * %EFI_SUCCESS if the initrd was loaded successfully, in which
* case @load_addr and @load_size are assigned accordingly
* * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path
- * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
* * %EFI_OUT_OF_RESOURCES if memory allocation failed
* * %EFI_LOAD_ERROR in all other cases
*/
static
-efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
- unsigned long *load_size,
+efi_status_t efi_load_initrd_dev_path(struct linux_efi_initrd *initrd,
unsigned long max)
{
efi_guid_t lf2_proto_guid = EFI_LOAD_FILE2_PROTOCOL_GUID;
efi_device_path_protocol_t *dp;
efi_load_file2_protocol_t *lf2;
- unsigned long initrd_addr;
- unsigned long initrd_size;
efi_handle_t handle;
efi_status_t status;
@@ -587,124 +657,98 @@ efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
if (status != EFI_SUCCESS)
return status;
- status = efi_call_proto(lf2, load_file, dp, false, &initrd_size, NULL);
+ initrd->size = 0;
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd->size, NULL);
if (status != EFI_BUFFER_TOO_SMALL)
return EFI_LOAD_ERROR;
- status = efi_allocate_pages(initrd_size, &initrd_addr, max);
+ status = efi_allocate_pages(initrd->size, &initrd->base, max);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_proto(lf2, load_file, dp, false, &initrd_size,
- (void *)initrd_addr);
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd->size,
+ (void *)initrd->base);
if (status != EFI_SUCCESS) {
- efi_free(initrd_size, initrd_addr);
+ efi_free(initrd->size, initrd->base);
return EFI_LOAD_ERROR;
}
-
- *load_addr = initrd_addr;
- *load_size = initrd_size;
return EFI_SUCCESS;
}
static
efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size,
+ struct linux_efi_initrd *initrd,
unsigned long soft_limit,
unsigned long hard_limit)
{
if (!IS_ENABLED(CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER) ||
- (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL))) {
- *load_addr = *load_size = 0;
- return EFI_SUCCESS;
- }
+ (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL)))
+ return EFI_UNSUPPORTED;
return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
soft_limit, hard_limit,
- load_addr, load_size);
-}
-
-static const struct {
- efi_tcg2_event_t event_data;
- efi_tcg2_tagged_event_t tagged_event;
- u8 tagged_event_data[];
-} initrd_tcg2_event = {
- {
- sizeof(initrd_tcg2_event) + sizeof("Linux initrd"),
- {
- sizeof(initrd_tcg2_event.event_data.event_header),
- EFI_TCG2_EVENT_HEADER_VERSION,
- 9,
- EV_EVENT_TAG,
- },
- },
- {
- INITRD_EVENT_TAG_ID,
- sizeof("Linux initrd"),
- },
- { "Linux initrd" },
-};
-
-static void efi_measure_initrd(unsigned long load_addr, unsigned long load_size)
-{
- efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
- efi_tcg2_protocol_t *tcg2 = NULL;
- efi_status_t status;
-
- efi_bs_call(locate_protocol, &tcg2_guid, NULL, (void **)&tcg2);
- if (tcg2) {
- status = efi_call_proto(tcg2, hash_log_extend_event,
- 0, load_addr, load_size,
- &initrd_tcg2_event.event_data);
- if (status != EFI_SUCCESS)
- efi_warn("Failed to measure initrd data: 0x%lx\n",
- status);
- else
- efi_info("Measured initrd data into PCR %d\n",
- initrd_tcg2_event.event_data.event_header.pcr_index);
- }
+ &initrd->base, &initrd->size);
}
/**
* efi_load_initrd() - Load initial RAM disk
* @image: EFI loaded image protocol
- * @load_addr: pointer to loaded initrd
- * @load_size: size of loaded initrd
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
*
* Return: status code
*/
efi_status_t efi_load_initrd(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size,
unsigned long soft_limit,
- unsigned long hard_limit)
+ unsigned long hard_limit,
+ const struct linux_efi_initrd **out)
{
- efi_status_t status;
+ efi_guid_t tbl_guid = LINUX_EFI_INITRD_MEDIA_GUID;
+ efi_status_t status = EFI_SUCCESS;
+ struct linux_efi_initrd initrd, *tbl;
- if (efi_noinitrd) {
- *load_addr = *load_size = 0;
- status = EFI_SUCCESS;
- } else {
- status = efi_load_initrd_dev_path(load_addr, load_size, hard_limit);
- if (status == EFI_SUCCESS) {
- efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
- if (*load_size > 0)
- efi_measure_initrd(*load_addr, *load_size);
- } else if (status == EFI_NOT_FOUND) {
- status = efi_load_initrd_cmdline(image, load_addr, load_size,
- soft_limit, hard_limit);
- if (status == EFI_SUCCESS && *load_size > 0)
- efi_info("Loaded initrd from command line option\n");
- }
- if (status != EFI_SUCCESS) {
- efi_err("Failed to load initrd: 0x%lx\n", status);
- *load_addr = *load_size = 0;
- }
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || efi_noinitrd)
+ return EFI_SUCCESS;
+
+ status = efi_load_initrd_dev_path(&initrd, hard_limit);
+ if (status == EFI_SUCCESS) {
+ efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
+ if (initrd.size > 0 &&
+ efi_measure_tagged_event(initrd.base, initrd.size,
+ EFISTUB_EVT_INITRD) == EFI_SUCCESS)
+ efi_info("Measured initrd data into PCR 9\n");
+ } else if (status == EFI_NOT_FOUND) {
+ status = efi_load_initrd_cmdline(image, &initrd, soft_limit,
+ hard_limit);
+ /* command line loader disabled or no initrd= passed? */
+ if (status == EFI_UNSUPPORTED || status == EFI_NOT_READY)
+ return EFI_SUCCESS;
+ if (status == EFI_SUCCESS)
+ efi_info("Loaded initrd from command line option\n");
}
+ if (status != EFI_SUCCESS)
+ goto failed;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(initrd),
+ (void **)&tbl);
+ if (status != EFI_SUCCESS)
+ goto free_initrd;
+
+ *tbl = initrd;
+ status = efi_bs_call(install_configuration_table, &tbl_guid, tbl);
+ if (status != EFI_SUCCESS)
+ goto free_tbl;
+
+ if (out)
+ *out = tbl;
+ return EFI_SUCCESS;
+free_tbl:
+ efi_bs_call(free_pool, tbl);
+free_initrd:
+ efi_free(initrd.size, initrd.base);
+failed:
+ efi_err("Failed to load initrd: 0x%lx\n", status);
return status;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index f515394cce6e..cf474f0dd261 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -10,7 +10,6 @@
*/
#include <linux/efi.h>
-#include <linux/libfdt.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -40,16 +39,22 @@
#ifdef CONFIG_ARM64
# define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64
-#elif defined(CONFIG_RISCV)
+#elif defined(CONFIG_RISCV) || defined(CONFIG_LOONGARCH)
# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE_MIN
-#else
+#else /* Only if TASK_SIZE is a constant */
# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE
#endif
-static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-static bool flat_va_mapping;
+/*
+ * Some architectures map the EFI regions into the kernel's linear map using a
+ * fixed offset.
+ */
+#ifndef EFI_RT_VIRTUAL_OFFSET
+#define EFI_RT_VIRTUAL_OFFSET 0
+#endif
-const efi_system_table_t *efi_system_table;
+static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
+static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
static struct screen_info *setup_graphics(void)
{
@@ -124,16 +129,11 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
unsigned long image_addr;
unsigned long image_size = 0;
/* addr/point and size pairs for memory management*/
- unsigned long initrd_addr = 0;
- unsigned long initrd_size = 0;
- unsigned long fdt_addr = 0; /* Original DTB */
- unsigned long fdt_size = 0;
char *cmdline_ptr = NULL;
int cmdline_size = 0;
efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
unsigned long reserve_addr = 0;
unsigned long reserve_size = 0;
- enum efi_secureboot_mode secure_boot;
struct screen_info *si;
efi_properties_table_t *prop_tbl;
@@ -154,8 +154,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
* information about the running image, such as size and the command
* line.
*/
- status = efi_system_table->boottime->handle_protocol(handle,
- &loaded_image_proto, (void *)&image);
+ status = efi_bs_call(handle_protocol, handle, &loaded_image_proto,
+ (void *)&image);
if (status != EFI_SUCCESS) {
efi_err("Failed to get loaded image protocol\n");
goto fail;
@@ -209,40 +209,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
/* Ask the firmware to clear memory on unclean shutdown */
efi_enable_reset_attack_mitigation();
- secure_boot = efi_get_secureboot();
-
- /*
- * Unauthenticated device tree data is a security hazard, so ignore
- * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
- * boot is enabled if we can't determine its state.
- */
- if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
- secure_boot != efi_secureboot_mode_disabled) {
- if (strstr(cmdline_ptr, "dtb="))
- efi_err("Ignoring DTB from command line.\n");
- } else {
- status = efi_load_dtb(image, &fdt_addr, &fdt_size);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to load device tree!\n");
- goto fail_free_image;
- }
- }
-
- if (fdt_addr) {
- efi_info("Using DTB from command line\n");
- } else {
- /* Look for a device tree configuration table entry. */
- fdt_addr = (uintptr_t)get_fdt(&fdt_size);
- if (fdt_addr)
- efi_info("Using DTB from configuration table\n");
- }
-
- if (!fdt_addr)
- efi_info("Generating empty DTB\n");
-
- efi_load_initrd(image, &initrd_addr, &initrd_size, ULONG_MAX,
- efi_get_max_initrd_addr(image_addr));
+ efi_load_initrd(image, ULONG_MAX, efi_get_max_initrd_addr(image_addr),
+ NULL);
efi_random_get_seed();
@@ -254,8 +222,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
* The easiest way to achieve that is to simply use a 1:1 mapping.
*/
prop_tbl = get_efi_config_table(EFI_PROPERTIES_TABLE_GUID);
- flat_va_mapping = prop_tbl &&
- (prop_tbl->memory_protection_attribute &
+ flat_va_mapping |= prop_tbl &&
+ (prop_tbl->memory_protection_attribute &
EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
/* force efi_novamap if SetVirtualAddressMap() is unsupported */
@@ -284,25 +252,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
install_memreserve_table();
- status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr,
- initrd_addr, initrd_size,
- cmdline_ptr, fdt_addr, fdt_size);
- if (status != EFI_SUCCESS)
- goto fail_free_initrd;
-
- if (IS_ENABLED(CONFIG_ARM))
- efi_handle_post_ebs_state();
-
- efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
- /* not reached */
-
-fail_free_initrd:
- efi_err("Failed to update FDT and exit boot services\n");
+ status = efi_boot_kernel(handle, image, image_addr, cmdline_ptr);
- efi_free(initrd_size, initrd_addr);
- efi_free(fdt_size, fdt_addr);
-
-fail_free_image:
efi_free(image_size, image_addr);
efi_free(reserve_size, reserve_addr);
fail_free_screeninfo:
@@ -314,6 +265,35 @@ fail:
}
/*
+ * efi_alloc_virtmap() - create a pool allocation for the virtmap
+ *
+ * Create an allocation that is of sufficient size to hold all the memory
+ * descriptors that will be passed to SetVirtualAddressMap() to inform the
+ * firmware about the virtual mapping that will be used under the OS to call
+ * into the firmware.
+ */
+efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
+ unsigned long *desc_size, u32 *desc_ver)
+{
+ unsigned long size, mmap_key;
+ efi_status_t status;
+
+ /*
+ * Use the size of the current memory map as an upper bound for the
+ * size of the buffer we need to pass to SetVirtualAddressMap() to
+ * cover all EFI_MEMORY_RUNTIME regions.
+ */
+ size = 0;
+ status = efi_bs_call(get_memory_map, &size, NULL, &mmap_key, desc_size,
+ desc_ver);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ return efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)virtmap);
+}
+
+/*
* efi_get_virtmap() - create a virtual mapping for the EFI memory map
*
* This function populates the virt_addr fields of all memory region descriptors
@@ -328,6 +308,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
efi_memory_desc_t *in, *out = runtime_map;
int l;
+ *count = 0;
+
for (l = 0; l < map_size; l += desc_size) {
u64 paddr, size;
@@ -338,7 +320,7 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
paddr = in->phys_addr;
size = in->num_pages * EFI_PAGE_SIZE;
- in->virt_addr = in->phys_addr;
+ in->virt_addr = in->phys_addr + EFI_RT_VIRTUAL_OFFSET;
if (efi_novamap) {
continue;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b0ae0a454404..a30fb5d8ef05 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -160,16 +160,24 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
*/
#define EFI_MMAP_NR_SLACK_SLOTS 8
-struct efi_boot_memmap {
- efi_memory_desc_t **map;
- unsigned long *map_size;
- unsigned long *desc_size;
- u32 *desc_ver;
- unsigned long *key_ptr;
- unsigned long *buff_size;
+typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+
+union efi_device_path_to_text_protocol {
+ struct {
+ efi_char16_t *(__efiapi *convert_device_node_to_text)(
+ const efi_device_path_protocol_t *,
+ bool, bool);
+ efi_char16_t *(__efiapi *convert_device_path_to_text)(
+ const efi_device_path_protocol_t *,
+ bool, bool);
+ };
+ struct {
+ u32 convert_device_node_to_text;
+ u32 convert_device_path_to_text;
+ } mixed_mode;
};
-typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+typedef union efi_device_path_to_text_protocol efi_device_path_to_text_protocol_t;
typedef void *efi_event_t;
/* Note that notifications won't work in mixed mode */
@@ -254,13 +262,17 @@ union efi_boot_services {
efi_handle_t *);
efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *,
void *);
- void *load_image;
- void *start_image;
+ efi_status_t (__efiapi *load_image)(bool, efi_handle_t,
+ efi_device_path_protocol_t *,
+ void *, unsigned long,
+ efi_handle_t *);
+ efi_status_t (__efiapi *start_image)(efi_handle_t, unsigned long *,
+ efi_char16_t **);
efi_status_t __noreturn (__efiapi *exit)(efi_handle_t,
efi_status_t,
unsigned long,
efi_char16_t *);
- void *unload_image;
+ efi_status_t (__efiapi *unload_image)(efi_handle_t);
efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
unsigned long);
void *get_next_monotonic_count;
@@ -277,11 +289,11 @@ union efi_boot_services {
void *locate_handle_buffer;
efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
void **);
- void *install_multiple_protocol_interfaces;
- void *uninstall_multiple_protocol_interfaces;
+ efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...);
+ efi_status_t (__efiapi *uninstall_multiple_protocol_interfaces)(efi_handle_t, ...);
void *calculate_crc32;
- void *copy_mem;
- void *set_mem;
+ void (__efiapi *copy_mem)(void *, const void *, unsigned long);
+ void (__efiapi *set_mem)(void *, unsigned long, unsigned char);
void *create_event_ex;
};
struct {
@@ -741,6 +753,7 @@ union apple_properties_protocol {
typedef u32 efi_tcg2_event_log_format;
#define INITRD_EVENT_TAG_ID 0x8F3B22ECU
+#define LOAD_OPTIONS_EVENT_TAG_ID 0x8F3B22EDU
#define EV_EVENT_TAG 0x00000006U
#define EFI_TCG2_EVENT_HEADER_VERSION 0x1
@@ -840,7 +853,7 @@ typedef struct {
u16 file_path_list_length;
const efi_char16_t *description;
const efi_device_path_protocol_t *file_path_list;
- size_t optional_data_size;
+ u32 optional_data_size;
const void *optional_data;
} efi_load_option_unpacked_t;
@@ -850,20 +863,16 @@ typedef efi_status_t (*efi_exit_boot_map_processing)(
struct efi_boot_memmap *map,
void *priv);
-efi_status_t efi_exit_boot_services(void *handle,
- struct efi_boot_memmap *map,
- void *priv,
+efi_status_t efi_exit_boot_services(void *handle, void *priv,
efi_exit_boot_map_processing priv_func);
-efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
- unsigned long *new_fdt_addr,
- u64 initrd_addr, u64 initrd_size,
- char *cmdline_ptr,
- unsigned long fdt_addr,
- unsigned long fdt_size);
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr);
void *get_fdt(unsigned long *fdt_size);
+efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
+ unsigned long *desc_size, u32 *desc_ver);
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
@@ -885,11 +894,12 @@ __printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
-void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size);
+void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
-efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
+efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
+ bool install_cfg_tbl);
efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
unsigned long max);
@@ -932,10 +942,9 @@ static inline efi_status_t efi_load_dtb(efi_loaded_image_t *image,
}
efi_status_t efi_load_initrd(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size,
unsigned long soft_limit,
- unsigned long hard_limit);
+ unsigned long hard_limit,
+ const struct linux_efi_initrd **out);
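For illustration, a minimal sketch (not part of the patch) of how stub code calls the reworked helpers declared above: efi_get_memory_map() now allocates and returns a struct efi_boot_memmap, and efi_load_initrd() reports the initrd through a struct linux_efi_initrd descriptor instead of separate address/size outputs. Limits and error handling are abbreviated.

#include <linux/efi.h>
#include <asm/efi.h>
#include "efistub.h"

static efi_status_t example_stub_user(efi_loaded_image_t *image)
{
	const struct linux_efi_initrd *initrd = NULL;
	struct efi_boot_memmap *map;
	efi_status_t status;

	/* The helper allocates the map buffer; the caller only frees it. */
	status = efi_get_memory_map(&map, false);
	if (status != EFI_SUCCESS)
		return status;
	/* ... inspect map->map_size, map->desc_size, map->map ... */
	efi_bs_call(free_pool, map);

	/* One out-parameter now carries both the initrd base and size. */
	status = efi_load_initrd(image, ULONG_MAX, ULONG_MAX, &initrd);
	if (status == EFI_SUCCESS && initrd && initrd->size)
		efi_info("initrd: 0x%lx, %lu bytes\n", initrd->base, initrd->size);

	return status;
}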
/*
 * This function handles the architecture-specific differences between arm and
* arm64 regarding where the kernel image must be loaded and any memory that
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index fe567be0f118..4f4d98e51fbf 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -28,8 +28,7 @@ static void fdt_update_cell_size(void *fdt)
}
static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
- void *fdt, int new_fdt_size, char *cmdline_ptr,
- u64 initrd_addr, u64 initrd_size)
+ void *fdt, int new_fdt_size, char *cmdline_ptr)
{
int node, num_rsv;
int status;
@@ -93,21 +92,6 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
goto fdt_set_fail;
}
- /* Set initrd address/end in device tree, if present */
- if (initrd_size != 0) {
- u64 initrd_image_end;
- u64 initrd_image_start = cpu_to_fdt64(initrd_addr);
-
- status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start);
- if (status)
- goto fdt_set_fail;
-
- initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size);
- status = fdt_setprop_var(fdt, node, "linux,initrd-end", initrd_image_end);
- if (status)
- goto fdt_set_fail;
- }
-
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table);
@@ -170,25 +154,25 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
if (node < 0)
return EFI_LOAD_ERROR;
- fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+ fdt_val64 = cpu_to_fdt64((unsigned long)map->map);
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
if (err)
return EFI_LOAD_ERROR;
- fdt_val32 = cpu_to_fdt32(*map->map_size);
+ fdt_val32 = cpu_to_fdt32(map->map_size);
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
- fdt_val32 = cpu_to_fdt32(*map->desc_size);
+ fdt_val32 = cpu_to_fdt32(map->desc_size);
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
- fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+ fdt_val32 = cpu_to_fdt32(map->desc_ver);
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
if (err)
@@ -198,22 +182,25 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
}
struct exit_boot_struct {
+ struct efi_boot_memmap *boot_memmap;
efi_memory_desc_t *runtime_map;
- int *runtime_entry_count;
+ int runtime_entry_count;
void *new_fdt_addr;
};
-static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
- void *priv)
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
{
struct exit_boot_struct *p = priv;
+
+ p->boot_memmap = map;
+
/*
* Update the memory map with virtual addresses. The function will also
* populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
* entries so that we can pass it straight to SetVirtualAddressMap()
*/
- efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
- p->runtime_map, p->runtime_entry_count);
+ efi_get_virtmap(map->map, map->map_size, map->desc_size,
+ p->runtime_map, &p->runtime_entry_count);
return update_fdt_memmap(p->new_fdt_addr, map);
}
@@ -223,86 +210,86 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
#endif
/*
- * Allocate memory for a new FDT, then add EFI, commandline, and
- * initrd related fields to the FDT. This routine increases the
- * FDT allocation size until the allocated memory is large
- * enough. EFI allocations are in EFI_PAGE_SIZE granules,
- * which are fixed at 4K bytes, so in most cases the first
- * allocation should succeed.
- * EFI boot services are exited at the end of this function.
- * There must be no allocations between the get_memory_map()
- * call and the exit_boot_services() call, so the exiting of
- * boot services is very tightly tied to the creation of the FDT
- * with the final memory map in it.
+ * Allocate memory for a new FDT, then add EFI and commandline related fields
+ * to the FDT. This routine increases the FDT allocation size until the
+ * allocated memory is large enough. EFI allocations are in EFI_PAGE_SIZE
+ * granules, which are fixed at 4K bytes, so in most cases the first allocation
+ * should succeed. EFI boot services are exited at the end of this function.
+ * There must be no allocations between the get_memory_map() call and the
+ * exit_boot_services() call, so the exiting of boot services is very tightly
+ * tied to the creation of the FDT with the final memory map in it.
*/
-
+static
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
+ efi_loaded_image_t *image,
unsigned long *new_fdt_addr,
- u64 initrd_addr, u64 initrd_size,
- char *cmdline_ptr,
- unsigned long fdt_addr,
- unsigned long fdt_size)
+ char *cmdline_ptr)
{
- unsigned long map_size, desc_size, buff_size;
+ unsigned long desc_size;
u32 desc_ver;
- unsigned long mmap_key;
- efi_memory_desc_t *memory_map, *runtime_map;
efi_status_t status;
- int runtime_entry_count;
- struct efi_boot_memmap map;
struct exit_boot_struct priv;
+ unsigned long fdt_addr = 0;
+ unsigned long fdt_size = 0;
- map.map = &runtime_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_ver;
- map.key_ptr = &mmap_key;
- map.buff_size = &buff_size;
+ if (!efi_novamap) {
+ status = efi_alloc_virtmap(&priv.runtime_map, &desc_size,
+ &desc_ver);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to retrieve UEFI memory map.\n");
+ return status;
+ }
+ }
/*
- * Get a copy of the current memory map that we will use to prepare
- * the input for SetVirtualAddressMap(). We don't have to worry about
- * subsequent allocations adding entries, since they could not affect
- * the number of EFI_MEMORY_RUNTIME regions.
+ * Unauthenticated device tree data is a security hazard, so ignore
+ * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
+ * boot is enabled if we can't determine its state.
*/
- status = efi_get_memory_map(&map);
- if (status != EFI_SUCCESS) {
- efi_err("Unable to retrieve UEFI memory map.\n");
- return status;
+ if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+ efi_get_secureboot() != efi_secureboot_mode_disabled) {
+ if (strstr(cmdline_ptr, "dtb="))
+ efi_err("Ignoring DTB from command line.\n");
+ } else {
+ status = efi_load_dtb(image, &fdt_addr, &fdt_size);
+
+ if (status != EFI_SUCCESS && status != EFI_NOT_READY) {
+ efi_err("Failed to load device tree!\n");
+ goto fail;
+ }
}
+ if (fdt_addr) {
+ efi_info("Using DTB from command line\n");
+ } else {
+ /* Look for a device tree configuration table entry. */
+ fdt_addr = (uintptr_t)get_fdt(&fdt_size);
+ if (fdt_addr)
+ efi_info("Using DTB from configuration table\n");
+ }
+
+ if (!fdt_addr)
+ efi_info("Generating empty DTB\n");
+
efi_info("Exiting boot services...\n");
- map.map = &memory_map;
status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX);
if (status != EFI_SUCCESS) {
efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
}
- /*
- * Now that we have done our final memory allocation (and free)
- * we can get the memory map key needed for exit_boot_services().
- */
- status = efi_get_memory_map(&map);
- if (status != EFI_SUCCESS)
- goto fail_free_new_fdt;
-
status = update_fdt((void *)fdt_addr, fdt_size,
- (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
- initrd_addr, initrd_size);
+ (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr);
if (status != EFI_SUCCESS) {
efi_err("Unable to construct new device tree.\n");
goto fail_free_new_fdt;
}
- runtime_entry_count = 0;
- priv.runtime_map = runtime_map;
- priv.runtime_entry_count = &runtime_entry_count;
- priv.new_fdt_addr = (void *)*new_fdt_addr;
+ priv.new_fdt_addr = (void *)*new_fdt_addr;
- status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
@@ -312,8 +299,8 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
/* Install the new virtual address map */
svam = efi_system_table->runtime->set_virtual_address_map;
- status = svam(runtime_entry_count * desc_size, desc_size,
- desc_ver, runtime_map);
+ status = svam(priv.runtime_entry_count * desc_size, desc_size,
+ desc_ver, priv.runtime_map);
/*
* We are beyond the point of no return here, so if the call to
@@ -321,6 +308,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
* incoming kernel but proceed normally otherwise.
*/
if (status != EFI_SUCCESS) {
+ efi_memory_desc_t *p;
int l;
/*
@@ -329,8 +317,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
* the incoming kernel that no virtual translation has
* been installed.
*/
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *p = (void *)memory_map + l;
+ for (l = 0; l < priv.boot_memmap->map_size;
+ l += priv.boot_memmap->desc_size) {
+ p = (void *)priv.boot_memmap->map + l;
if (p->attribute & EFI_MEMORY_RUNTIME)
p->virt_addr = 0;
@@ -345,11 +334,33 @@ fail_free_new_fdt:
efi_free(MAX_FDT_SIZE, *new_fdt_addr);
fail:
- efi_system_table->boottime->free_pool(runtime_map);
+ efi_free(fdt_size, fdt_addr);
+
+ efi_bs_call(free_pool, priv.runtime_map);
return EFI_LOAD_ERROR;
}
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr)
+{
+ unsigned long fdt_addr;
+ efi_status_t status;
+
+ status = allocate_new_fdt_and_exit_boot(handle, image, &fdt_addr,
+ cmdline_ptr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to update FDT and exit boot services\n");
+ return status;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_handle_post_ebs_state();
+
+ efi_enter_kernel(kernel_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
+ /* not reached */
+}
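To show how this new entry point is meant to be consumed, here is a hedged sketch of the architecture-side caller (names such as image_addr and image_size are placeholders, not taken from this patch): the arch stub no longer builds the FDT or exits boot services itself, it simply hands control to efi_boot_kernel(), which only returns on failure.

static efi_status_t example_arch_boot(efi_handle_t handle,
				      efi_loaded_image_t *image,
				      unsigned long image_addr,
				      unsigned long image_size,
				      char *cmdline_ptr)
{
	efi_status_t status;

	status = efi_boot_kernel(handle, image, image_addr, cmdline_ptr);
	/*
	 * efi_boot_kernel() builds the FDT, exits boot services and jumps to
	 * the kernel via efi_enter_kernel(); it only returns on failure.
	 */
	efi_err("Failed to boot the kernel\n");
	efi_free(image_size, image_addr);
	return status;
}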
+
void *get_fdt(unsigned long *fdt_size)
{
void *fdt;
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index dd95f330fe6e..f756c61396e9 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -66,10 +66,28 @@ static efi_status_t efi_open_file(efi_file_protocol_t *volume,
static efi_status_t efi_open_volume(efi_loaded_image_t *image,
efi_file_protocol_t **fh)
{
+ struct efi_vendor_dev_path *dp = image->file_path;
+ efi_guid_t li_proto = LOADED_IMAGE_PROTOCOL_GUID;
efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
efi_simple_file_system_protocol_t *io;
efi_status_t status;
+ // If we are using EFI zboot, we should look for the file system
+ // protocol on the parent image's handle instead
+ if (IS_ENABLED(CONFIG_EFI_ZBOOT) &&
+ image->parent_handle != NULL &&
+ dp != NULL &&
+ dp->header.type == EFI_DEV_MEDIA &&
+ dp->header.sub_type == EFI_DEV_MEDIA_VENDOR &&
+ !efi_guidcmp(dp->vendorguid, LINUX_EFI_ZBOOT_MEDIA_GUID)) {
+ status = efi_bs_call(handle_protocol, image->parent_handle,
+ &li_proto, (void *)&image);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to locate parent image handle\n");
+ return status;
+ }
+ }
+
status = efi_bs_call(handle_protocol, image->device_handle, &fs_proto,
(void **)&io);
if (status != EFI_SUCCESS) {
@@ -136,7 +154,7 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long *load_size)
{
const efi_char16_t *cmdline = image->load_options;
- int cmdline_len = image->load_options_size;
+ u32 cmdline_len = image->load_options_size;
unsigned long efi_chunk_size = ULONG_MAX;
efi_file_protocol_t *volume = NULL;
efi_file_protocol_t *file;
@@ -238,6 +256,9 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
if (volume)
volume->close(volume);
+
+ if (*load_size == 0)
+ return EFI_NOT_READY;
return EFI_SUCCESS;
err_close_file:
diff --git a/drivers/firmware/efi/libstub/intrinsics.c b/drivers/firmware/efi/libstub/intrinsics.c
new file mode 100644
index 000000000000..a04ab39292b6
--- /dev/null
+++ b/drivers/firmware/efi/libstub/intrinsics.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/string.h>
+
+#include "efistub.h"
+
+#ifdef CONFIG_KASAN
+#undef memcpy
+#undef memmove
+#undef memset
+void *__memcpy(void *__dest, const void *__src, size_t __n) __alias(memcpy);
+void *__memmove(void *__dest, const void *__src, size_t count) __alias(memmove);
+void *__memset(void *s, int c, size_t count) __alias(memset);
+#endif
+
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ efi_bs_call(copy_mem, dst, src, len);
+ return dst;
+}
+
+extern void *memmove(void *dst, const void *src, size_t len) __alias(memcpy);
+
+void *memset(void *dst, int c, size_t len)
+{
+ efi_bs_call(set_mem, dst, len, c & U8_MAX);
+ return dst;
+}
diff --git a/drivers/firmware/efi/libstub/loongarch-stub.c b/drivers/firmware/efi/libstub/loongarch-stub.c
new file mode 100644
index 000000000000..32329f2a92f9
--- /dev/null
+++ b/drivers/firmware/efi/libstub/loongarch-stub.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Yun Liu <liuyun@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/efi.h>
+#include <asm/addrspace.h>
+#include "efistub.h"
+
+typedef void __noreturn (*kernel_entry_t)(bool efi, unsigned long cmdline,
+ unsigned long systab);
+
+extern int kernel_asize;
+extern int kernel_fsize;
+extern int kernel_offset;
+extern kernel_entry_t kernel_entry;
+
+efi_status_t check_platform_features(void)
+{
+ return EFI_SUCCESS;
+}
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
+{
+ efi_status_t status;
+ unsigned long kernel_addr = 0;
+
+ kernel_addr = (unsigned long)&kernel_offset - kernel_offset;
+
+ status = efi_relocate_kernel(&kernel_addr, kernel_fsize, kernel_asize,
+ PHYSADDR(VMLINUX_LOAD_ADDRESS), SZ_2M, 0x0);
+
+ *image_addr = kernel_addr;
+ *image_size = kernel_asize;
+
+ return status;
+}
+
+struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
+{
+ struct exit_boot_struct *p = priv;
+
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight to SetVirtualAddressMap()
+ */
+ efi_get_virtmap(map->map, map->map_size, map->desc_size,
+ p->runtime_map, &p->runtime_entry_count);
+
+ return EFI_SUCCESS;
+}
+
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr)
+{
+ kernel_entry_t real_kernel_entry;
+ struct exit_boot_struct priv;
+ unsigned long desc_size;
+ efi_status_t status;
+ u32 desc_ver;
+
+ status = efi_alloc_virtmap(&priv.runtime_map, &desc_size, &desc_ver);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to retrieve UEFI memory map.\n");
+ return status;
+ }
+
+ efi_info("Exiting boot services\n");
+
+ efi_novamap = false;
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Install the new virtual address map */
+ efi_rt_call(set_virtual_address_map,
+ priv.runtime_entry_count * desc_size, desc_size,
+ desc_ver, priv.runtime_map);
+
+ /* Config Direct Mapping */
+ csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
+ csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
+
+ real_kernel_entry = (kernel_entry_t)
+ ((unsigned long)&kernel_entry - kernel_addr + VMLINUX_LOAD_ADDRESS);
+
+ real_kernel_entry(true, (unsigned long)cmdline_ptr,
+ (unsigned long)efi_system_table);
+}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index feef8d4be113..45841ef55a9f 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -5,71 +5,66 @@
#include "efistub.h"
-static inline bool mmap_has_headroom(unsigned long buff_size,
- unsigned long map_size,
- unsigned long desc_size)
-{
- unsigned long slack = buff_size - map_size;
-
- return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
-}
-
/**
* efi_get_memory_map() - get memory map
- * @map: on return pointer to memory map
+ * @map: pointer to memory map pointer to which to assign the
+ * newly allocated memory map
+ * @install_cfg_tbl: whether or not to install the boot memory map as a
+ * configuration table
*
* Retrieve the UEFI memory map. The allocated memory leaves room for
* up to EFI_MMAP_NR_SLACK_SLOTS additional memory map entries.
*
* Return: status code
*/
-efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
+efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
+ bool install_cfg_tbl)
{
- efi_memory_desc_t *m = NULL;
+ int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY
+ : EFI_LOADER_DATA;
+ efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
+ struct efi_boot_memmap *m, tmp;
efi_status_t status;
- unsigned long key;
- u32 desc_version;
-
- *map->desc_size = sizeof(*m);
- *map->map_size = *map->desc_size * 32;
- *map->buff_size = *map->map_size;
-again:
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
- *map->map_size, (void **)&m);
+ unsigned long size;
+
+ tmp.map_size = 0;
+ status = efi_bs_call(get_memory_map, &tmp.map_size, NULL, &tmp.map_key,
+ &tmp.desc_size, &tmp.desc_ver);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ size = tmp.map_size + tmp.desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ status = efi_bs_call(allocate_pool, memtype, sizeof(*m) + size,
+ (void **)&m);
if (status != EFI_SUCCESS)
- goto fail;
-
- *map->desc_size = 0;
- key = 0;
- status = efi_bs_call(get_memory_map, map->map_size, m,
- &key, map->desc_size, &desc_version);
- if (status == EFI_BUFFER_TOO_SMALL ||
- !mmap_has_headroom(*map->buff_size, *map->map_size,
- *map->desc_size)) {
- efi_bs_call(free_pool, m);
+ return status;
+
+ if (install_cfg_tbl) {
/*
- * Make sure there is some entries of headroom so that the
- * buffer can be reused for a new map after allocations are
- * no longer permitted. Its unlikely that the map will grow to
- * exceed this headroom once we are ready to trigger
- * ExitBootServices()
+ * Installing a configuration table might allocate memory, and
+ * this may modify the memory map. This means we should install
+ * the configuration table first, and re-install or delete it
+ * as needed.
*/
- *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
- *map->buff_size = *map->map_size;
- goto again;
+ status = efi_bs_call(install_configuration_table, &tbl_guid, m);
+ if (status != EFI_SUCCESS)
+ goto free_map;
}
- if (status == EFI_SUCCESS) {
- if (map->key_ptr)
- *map->key_ptr = key;
- if (map->desc_ver)
- *map->desc_ver = desc_version;
- } else {
- efi_bs_call(free_pool, m);
- }
+ m->buff_size = m->map_size = size;
+ status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
+ &m->desc_size, &m->desc_ver);
+ if (status != EFI_SUCCESS)
+ goto uninstall_table;
+
+ *map = m;
+ return EFI_SUCCESS;
-fail:
- *map->map = m;
+uninstall_table:
+ if (install_cfg_tbl)
+ efi_bs_call(install_configuration_table, &tbl_guid, NULL);
+free_map:
+ efi_bs_call(free_pool, m);
return status;
}
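The single-allocation scheme above relies on the descriptors living in a trailing flexible array of the boot memmap structure, which is why sizeof(*m) + size is allocated in one pool call and why a later free_pool() on the returned pointer releases both the bookkeeping fields and the map. A sketch of a layout consistent with the accesses in this hunk (field order and exact types are assumptions; see include/linux/efi.h for the authoritative definition):

#include <linux/efi.h>

struct example_boot_memmap {
	unsigned long		map_size;	/* bytes actually filled in */
	unsigned long		desc_size;	/* firmware descriptor stride */
	u32			desc_ver;
	unsigned long		map_key;	/* needed by ExitBootServices() */
	unsigned long		buff_size;	/* bytes available in map[] */
	efi_memory_desc_t	map[];		/* descriptors follow in place */
};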
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 715f37479154..9fb5869896be 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -55,22 +55,13 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long *addr,
unsigned long random_seed)
{
- unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
- unsigned long buff_size;
+ struct efi_boot_memmap *map;
efi_status_t status;
- efi_memory_desc_t *memory_map;
int map_offset;
- struct efi_boot_memmap map;
- map.map = &memory_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = NULL;
- map.key_ptr = NULL;
- map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&map);
+ status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
return status;
@@ -80,8 +71,8 @@ efi_status_t efi_random_alloc(unsigned long size,
size = round_up(size, EFI_ALLOC_ALIGN);
/* count the suitable slots in each memory map entry */
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
unsigned long slots;
slots = get_entry_num_slots(md, size, ilog2(align));
@@ -109,8 +100,8 @@ efi_status_t efi_random_alloc(unsigned long size,
* to calculate the randomly chosen address, and allocate it directly
* using EFI_ALLOCATE_ADDRESS.
*/
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
efi_physical_addr_t target;
unsigned long pages;
@@ -133,7 +124,7 @@ efi_status_t efi_random_alloc(unsigned long size,
break;
}
- efi_bs_call(free_pool, memory_map);
+ efi_bs_call(free_pool, map);
return status;
}
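The loops above walk the map by raw byte offset because the firmware's desc_size may be larger than sizeof(efi_memory_desc_t), so plain array indexing would be wrong. A small self-contained sketch of the same idiom (illustrative helper, not part of the patch):

#include <linux/efi.h>

/*
 * Count EFI_CONVENTIONAL_MEMORY pages in a boot memory map, stepping
 * through the descriptors by desc_size exactly as the allocator above does.
 */
static u64 example_count_free_pages(struct efi_boot_memmap *map)
{
	unsigned long off;
	u64 pages = 0;

	for (off = 0; off < map->map_size; off += map->desc_size) {
		efi_memory_desc_t *md = (void *)map->map + off;

		if (md->type == EFI_CONVENTIONAL_MEMORY)
			pages += md->num_pages;
	}
	return pages;
}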
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index 8ee9eb2b9039..bf6fbd5d22a1 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -23,21 +23,12 @@
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
- unsigned long map_size, desc_size, buff_size;
- efi_memory_desc_t *map;
+ struct efi_boot_memmap *map;
efi_status_t status;
unsigned long nr_pages;
int i;
- struct efi_boot_memmap boot_map;
- boot_map.map = &map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&boot_map);
+ status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
goto fail;
@@ -52,12 +43,12 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
size = round_up(size, EFI_ALLOC_ALIGN);
nr_pages = size / EFI_PAGE_SIZE;
- for (i = 0; i < map_size / desc_size; i++) {
+ for (i = 0; i < map->map_size / map->desc_size; i++) {
efi_memory_desc_t *desc;
- unsigned long m = (unsigned long)map;
+ unsigned long m = (unsigned long)map->map;
u64 start, end;
- desc = efi_early_memdesc_ptr(m, desc_size, i);
+ desc = efi_early_memdesc_ptr(m, map->desc_size, i);
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
@@ -87,7 +78,7 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
}
}
- if (i == map_size / desc_size)
+ if (i == map->map_size / map->desc_size)
status = EFI_NOT_FOUND;
efi_bs_call(free_pool, map);
diff --git a/drivers/firmware/efi/libstub/systable.c b/drivers/firmware/efi/libstub/systable.c
new file mode 100644
index 000000000000..91d016b02f8c
--- /dev/null
+++ b/drivers/firmware/efi/libstub/systable.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+const efi_system_table_t *efi_system_table;
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 7a7abc8959d2..b9ce6393e353 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -722,32 +722,22 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
efi_set_u64_split((unsigned long)efi_system_table,
&p->efi->efi_systab, &p->efi->efi_systab_hi);
- p->efi->efi_memdesc_size = *map->desc_size;
- p->efi->efi_memdesc_version = *map->desc_ver;
- efi_set_u64_split((unsigned long)*map->map,
+ p->efi->efi_memdesc_size = map->desc_size;
+ p->efi->efi_memdesc_version = map->desc_ver;
+ efi_set_u64_split((unsigned long)map->map,
&p->efi->efi_memmap, &p->efi->efi_memmap_hi);
- p->efi->efi_memmap_size = *map->map_size;
+ p->efi->efi_memmap_size = map->map_size;
return EFI_SUCCESS;
}
static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
{
- unsigned long map_sz, key, desc_size, buff_size;
- efi_memory_desc_t *mem_map;
struct setup_data *e820ext = NULL;
__u32 e820ext_size = 0;
efi_status_t status;
- __u32 desc_version;
- struct efi_boot_memmap map;
struct exit_boot_struct priv;
- map.map = &mem_map;
- map.map_size = &map_sz;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_version;
- map.key_ptr = &key;
- map.buff_size = &buff_size;
priv.boot_params = boot_params;
priv.efi = &boot_params->efi_info;
@@ -756,7 +746,7 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
return status;
/* Might as well exit boot services now */
- status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
if (status != EFI_SUCCESS)
return status;
@@ -782,7 +772,7 @@ unsigned long efi_main(efi_handle_t handle,
unsigned long bzimage_addr = (unsigned long)startup_32;
unsigned long buffer_start, buffer_end;
struct setup_header *hdr = &boot_params->hdr;
- unsigned long addr, size;
+ const struct linux_efi_initrd *initrd = NULL;
efi_status_t status;
efi_system_table = sys_table_arg;
@@ -877,17 +867,18 @@ unsigned long efi_main(efi_handle_t handle,
* arguments will be processed only if image is not NULL, which will be
* the case only if we were loaded via the PE entry point.
*/
- status = efi_load_initrd(image, &addr, &size, hdr->initrd_addr_max,
- ULONG_MAX);
+ status = efi_load_initrd(image, hdr->initrd_addr_max, ULONG_MAX,
+ &initrd);
if (status != EFI_SUCCESS)
goto fail;
- if (size > 0) {
- efi_set_u64_split(addr, &hdr->ramdisk_image,
+ if (initrd && initrd->size > 0) {
+ efi_set_u64_split(initrd->base, &hdr->ramdisk_image,
&boot_params->ext_ramdisk_image);
- efi_set_u64_split(size, &hdr->ramdisk_size,
+ efi_set_u64_split(initrd->size, &hdr->ramdisk_size,
&boot_params->ext_ramdisk_size);
}
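efi_set_u64_split() stores a 64-bit value into the split low/high 32-bit fields that the x86 boot protocol uses for the ramdisk address and size. A hedged sketch of what the helper amounts to (the real definition lives elsewhere in the stub headers):

#include <linux/kernel.h>

static inline void example_set_u64_split(u64 data, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(data);
	*hi = upper_32_bits(data);
}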
+
/*
* If the boot loader gave us a value for secure_boot then we use that,
* otherwise we ask the BIOS.
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
new file mode 100644
index 000000000000..9e6fe061ab07
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/pe.h>
+
+#ifdef CONFIG_64BIT
+ .set .Lextra_characteristics, 0x0
+ .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+#else
+ .set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
+ .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+#endif
+
+ .section ".head", "a"
+ .globl __efistub_efi_zboot_header
+__efistub_efi_zboot_header:
+.Ldoshdr:
+ .long MZ_MAGIC
+ .ascii "zimg" // image type
+ .long __efistub__gzdata_start - .Ldoshdr // payload offset
+ .long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
+ .long 0, 0 // reserved
+ .asciz COMP_TYPE // compression type
+ .org .Ldoshdr + 0x3c
+ .long .Lpehdr - .Ldoshdr // PE header offset
+
+.Lpehdr:
+ .long PE_MAGIC
+ .short MACHINE_TYPE
+ .short .Lsection_count
+ .long 0
+ .long 0
+ .long 0
+ .short .Lsection_table - .Loptional_header
+ .short IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED |\
+ .Lextra_characteristics
+
+.Loptional_header:
+ .short .Lpe_opt_magic
+ .byte 0, 0
+ .long _etext - .Lefi_header_end
+ .long __data_size
+ .long 0
+ .long __efistub_efi_zboot_entry - .Ldoshdr
+ .long .Lefi_header_end - .Ldoshdr
+
+#ifdef CONFIG_64BIT
+ .quad 0
+#else
+ .long _etext - .Ldoshdr, 0x0
+#endif
+ .long 4096
+ .long 512
+ .short 0, 0
+ .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion
+ .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion
+ .short 0, 0
+ .long 0
+ .long _end - .Ldoshdr
+
+ .long .Lefi_header_end - .Ldoshdr
+ .long 0
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION
+ .short 0
+#ifdef CONFIG_64BIT
+ .quad 0, 0, 0, 0
+#else
+ .long 0, 0, 0, 0
+#endif
+ .long 0
+ .long (.Lsection_table - .) / 8
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+ .quad 0 // CertificationTable
+ .quad 0 // BaseRelocationTable
+#ifdef CONFIG_DEBUG_EFI
+ .long .Lefi_debug_table - .Ldoshdr // DebugTable
+ .long .Lefi_debug_table_size
+#endif
+
+.Lsection_table:
+ .ascii ".text\0\0\0"
+ .long _etext - .Lefi_header_end
+ .long .Lefi_header_end - .Ldoshdr
+ .long _etext - .Lefi_header_end
+ .long .Lefi_header_end - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE
+
+ .ascii ".data\0\0\0"
+ .long __data_size
+ .long _etext - .Ldoshdr
+ .long __data_rawsize
+ .long _etext - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE
+
+ .set .Lsection_count, (. - .Lsection_table) / 40
+
+#ifdef CONFIG_DEBUG_EFI
+ .section ".rodata", "a"
+ .align 2
+.Lefi_debug_table:
+ // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long IMAGE_DEBUG_TYPE_CODEVIEW // Type
+ .long .Lefi_debug_entry_size // SizeOfData
+ .long 0 // RVA
+ .long .Lefi_debug_entry - .Ldoshdr // FileOffset
+
+ .set .Lefi_debug_table_size, . - .Lefi_debug_table
+ .previous
+
+.Lefi_debug_entry:
+ // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
+ .ascii "NB10" // Signature
+ .long 0 // Unknown
+ .long 0 // Unknown2
+ .long 0 // Unknown3
+
+ .asciz ZBOOT_EFI_PATH
+
+ .set .Lefi_debug_entry_size, . - .Lefi_debug_entry
+#endif
+
+ .p2align 12
+.Lefi_header_end:
+
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
new file mode 100644
index 000000000000..ea72c8f27da6
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/pe.h>
+#include <asm/efi.h>
+#include <asm/unaligned.h>
+
+#include "efistub.h"
+
+static unsigned char zboot_heap[SZ_256K] __aligned(64);
+static unsigned long free_mem_ptr, free_mem_end_ptr;
+
+#define STATIC static
+#if defined(CONFIG_KERNEL_GZIP)
+#include "../../../../lib/decompress_inflate.c"
+#elif defined(CONFIG_KERNEL_LZ4)
+#include "../../../../lib/decompress_unlz4.c"
+#elif defined(CONFIG_KERNEL_LZMA)
+#include "../../../../lib/decompress_unlzma.c"
+#elif defined(CONFIG_KERNEL_LZO)
+#include "../../../../lib/decompress_unlzo.c"
+#elif defined(CONFIG_KERNEL_XZ)
+#undef memcpy
+#define memcpy memcpy
+#undef memmove
+#define memmove memmove
+#include "../../../../lib/decompress_unxz.c"
+#elif defined(CONFIG_KERNEL_ZSTD)
+#include "../../../../lib/decompress_unzstd.c"
+#endif
+
+extern char efi_zboot_header[];
+extern char _gzdata_start[], _gzdata_end[];
+
+static void log(efi_char16_t str[])
+{
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, L"EFI decompressor: ");
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, str);
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, L"\n");
+}
+
+static void error(char *x)
+{
+ log(L"error() called from decompressor library\n");
+}
+
+// Local version to avoid pulling in memcmp()
+static bool guids_eq(const efi_guid_t *a, const efi_guid_t *b)
+{
+ const u32 *l = (u32 *)a;
+ const u32 *r = (u32 *)b;
+
+ return l[0] == r[0] && l[1] == r[1] && l[2] == r[2] && l[3] == r[3];
+}
+
+static efi_status_t __efiapi
+load_file(efi_load_file_protocol_t *this, efi_device_path_protocol_t *rem,
+ bool boot_policy, unsigned long *bufsize, void *buffer)
+{
+ unsigned long compressed_size = _gzdata_end - _gzdata_start;
+ struct efi_vendor_dev_path *vendor_dp;
+ bool decompress = false;
+ unsigned long size;
+ int ret;
+
+ if (rem == NULL || bufsize == NULL)
+ return EFI_INVALID_PARAMETER;
+
+ if (boot_policy)
+ return EFI_UNSUPPORTED;
+
+ // Look for our vendor media device node in the remaining file path
+ if (rem->type == EFI_DEV_MEDIA &&
+ rem->sub_type == EFI_DEV_MEDIA_VENDOR) {
+ vendor_dp = container_of(rem, struct efi_vendor_dev_path, header);
+ if (!guids_eq(&vendor_dp->vendorguid, &LINUX_EFI_ZBOOT_MEDIA_GUID))
+ return EFI_NOT_FOUND;
+
+ decompress = true;
+ rem = (void *)(vendor_dp + 1);
+ }
+
+ if (rem->type != EFI_DEV_END_PATH ||
+ rem->sub_type != EFI_DEV_END_ENTIRE)
+ return EFI_NOT_FOUND;
+
+ // The uncompressed size of the payload is appended to the raw bit
+ // stream, and may therefore appear misaligned in memory
+ size = decompress ? get_unaligned_le32(_gzdata_end - 4)
+ : compressed_size;
+ if (buffer == NULL || *bufsize < size) {
+ *bufsize = size;
+ return EFI_BUFFER_TOO_SMALL;
+ }
+
+ if (decompress) {
+ ret = __decompress(_gzdata_start, compressed_size, NULL, NULL,
+ buffer, size, NULL, error);
+ if (ret < 0) {
+ log(L"Decompression failed");
+ return EFI_DEVICE_ERROR;
+ }
+ } else {
+ memcpy(buffer, _gzdata_start, compressed_size);
+ }
+
+ return EFI_SUCCESS;
+}
+
+// Return the length in bytes of the device path up to the first end node.
+static int device_path_length(const efi_device_path_protocol_t *dp)
+{
+ int len = 0;
+
+ while (dp->type != EFI_DEV_END_PATH) {
+ len += dp->length;
+ dp = (void *)((u8 *)dp + dp->length);
+ }
+ return len;
+}
+
+static void append_rel_offset_node(efi_device_path_protocol_t **dp,
+ unsigned long start, unsigned long end)
+{
+ struct efi_rel_offset_dev_path *rodp = (void *)*dp;
+
+ rodp->header.type = EFI_DEV_MEDIA;
+ rodp->header.sub_type = EFI_DEV_MEDIA_REL_OFFSET;
+ rodp->header.length = sizeof(struct efi_rel_offset_dev_path);
+ rodp->reserved = 0;
+ rodp->starting_offset = start;
+ rodp->ending_offset = end;
+
+ *dp = (void *)(rodp + 1);
+}
+
+static void append_ven_media_node(efi_device_path_protocol_t **dp,
+ efi_guid_t *guid)
+{
+ struct efi_vendor_dev_path *vmdp = (void *)*dp;
+
+ vmdp->header.type = EFI_DEV_MEDIA;
+ vmdp->header.sub_type = EFI_DEV_MEDIA_VENDOR;
+ vmdp->header.length = sizeof(struct efi_vendor_dev_path);
+ vmdp->vendorguid = *guid;
+
+ *dp = (void *)(vmdp + 1);
+}
+
+static void append_end_node(efi_device_path_protocol_t **dp)
+{
+ (*dp)->type = EFI_DEV_END_PATH;
+ (*dp)->sub_type = EFI_DEV_END_ENTIRE;
+ (*dp)->length = sizeof(struct efi_generic_dev_path);
+
+ ++*dp;
+}
+
+asmlinkage efi_status_t __efiapi
+efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
+{
+ struct efi_mem_mapped_dev_path mmdp = {
+ .header.type = EFI_DEV_HW,
+ .header.sub_type = EFI_DEV_MEM_MAPPED,
+ .header.length = sizeof(struct efi_mem_mapped_dev_path)
+ };
+ efi_device_path_protocol_t *parent_dp, *dpp, *lf2_dp, *li_dp;
+ efi_load_file2_protocol_t zboot_load_file2;
+ efi_loaded_image_t *parent, *child;
+ unsigned long exit_data_size;
+ efi_handle_t child_handle;
+ efi_handle_t zboot_handle;
+ efi_char16_t *exit_data;
+ efi_status_t status;
+ void *dp_alloc;
+ int dp_len;
+
+ WRITE_ONCE(efi_system_table, systab);
+
+ free_mem_ptr = (unsigned long)&zboot_heap;
+ free_mem_end_ptr = free_mem_ptr + sizeof(zboot_heap);
+
+ exit_data = NULL;
+ exit_data_size = 0;
+
+ status = efi_bs_call(handle_protocol, handle,
+ &LOADED_IMAGE_PROTOCOL_GUID, (void **)&parent);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to locate parent's loaded image protocol");
+ return status;
+ }
+
+ status = efi_bs_call(handle_protocol, handle,
+ &LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID,
+ (void **)&parent_dp);
+ if (status != EFI_SUCCESS || parent_dp == NULL) {
+ // Create a MemoryMapped() device path node to describe
+ // the parent image if no device path was provided.
+ mmdp.memory_type = parent->image_code_type;
+ mmdp.starting_addr = (unsigned long)parent->image_base;
+ mmdp.ending_addr = (unsigned long)parent->image_base +
+ parent->image_size - 1;
+ parent_dp = &mmdp.header;
+ dp_len = sizeof(mmdp);
+ } else {
+ dp_len = device_path_length(parent_dp);
+ }
+
+ // Allocate some pool memory for device path protocol data
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ 2 * (dp_len + sizeof(struct efi_rel_offset_dev_path) +
+ sizeof(struct efi_generic_dev_path)) +
+ sizeof(struct efi_vendor_dev_path),
+ (void **)&dp_alloc);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to allocate device path pool memory");
+ return status;
+ }
+
+ // Create a device path describing the compressed payload in this image
+ // <...parent_dp...>/Offset(<start>, <end>)
+ lf2_dp = memcpy(dp_alloc, parent_dp, dp_len);
+ dpp = (void *)((u8 *)lf2_dp + dp_len);
+ append_rel_offset_node(&dpp,
+ (unsigned long)(_gzdata_start - efi_zboot_header),
+ (unsigned long)(_gzdata_end - efi_zboot_header - 1));
+ append_end_node(&dpp);
+
+ // Create a device path describing the decompressed payload in this image
+ // <...parent_dp...>/Offset(<start>, <end>)/VenMedia(ZBOOT_MEDIA_GUID)
+ dp_len += sizeof(struct efi_rel_offset_dev_path);
+ li_dp = memcpy(dpp, lf2_dp, dp_len);
+ dpp = (void *)((u8 *)li_dp + dp_len);
+ append_ven_media_node(&dpp, &LINUX_EFI_ZBOOT_MEDIA_GUID);
+ append_end_node(&dpp);
+
+ zboot_handle = NULL;
+ zboot_load_file2.load_file = load_file;
+ status = efi_bs_call(install_multiple_protocol_interfaces,
+ &zboot_handle,
+ &EFI_DEVICE_PATH_PROTOCOL_GUID, lf2_dp,
+ &EFI_LOAD_FILE2_PROTOCOL_GUID, &zboot_load_file2,
+ NULL);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to install LoadFile2 protocol and device path");
+ goto free_dpalloc;
+ }
+
+ status = efi_bs_call(load_image, false, handle, li_dp, NULL, 0,
+ &child_handle);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to load image");
+ goto uninstall_lf2;
+ }
+
+ status = efi_bs_call(handle_protocol, child_handle,
+ &LOADED_IMAGE_PROTOCOL_GUID, (void **)&child);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to locate child's loaded image protocol");
+ goto unload_image;
+ }
+
+ // Copy the kernel command line
+ child->load_options = parent->load_options;
+ child->load_options_size = parent->load_options_size;
+
+ status = efi_bs_call(start_image, child_handle, &exit_data_size,
+ &exit_data);
+ if (status != EFI_SUCCESS) {
+ log(L"StartImage() returned with error");
+ if (exit_data_size > 0)
+ log(exit_data);
+
+ // If StartImage() returns EFI_SECURITY_VIOLATION, the image is
+ // not unloaded so we need to do it by hand.
+ if (status == EFI_SECURITY_VIOLATION)
+unload_image:
+ efi_bs_call(unload_image, child_handle);
+ }
+
+uninstall_lf2:
+ efi_bs_call(uninstall_multiple_protocol_interfaces,
+ zboot_handle,
+ &EFI_DEVICE_PATH_PROTOCOL_GUID, lf2_dp,
+ &EFI_LOAD_FILE2_PROTOCOL_GUID, &zboot_load_file2,
+ NULL);
+
+free_dpalloc:
+ efi_bs_call(free_pool, dp_alloc);
+
+ efi_bs_call(exit, handle, status, exit_data_size, exit_data);
+
+ // Free ExitData in case Exit() returned with a failure code,
+ // but return the original status code.
+ log(L"Exit() returned with failure code");
+ if (exit_data != NULL)
+ efi_bs_call(free_pool, exit_data);
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
new file mode 100644
index 000000000000..87a62765bafd
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ENTRY(__efistub_efi_zboot_header);
+
+SECTIONS
+{
+ .head : ALIGN(4096) {
+ *(.head)
+ }
+
+ .text : {
+ *(.text* .init.text*)
+ }
+
+ .rodata : ALIGN(8) {
+ __efistub__gzdata_start = .;
+ *(.gzdata)
+ __efistub__gzdata_end = .;
+ *(.rodata* .init.rodata* .srodata*)
+ _etext = ALIGN(4096);
+ . = _etext;
+ }
+
+ .data : ALIGN(4096) {
+ *(.data* .init.data*)
+ _edata = ALIGN(512);
+ . = _edata;
+ }
+
+ .bss : {
+ *(.bss* .init.bss*)
+ _end = ALIGN(512);
+ . = _end;
+ }
+
+ /DISCARD/ : {
+ *(.modinfo .init.modinfo)
+ }
+}
+
+PROVIDE(__efistub__gzdata_size = ABSOLUTE(. - __efistub__gzdata_start));
+
+PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
+PROVIDE(__data_size = ABSOLUTE(_end - _etext));
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index adaa492c3d2d..4e2575dfeb90 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -681,6 +681,15 @@ static struct notifier_block gsmi_die_notifier = {
static int gsmi_panic_callback(struct notifier_block *nb,
unsigned long reason, void *arg)
{
+
+ /*
+ * Panic callbacks are executed with all other CPUs stopped,
+ * so we must not attempt to spin waiting for gsmi_dev.lock
+ * to be released.
+ */
+ if (spin_is_locked(&gsmi_dev.lock))
+ return NOTIFY_DONE;
+
gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
return NOTIFY_DONE;
}
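The check above avoids deadlocking the panic path when another, now-stopped CPU still holds gsmi_dev.lock. An equivalent defensive pattern for panic notifiers, sketched here with a hypothetical driver and using spin_trylock() rather than spin_is_locked():

#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>

/* Hypothetical driver state, standing in for gsmi_dev. */
static struct {
	spinlock_t lock;
} example_dev = {
	.lock = __SPIN_LOCK_UNLOCKED(example_dev.lock),
};

static int example_panic_callback(struct notifier_block *nb,
				  unsigned long reason, void *arg)
{
	/*
	 * Panic notifiers run with all other CPUs stopped, so a lock held by
	 * one of them will never be released again: try once, never spin.
	 */
	if (!spin_trylock(&example_dev.lock))
		return NOTIFY_DONE;

	/* ... log the shutdown reason to firmware ... */

	spin_unlock(&example_dev.lock);
	return NOTIFY_DONE;
}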
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index fd1fa55c9113..0914e7328b1a 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -77,12 +77,18 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
#define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B
#define PCIE_DEVICE_ID_SILICOM_PAC_N5010 0x1000
#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001
+#define PCIE_DEVICE_ID_INTEL_DFL 0xbcce
+/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
+#define PCIE_SUBDEVICE_ID_INTEL_N6000 0x1770
+#define PCIE_SUBDEVICE_ID_INTEL_N6001 0x1771
+#define PCIE_SUBDEVICE_ID_INTEL_C6100 0x17d4
/* VF Device */
#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF 0x0B2C
+#define PCIE_DEVICE_ID_INTEL_DFL_VF 0xbccf
static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
@@ -96,6 +102,18 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
{0,}
};
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 5498bc337f8b..b9aae85ba930 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1866,7 +1866,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
return -EINVAL;
fds = memdup_user((void __user *)(arg + sizeof(hdr)),
- hdr.count * sizeof(s32));
+ array_size(hdr.count, sizeof(s32)));
if (IS_ERR(fds))
return PTR_ERR(fds);
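array_size() from <linux/overflow.h> saturates to SIZE_MAX if the multiplication overflows, so memdup_user() fails cleanly instead of copying into an under-sized buffer. A minimal illustration of the pattern (hypothetical ioctl copy-in helper, not from this driver):

#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/types.h>

static void *example_copy_in_fds(const void __user *uptr, u32 count)
{
	/*
	 * count * sizeof(s32) could overflow if count is attacker controlled;
	 * array_size() saturates to SIZE_MAX, which makes memdup_user()
	 * return an error rather than under-allocate.
	 */
	return memdup_user(uptr, array_size(count, sizeof(s32)));
}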
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 133e511355c9..79d48852825e 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -605,6 +605,9 @@ static const struct platform_device_id intel_m10bmc_sec_ids[] = {
{
.name = "n3000bmc-sec-update",
},
+ {
+ .name = "d5005bmc-sec-update",
+ },
{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
index bd284c7b8dc9..7436976ea904 100644
--- a/drivers/fpga/microchip-spi.c
+++ b/drivers/fpga/microchip-spi.c
@@ -395,4 +395,5 @@ static struct spi_driver mpf_driver = {
module_spi_driver(mpf_driver);
MODULE_DESCRIPTION("Microchip Polarfire SPI FPGA Manager");
+MODULE_AUTHOR("Ivan Bornyakov <i.bornyakov@metrotek.ru>");
MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 3a7b78e36701..694e80c06665 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -392,8 +392,8 @@ int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
}
EXPORT_SYMBOL_GPL(fsi_slave_write);
-extern int fsi_slave_claim_range(struct fsi_slave *slave,
- uint32_t addr, uint32_t size)
+int fsi_slave_claim_range(struct fsi_slave *slave,
+ uint32_t addr, uint32_t size)
{
if (addr + size < addr)
return -EINVAL;
@@ -406,8 +406,8 @@ extern int fsi_slave_claim_range(struct fsi_slave *slave,
}
EXPORT_SYMBOL_GPL(fsi_slave_claim_range);
-extern void fsi_slave_release_range(struct fsi_slave *slave,
- uint32_t addr, uint32_t size)
+void fsi_slave_release_range(struct fsi_slave *slave,
+ uint32_t addr, uint32_t size)
{
}
EXPORT_SYMBOL_GPL(fsi_slave_release_range);
@@ -1314,6 +1314,9 @@ int fsi_master_register(struct fsi_master *master)
mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
+ if (master->idx < 0)
+ return master->idx;
+
dev_set_name(&master->dev, "fsi%d", master->idx);
master->dev.class = &fsi_master_class;
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 24292acdbaf8..5f608ef8b53c 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -1324,12 +1324,14 @@ static int fsi_master_acf_probe(struct platform_device *pdev)
}
master->cvic = devm_of_iomap(&pdev->dev, np, 0, NULL);
if (IS_ERR(master->cvic)) {
+ of_node_put(np);
rc = PTR_ERR(master->cvic);
dev_err(&pdev->dev, "Error %d mapping CVIC\n", rc);
goto err_free;
}
rc = of_property_read_u32(np, "copro-sw-interrupts",
&master->cvic_sw_irq);
+ of_node_put(np);
if (rc) {
dev_err(&pdev->dev, "Can't find coprocessor SW interrupt\n");
goto err_free;
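The fix above makes sure the device_node reference taken earlier is dropped on every exit path once it is no longer needed. The general shape of the pattern, sketched with a hypothetical probe helper and child-node name:

#include <linux/of.h>

static int example_read_irq_prop(struct device_node *parent, u32 *out)
{
	struct device_node *np;
	int rc;

	np = of_get_child_by_name(parent, "coprocessor");	/* takes a reference */
	if (!np)
		return -ENODEV;

	rc = of_property_read_u32(np, "copro-sw-interrupts", out);
	of_node_put(np);	/* drop it on success and on failure alike */
	return rc;
}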
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index cd6bee5e12a7..4762315a46ba 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -51,7 +51,7 @@
#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
-/* MRESB: Reset brindge */
+/* MRESB: Reset bridge */
#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index c9cc75fbdfb9..abdd37d5507f 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -44,6 +44,7 @@ struct occ {
struct device *sbefifo;
char name[32];
int idx;
+ bool platform_hwmon;
u8 sequence_number;
void *buffer;
void *client_buffer;
@@ -94,6 +95,7 @@ static int occ_open(struct inode *inode, struct file *file)
client->occ = occ;
mutex_init(&client->lock);
file->private_data = client;
+ get_device(occ->dev);
/* We allocate a 1-page buffer, make sure it all fits */
BUILD_BUG_ON((OCC_CMD_DATA_BYTES + 3) > PAGE_SIZE);
@@ -197,6 +199,7 @@ static int occ_release(struct inode *inode, struct file *file)
{
struct occ_client *client = file->private_data;
+ put_device(client->occ->dev);
free_page((unsigned long)client->buffer);
kfree(client);
@@ -246,7 +249,7 @@ static int occ_verify_checksum(struct occ *occ, struct occ_response *resp,
if (checksum != checksum_resp) {
dev_err(occ->dev, "Bad checksum: %04x!=%04x\n", checksum,
checksum_resp);
- return -EBADMSG;
+ return -EBADE;
}
return 0;
@@ -493,12 +496,19 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
for (i = 1; i < req_len - 2; ++i)
checksum += byte_request[i];
- mutex_lock(&occ->occ_lock);
+ rc = mutex_lock_interruptible(&occ->occ_lock);
+ if (rc)
+ return rc;
occ->client_buffer = response;
occ->client_buffer_size = user_resp_len;
occ->client_response_size = 0;
+ if (!occ->buffer) {
+ rc = -ENOENT;
+ goto done;
+ }
+
/*
* Get a sequence number and update the counter. Avoid a sequence
* number of 0 which would pass the response check below even if the
@@ -575,8 +585,11 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
dev_dbg(dev, "resp_status=%02x resp_data_len=%d\n",
resp->return_status, resp_data_length);
- occ->client_response_size = resp_data_length + 7;
rc = occ_verify_checksum(occ, resp, resp_data_length);
+ if (rc)
+ goto done;
+
+ occ->client_response_size = resp_data_length + 7;
done:
*resp_len = occ->client_response_size;
@@ -586,7 +599,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
}
EXPORT_SYMBOL_GPL(fsi_occ_submit);
-static int occ_unregister_child(struct device *dev, void *data)
+static int occ_unregister_platform_child(struct device *dev, void *data)
{
struct platform_device *hwmon_dev = to_platform_device(dev);
@@ -595,12 +608,25 @@ static int occ_unregister_child(struct device *dev, void *data)
return 0;
}
+static int occ_unregister_of_child(struct device *dev, void *data)
+{
+ struct platform_device *hwmon_dev = to_platform_device(dev);
+
+ of_device_unregister(hwmon_dev);
+ if (dev->of_node)
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+
+ return 0;
+}
+
static int occ_probe(struct platform_device *pdev)
{
int rc;
u32 reg;
+ char child_name[32];
struct occ *occ;
- struct platform_device *hwmon_dev;
+ struct platform_device *hwmon_dev = NULL;
+ struct device_node *hwmon_node;
struct device *dev = &pdev->dev;
struct platform_device_info hwmon_dev_info = {
.parent = dev,
@@ -659,10 +685,20 @@ static int occ_probe(struct platform_device *pdev)
return rc;
}
- hwmon_dev_info.id = occ->idx;
- hwmon_dev = platform_device_register_full(&hwmon_dev_info);
- if (IS_ERR(hwmon_dev))
- dev_warn(dev, "failed to create hwmon device\n");
+ hwmon_node = of_get_child_by_name(dev->of_node, hwmon_dev_info.name);
+ if (hwmon_node) {
+ snprintf(child_name, sizeof(child_name), "%s.%d", hwmon_dev_info.name, occ->idx);
+ hwmon_dev = of_platform_device_create(hwmon_node, child_name, dev);
+ of_node_put(hwmon_node);
+ }
+
+ if (!hwmon_dev) {
+ occ->platform_hwmon = true;
+ hwmon_dev_info.id = occ->idx;
+ hwmon_dev = platform_device_register_full(&hwmon_dev_info);
+ if (IS_ERR(hwmon_dev))
+ dev_warn(dev, "failed to create hwmon device\n");
+ }
return 0;
}
@@ -671,11 +707,17 @@ static int occ_remove(struct platform_device *pdev)
{
struct occ *occ = platform_get_drvdata(pdev);
- kvfree(occ->buffer);
-
misc_deregister(&occ->mdev);
- device_for_each_child(&pdev->dev, NULL, occ_unregister_child);
+ mutex_lock(&occ->occ_lock);
+ kvfree(occ->buffer);
+ occ->buffer = NULL;
+ mutex_unlock(&occ->occ_lock);
+
+ if (occ->platform_hwmon)
+ device_for_each_child(&pdev->dev, NULL, occ_unregister_platform_child);
+ else
+ device_for_each_child(&pdev->dev, NULL, occ_unregister_of_child);
ida_simple_remove(&occ_ida, occ->idx);
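The occ changes above follow a common teardown pattern: remove() invalidates the shared buffer under the same mutex that the submit path takes, so late callers fail fast with -ENOENT instead of touching freed memory. A distilled sketch of that pattern with hypothetical names (example_submit stands in for fsi_occ_submit()):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mutex.h>

struct example_dev {
	struct mutex lock;	/* initialized at probe time */
	void *buffer;
};

static int example_submit(struct example_dev *d)
{
	int rc = mutex_lock_interruptible(&d->lock);

	if (rc)
		return rc;

	if (!d->buffer) {	/* device already removed */
		rc = -ENOENT;
		goto out;
	}
	/* ... use d->buffer ... */
out:
	mutex_unlock(&d->lock);
	return rc;
}

static void example_remove(struct example_dev *d)
{
	mutex_lock(&d->lock);
	kvfree(d->buffer);
	d->buffer = NULL;	/* later submitters get -ENOENT */
	mutex_unlock(&d->lock);
}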
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index f52a912cdf16..5f93a53846aa 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -477,7 +477,8 @@ static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
if (!ready) {
sysfs_notify(&sbefifo->dev.kobj, NULL, dev_attr_timeout.attr.name);
sbefifo->timed_out = true;
- dev_err(dev, "%s FIFO Timeout ! status=%08x\n", up ? "UP" : "DOWN", sts);
+ dev_err(dev, "%s FIFO Timeout (%u ms)! status=%08x\n",
+ up ? "UP" : "DOWN", jiffies_to_msecs(timeout), sts);
return -ETIMEDOUT;
}
dev_vdbg(dev, "End of wait status: %08x\n", sts);
@@ -497,8 +498,8 @@ static int sbefifo_send_command(struct sbefifo *sbefifo,
u32 status;
int rc;
- dev_vdbg(dev, "sending command (%zd words, cmd=%04x)\n",
- cmd_len, be32_to_cpu(command[1]));
+ dev_dbg(dev, "sending command (%zd words, cmd=%04x)\n",
+ cmd_len, be32_to_cpu(command[1]));
/* As long as there's something to send */
timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD);
@@ -551,21 +552,23 @@ static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *respo
size_t len;
int rc;
- dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
+ dev_dbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
timeout = msecs_to_jiffies(sbefifo->timeout_start_rsp_ms);
for (;;) {
/* Grab FIFO status (this will handle parity errors) */
rc = sbefifo_wait(sbefifo, false, &status, timeout);
- if (rc < 0)
+ if (rc < 0) {
+ dev_dbg(dev, "timeout waiting (%u ms)\n", jiffies_to_msecs(timeout));
return rc;
+ }
timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP);
/* Decode status */
len = sbefifo_populated(status);
eot_set = sbefifo_eot_set(status);
- dev_vdbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set);
+ dev_dbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set);
/* Go through the chunk */
while(len--) {
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index e6f94501cb28..1e82b7967570 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -217,7 +217,7 @@ static void gnss_device_release(struct device *dev)
kfree(gdev->write_buf);
kfifo_free(&gdev->read_fifo);
- ida_simple_remove(&gnss_minors, gdev->id);
+ ida_free(&gnss_minors, gdev->id);
kfree(gdev);
}
@@ -232,7 +232,7 @@ struct gnss_device *gnss_allocate_device(struct device *parent)
if (!gdev)
return NULL;
- id = ida_simple_get(&gnss_minors, 0, GNSS_MINORS, GFP_KERNEL);
+ id = ida_alloc_max(&gnss_minors, GNSS_MINORS - 1, GFP_KERNEL);
if (id < 0) {
kfree(gdev);
return NULL;
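For reference, the two IDA calls above are equivalent: ida_alloc_max() takes an inclusive maximum, which is why the conversion passes GNSS_MINORS - 1 where ida_simple_get() took an exclusive upper bound. A minimal sketch of the mapping:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_minor(unsigned int nr_minors)
{
	/* Old style, exclusive upper bound:
	 *	ida_simple_get(&example_ida, 0, nr_minors, GFP_KERNEL);
	 * New style, inclusive upper bound, same range [0, nr_minors - 1]:
	 */
	return ida_alloc_max(&example_ida, nr_minors - 1, GFP_KERNEL);
}

static void example_put_minor(int id)
{
	/* ida_simple_remove() and ida_free() are likewise equivalent. */
	ida_free(&example_ida, id);
}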
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3f64345fe40b..a01af1180616 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -341,6 +341,10 @@ config GPIO_ICH
If unsure, say N.
+config GPIO_IMX_SCU
+ def_bool y
+ depends on IMX_SCU
+
config GPIO_IOP
tristate "Intel IOP GPIO"
depends on ARCH_IOP32X || COMPILE_TEST
@@ -986,20 +990,6 @@ endmenu
menu "I2C GPIO expanders"
depends on I2C
-config GPIO_ADP5588
- tristate "ADP5588 I2C GPIO expander"
- help
- This option enables support for 18 GPIOs found
- on Analog Devices ADP5588 GPIO Expanders.
-
-config GPIO_ADP5588_IRQ
- bool "Interrupt controller support for ADP5588"
- depends on GPIO_ADP5588=y
- select GPIOLIB_IRQCHIP
- help
- Say yes here to enable the adp5588 to be used as an interrupt
- controller. It requires the driver to be built in the kernel.
-
config GPIO_ADNP
tristate "Avionic Design N-bit GPIO expander"
depends on OF_GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index a0985d30f51b..29e3beb6548c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
-obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_AGGREGATOR) += gpio-aggregator.o
obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
@@ -70,6 +69,7 @@ obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_I8255) += gpio-i8255.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o
+obj-$(CONFIG_GPIO_IMX_SCU) += gpio-imx-scu.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 74cc71bb3984..7b8829c8e423 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -34,7 +34,8 @@ module_param_hw_array(base, uint, ioport, &num_dio48e, 0);
MODULE_PARM_DESC(base, "ACCES 104-DIO-48E base addresses");
static unsigned int irq[MAX_NUM_DIO48E];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
#define DIO48E_NUM_PPI 2
@@ -362,7 +363,7 @@ static struct isa_driver dio48e_driver = {
.name = "104-dio-48e"
},
};
-module_isa_driver(dio48e_driver, num_dio48e);
+module_isa_driver_with_irq(dio48e_driver, num_dio48e, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver");
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 3286b914a2cf..c5e231fde1af 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -34,7 +34,8 @@ module_param_hw_array(base, uint, ioport, &num_idi_48, 0);
MODULE_PARM_DESC(base, "ACCES 104-IDI-48 base addresses");
static unsigned int irq[MAX_NUM_IDI_48];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
/**
@@ -304,7 +305,7 @@ static struct isa_driver idi_48_driver = {
.name = "104-idi-48"
},
};
-module_isa_driver(idi_48_driver, num_idi_48);
+module_isa_driver_with_irq(idi_48_driver, num_idi_48, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDI-48 GPIO driver");
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 4756e583f223..718bd54e2a25 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -30,7 +30,8 @@ module_param_hw_array(base, uint, ioport, &num_idio_16, 0);
MODULE_PARM_DESC(base, "ACCES 104-IDIO-16 base addresses");
static unsigned int irq[MAX_NUM_IDIO_16];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers");
/**
@@ -337,7 +338,7 @@ static struct isa_driver idio_16_driver = {
},
};
-module_isa_driver(idio_16_driver, num_idio_16);
+module_isa_driver_with_irq(idio_16_driver, num_idio_16, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
deleted file mode 100644
index 9b562dbbd733..000000000000
--- a/drivers/gpio/gpio-adp5588.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * GPIO Chip driver for Analog Devices
- * ADP5588/ADP5587 I/O Expander and QWERTY Keypad Controller
- *
- * Copyright 2009-2010 Analog Devices Inc.
- */
-
-#include <linux/gpio/driver.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/adp5588.h>
-
-/*
- * Early pre 4.0 Silicon required to delay readout by at least 25ms,
- * since the Event Counter Register updated 25ms after the interrupt
- * asserted.
- */
-#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
-
-struct adp5588_gpio {
- struct i2c_client *client;
- struct gpio_chip gpio_chip;
- struct mutex lock; /* protect cached dir, dat_out */
- /* protect serialized access to the interrupt controller bus */
- struct mutex irq_lock;
- uint8_t dat_out[3];
- uint8_t dir[3];
- uint8_t int_lvl_low[3];
- uint8_t int_lvl_high[3];
- uint8_t int_en[3];
- uint8_t irq_mask[3];
- uint8_t int_input_en[3];
-};
-
-static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
-{
- int ret = i2c_smbus_read_byte_data(client, reg);
-
- if (ret < 0)
- dev_err(&client->dev, "Read Error\n");
-
- return ret;
-}
-
-static int adp5588_gpio_write(struct i2c_client *client, u8 reg, u8 val)
-{
- int ret = i2c_smbus_write_byte_data(client, reg, val);
-
- if (ret < 0)
- dev_err(&client->dev, "Write Error\n");
-
- return ret;
-}
-
-static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
-{
- struct adp5588_gpio *dev = gpiochip_get_data(chip);
- unsigned bank = ADP5588_BANK(off);
- unsigned bit = ADP5588_BIT(off);
- int val;
-
- mutex_lock(&dev->lock);
-
- if (dev->dir[bank] & bit)
- val = dev->dat_out[bank];
- else
- val = adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + bank);
-
- mutex_unlock(&dev->lock);
-
- return !!(val & bit);
-}
-
-static void adp5588_gpio_set_value(struct gpio_chip *chip,
- unsigned off, int val)
-{
- unsigned bank, bit;
- struct adp5588_gpio *dev = gpiochip_get_data(chip);
-
- bank = ADP5588_BANK(off);
- bit = ADP5588_BIT(off);
-
- mutex_lock(&dev->lock);
- if (val)
- dev->dat_out[bank] |= bit;
- else
- dev->dat_out[bank] &= ~bit;
-
- adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
- dev->dat_out[bank]);
- mutex_unlock(&dev->lock);
-}
-
-static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
-{
- int ret;
- unsigned bank;
- struct adp5588_gpio *dev = gpiochip_get_data(chip);
-
- bank = ADP5588_BANK(off);
-
- mutex_lock(&dev->lock);
- dev->dir[bank] &= ~ADP5588_BIT(off);
- ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
- mutex_unlock(&dev->lock);
-
- return ret;
-}
-
-static int adp5588_gpio_direction_output(struct gpio_chip *chip,
- unsigned off, int val)
-{
- int ret;
- unsigned bank, bit;
- struct adp5588_gpio *dev = gpiochip_get_data(chip);
-
- bank = ADP5588_BANK(off);
- bit = ADP5588_BIT(off);
-
- mutex_lock(&dev->lock);
- dev->dir[bank] |= bit;
-
- if (val)
- dev->dat_out[bank] |= bit;
- else
- dev->dat_out[bank] &= ~bit;
-
- ret = adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
- dev->dat_out[bank]);
- ret |= adp5588_gpio_write(dev->client, GPIO_DIR1 + bank,
- dev->dir[bank]);
- mutex_unlock(&dev->lock);
-
- return ret;
-}
-
-#ifdef CONFIG_GPIO_ADP5588_IRQ
-
-static void adp5588_irq_bus_lock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
-
- mutex_lock(&dev->irq_lock);
-}
-
- /*
- * genirq core code can issue chip->mask/unmask from atomic context.
- * This doesn't work for slow busses where an access needs to sleep.
- * bus_sync_unlock() is therefore called outside the atomic context,
- * syncs the current irq mask state with the slow external controller
- * and unlocks the bus.
- */
-
-static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
- int i;
-
- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
- if (dev->int_input_en[i]) {
- mutex_lock(&dev->lock);
- dev->dir[i] &= ~dev->int_input_en[i];
- dev->int_input_en[i] = 0;
- adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
- dev->dir[i]);
- mutex_unlock(&dev->lock);
- }
-
- if (dev->int_en[i] ^ dev->irq_mask[i]) {
- dev->int_en[i] = dev->irq_mask[i];
- adp5588_gpio_write(dev->client, GPI_EM1 + i,
- dev->int_en[i]);
- }
- }
-
- mutex_unlock(&dev->irq_lock);
-}
-
-static void adp5588_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
-
- dev->irq_mask[ADP5588_BANK(d->hwirq)] &= ~ADP5588_BIT(d->hwirq);
-}
-
-static void adp5588_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
-
- dev->irq_mask[ADP5588_BANK(d->hwirq)] |= ADP5588_BIT(d->hwirq);
-}
-
-static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
- uint16_t gpio = d->hwirq;
- unsigned bank, bit;
-
- bank = ADP5588_BANK(gpio);
- bit = ADP5588_BIT(gpio);
-
- dev->int_lvl_low[bank] &= ~bit;
- dev->int_lvl_high[bank] &= ~bit;
-
- if (type & IRQ_TYPE_EDGE_BOTH || type & IRQ_TYPE_LEVEL_HIGH)
- dev->int_lvl_high[bank] |= bit;
-
- if (type & IRQ_TYPE_EDGE_BOTH || type & IRQ_TYPE_LEVEL_LOW)
- dev->int_lvl_low[bank] |= bit;
-
- dev->int_input_en[bank] |= bit;
-
- return 0;
-}
-
-static struct irq_chip adp5588_irq_chip = {
- .name = "adp5588",
- .irq_mask = adp5588_irq_mask,
- .irq_unmask = adp5588_irq_unmask,
- .irq_bus_lock = adp5588_irq_bus_lock,
- .irq_bus_sync_unlock = adp5588_irq_bus_sync_unlock,
- .irq_set_type = adp5588_irq_set_type,
-};
-
-static irqreturn_t adp5588_irq_handler(int irq, void *devid)
-{
- struct adp5588_gpio *dev = devid;
- int status = adp5588_gpio_read(dev->client, INT_STAT);
-
- if (status & ADP5588_KE_INT) {
- int ev_cnt = adp5588_gpio_read(dev->client, KEY_LCK_EC_STAT);
-
- if (ev_cnt > 0) {
- int i;
-
- for (i = 0; i < (ev_cnt & ADP5588_KEC); i++) {
- int key = adp5588_gpio_read(dev->client,
- Key_EVENTA + i);
- /* GPIN events begin at 97,
- * bit 7 indicates logic level
- */
- int gpio = (key & 0x7f) - 97;
- int lvl = key & (1 << 7);
- int bank = ADP5588_BANK(gpio);
- int bit = ADP5588_BIT(gpio);
-
- if ((lvl && dev->int_lvl_high[bank] & bit) ||
- (!lvl && dev->int_lvl_low[bank] & bit))
- handle_nested_irq(irq_find_mapping(
- dev->gpio_chip.irq.domain, gpio));
- }
- }
- }
-
- adp5588_gpio_write(dev->client, INT_STAT, status); /* Status is W1C */
-
- return IRQ_HANDLED;
-}
-
-
-static int adp5588_irq_init_hw(struct gpio_chip *gc)
-{
- struct adp5588_gpio *dev = gpiochip_get_data(gc);
- /* Enable IRQs after registering chip */
- adp5588_gpio_write(dev->client, CFG,
- ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_KE_IEN);
-
- return 0;
-}
-
-static int adp5588_irq_setup(struct adp5588_gpio *dev)
-{
- struct i2c_client *client = dev->client;
- int ret;
- struct adp5588_gpio_platform_data *pdata =
- dev_get_platdata(&client->dev);
- struct gpio_irq_chip *girq;
-
- adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC);
- adp5588_gpio_write(client, INT_STAT, -1); /* status is W1C */
-
- mutex_init(&dev->irq_lock);
-
- ret = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, adp5588_irq_handler, IRQF_ONESHOT
- | IRQF_TRIGGER_FALLING | IRQF_SHARED,
- dev_name(&client->dev), dev);
- if (ret) {
- dev_err(&client->dev, "failed to request irq %d\n",
- client->irq);
- return ret;
- }
-
- /* This will be registered in the call to devm_gpiochip_add_data() */
- girq = &dev->gpio_chip.irq;
- girq->chip = &adp5588_irq_chip;
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->first = pdata ? pdata->irq_base : 0;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
- girq->init_hw = adp5588_irq_init_hw;
- girq->threaded = true;
-
- return 0;
-}
-
-#else
-static int adp5588_irq_setup(struct adp5588_gpio *dev)
-{
- struct i2c_client *client = dev->client;
- dev_warn(&client->dev, "interrupt support not compiled in\n");
-
- return 0;
-}
-
-#endif /* CONFIG_GPIO_ADP5588_IRQ */
-
-static int adp5588_gpio_probe(struct i2c_client *client)
-{
- struct adp5588_gpio_platform_data *pdata =
- dev_get_platdata(&client->dev);
- struct adp5588_gpio *dev;
- struct gpio_chip *gc;
- int ret, i, revid;
- unsigned int pullup_dis_mask = 0;
-
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
- return -EIO;
- }
-
- dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->client = client;
-
- gc = &dev->gpio_chip;
- gc->direction_input = adp5588_gpio_direction_input;
- gc->direction_output = adp5588_gpio_direction_output;
- gc->get = adp5588_gpio_get_value;
- gc->set = adp5588_gpio_set_value;
- gc->can_sleep = true;
- gc->base = -1;
- gc->parent = &client->dev;
-
- if (pdata) {
- gc->base = pdata->gpio_start;
- gc->names = pdata->names;
- pullup_dis_mask = pdata->pullup_dis_mask;
- }
-
- gc->ngpio = ADP5588_MAXGPIO;
- gc->label = client->name;
- gc->owner = THIS_MODULE;
-
- mutex_init(&dev->lock);
-
- ret = adp5588_gpio_read(dev->client, DEV_ID);
- if (ret < 0)
- return ret;
-
- revid = ret & ADP5588_DEVICE_ID_MASK;
-
- for (i = 0, ret = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
- dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
- dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
- ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
- ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
- (pullup_dis_mask >> (8 * i)) & 0xFF);
- ret |= adp5588_gpio_write(client, GPIO_INT_EN1 + i, 0);
- if (ret)
- return ret;
- }
-
- if (client->irq) {
- if (WA_DELAYED_READOUT_REVID(revid)) {
- dev_warn(&client->dev, "GPIO int not supported\n");
- } else {
- ret = adp5588_irq_setup(dev);
- if (ret)
- return ret;
- }
- }
-
- ret = devm_gpiochip_add_data(&client->dev, &dev->gpio_chip, dev);
- if (ret)
- return ret;
-
- i2c_set_clientdata(client, dev);
-
- return 0;
-}
-
-static void adp5588_gpio_remove(struct i2c_client *client)
-{
- struct adp5588_gpio *dev = i2c_get_clientdata(client);
-
- if (dev->client->irq)
- free_irq(dev->client->irq, dev);
-}
-
-static const struct i2c_device_id adp5588_gpio_id[] = {
- { "adp5588-gpio" },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id);
-
-static const struct of_device_id adp5588_gpio_of_id[] = {
- { .compatible = "adi,adp5588-gpio" },
- {}
-};
-MODULE_DEVICE_TABLE(of, adp5588_gpio_of_id);
-
-static struct i2c_driver adp5588_gpio_driver = {
- .driver = {
- .name = "adp5588-gpio",
- .of_match_table = adp5588_gpio_of_id,
- },
- .probe_new = adp5588_gpio_probe,
- .remove = adp5588_gpio_remove,
- .id_table = adp5588_gpio_id,
-};
-
-module_i2c_driver(adp5588_gpio_driver);
-
-MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
-MODULE_DESCRIPTION("GPIO ADP5588 Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index d37de78247a6..482f678c893e 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -21,6 +21,12 @@
#define EXAR_OFFSET_MPIOLVL_HI 0x96
#define EXAR_OFFSET_MPIOSEL_HI 0x99
+/*
+ * The Device Configuration and UART Configuration Registers
+ * for each UART channel take 1KB of memory address space.
+ */
+#define EXAR_UART_CHANNEL_SIZE 0x400
+
#define DRIVER_NAME "gpio_exar"
static DEFINE_IDA(ida_index);
@@ -31,26 +37,39 @@ struct exar_gpio_chip {
int index;
char name[20];
unsigned int first_pin;
+ /*
+ * The offset to the cascaded device's (if existing)
+ * Device Configuration Registers.
+ */
+ unsigned int cascaded_offset;
};
static unsigned int
exar_offset_to_sel_addr(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- return (offset + exar_gpio->first_pin) / 8 ? EXAR_OFFSET_MPIOSEL_HI
- : EXAR_OFFSET_MPIOSEL_LO;
+ unsigned int pin = exar_gpio->first_pin + (offset % 16);
+ unsigned int cascaded = offset / 16;
+ unsigned int addr = pin / 8 ? EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
+
+ return addr + (cascaded ? exar_gpio->cascaded_offset : 0);
}
static unsigned int
exar_offset_to_lvl_addr(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- return (offset + exar_gpio->first_pin) / 8 ? EXAR_OFFSET_MPIOLVL_HI
- : EXAR_OFFSET_MPIOLVL_LO;
+ unsigned int pin = exar_gpio->first_pin + (offset % 16);
+ unsigned int cascaded = offset / 16;
+ unsigned int addr = pin / 8 ? EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
+
+ return addr + (cascaded ? exar_gpio->cascaded_offset : 0);
}
static unsigned int
exar_offset_to_bit(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- return (offset + exar_gpio->first_pin) % 8;
+ unsigned int pin = exar_gpio->first_pin + (offset % 16);
+
+ return pin % 8;
}
static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -154,6 +173,17 @@ static int gpio_exar_probe(struct platform_device *pdev)
return -ENOMEM;
/*
+ * If cascaded, secondary xr17v354 or xr17v358 have the same amount
+ * of MPIOs as their primaries and the last 4 bits of the primary's
+ * PCI Device ID is the number of its UART channels.
+ */
+ if (pcidev->device & GENMASK(15, 12)) {
+ ngpios += ngpios;
+ exar_gpio->cascaded_offset = (pcidev->device & GENMASK(3, 0)) *
+ EXAR_UART_CHANNEL_SIZE;
+ }
+
+ /*
* We don't need to check the return values of mmio regmap operations (unless
* the regmap has a clock attached which is not the case here).
*/
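For the cascaded case handled above, GPIO offsets 0-15 address the primary device and 16-31 the secondary; the secondary's registers sit cascaded_offset bytes higher, i.e. the primary's channel count (low four bits of the PCI device ID) times the 1KB per-channel block. A standalone sketch of the same address arithmetic, reusing the macros introduced in this hunk:

static unsigned int my_sel_addr(unsigned int first_pin,
				unsigned int cascaded_offset,
				unsigned int offset)
{
	unsigned int pin = first_pin + (offset % 16);
	unsigned int addr = pin / 8 ? EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;

	/* Offsets 16 and above select the cascaded device's register block. */
	return addr + (offset / 16 ? cascaded_offset : 0);
}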
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
new file mode 100644
index 000000000000..17be21b8f3b7
--- /dev/null
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021~2022 NXP
+ *
+ * The driver exports a standard gpiochip interface
+ * to control the PIN resources on SCU domain.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/imx/svc/rm.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+struct scu_gpio_priv {
+ struct gpio_chip chip;
+ struct mutex lock;
+ struct device *dev;
+ struct imx_sc_ipc *handle;
+};
+
+static unsigned int scu_rsrc_arr[] = {
+ IMX_SC_R_BOARD_R0,
+ IMX_SC_R_BOARD_R1,
+ IMX_SC_R_BOARD_R2,
+ IMX_SC_R_BOARD_R3,
+ IMX_SC_R_BOARD_R4,
+ IMX_SC_R_BOARD_R5,
+ IMX_SC_R_BOARD_R6,
+ IMX_SC_R_BOARD_R7,
+};
+
+static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct scu_gpio_priv *priv = gpiochip_get_data(chip);
+ int level;
+ int err;
+
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ /* to read PIN state via scu api */
+ err = imx_sc_misc_get_control(priv->handle,
+ scu_rsrc_arr[offset], 0, &level);
+ mutex_unlock(&priv->lock);
+
+ if (err) {
+ dev_err(priv->dev, "SCU get failed: %d\n", err);
+ return err;
+ }
+
+ return level;
+}
+
+static void imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct scu_gpio_priv *priv = gpiochip_get_data(chip);
+ int err;
+
+ if (offset >= chip->ngpio)
+ return;
+
+ mutex_lock(&priv->lock);
+
+ /* to set PIN output level via scu api */
+ err = imx_sc_misc_set_control(priv->handle,
+ scu_rsrc_arr[offset], 0, value);
+ mutex_unlock(&priv->lock);
+
+ if (err)
+ dev_err(priv->dev, "SCU set (%d) failed: %d\n",
+ scu_rsrc_arr[offset], err);
+}
+
+static int imx_scu_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int imx_scu_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct scu_gpio_priv *priv;
+ struct gpio_chip *gc;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = imx_scu_get_handle(&priv->handle);
+ if (ret)
+ return ret;
+
+ priv->dev = dev;
+ mutex_init(&priv->lock);
+
+ gc = &priv->chip;
+ gc->base = -1;
+ gc->parent = dev;
+ gc->ngpio = sizeof(scu_rsrc_arr)/sizeof(unsigned int);
+ gc->label = dev_name(dev);
+ gc->get = imx_scu_gpio_get;
+ gc->set = imx_scu_gpio_set;
+ gc->get_direction = imx_scu_gpio_get_direction;
+
+ platform_set_drvdata(pdev, priv);
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+
+static const struct of_device_id imx_scu_gpio_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-sc-gpio" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver imx_scu_gpio_driver = {
+ .driver = {
+ .name = "gpio-imx-scu",
+ .of_match_table = imx_scu_gpio_dt_ids,
+ },
+ .probe = imx_scu_gpio_probe,
+};
+
+static int __init _imx_scu_gpio_init(void)
+{
+ return platform_driver_register(&imx_scu_gpio_driver);
+}
+
+subsys_initcall_sync(_imx_scu_gpio_init);
+
+MODULE_AUTHOR("Shenwei Wang <shenwei.wang@nxp.com>");
+MODULE_DESCRIPTION("NXP GPIO over IMX SCU API");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 64cb060d9d75..77a41151c921 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -273,10 +273,8 @@ static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr)
pending = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CAUSE_EVTEN0);
writel(pending, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
- for_each_set_bit(level, &pending, gc->ngpio) {
- int gpio_irq = irq_find_mapping(gc->irq.domain, level);
- generic_handle_irq(gpio_irq);
- }
+ for_each_set_bit(level, &pending, gc->ngpio)
+ generic_handle_domain_irq_safe(gc->irq.domain, level);
return IRQ_RETVAL(pending);
}
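generic_handle_domain_irq_safe() resolves the hwirq through the irq domain and runs the handler with local interrupts disabled, so it may be called from hard-IRQ or threaded context alike; that is what lets the mlxbf2 handler above drop the explicit irq_find_mapping()/generic_handle_irq() pair. A minimal sketch of the pattern, where my_read_pending() is a hypothetical register read returning a bitmask of pending lines:

static irqreturn_t my_gpio_irq_handler(int irq, void *ptr)
{
	struct gpio_chip *gc = ptr;
	unsigned long pending = my_read_pending(gc);	/* hypothetical helper */
	unsigned int level;

	for_each_set_bit(level, &pending, gc->ngpio)
		generic_handle_domain_irq_safe(gc->irq.domain, level);

	return IRQ_RETVAL(pending);
}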
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index f163f5ca857b..93facbebb80e 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -9,7 +9,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -299,7 +298,6 @@ static int
mediatek_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct mtk *mtk;
int i;
int ret;
@@ -312,7 +310,10 @@ mediatek_gpio_probe(struct platform_device *pdev)
if (IS_ERR(mtk->base))
return PTR_ERR(mtk->base);
- mtk->gpio_irq = irq_of_parse_and_map(np, 0);
+ mtk->gpio_irq = platform_get_irq(pdev, 0);
+ if (mtk->gpio_irq < 0)
+ return mtk->gpio_irq;
+
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
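platform_get_irq() both looks up and maps the interrupt from the firmware description and, unlike irq_of_parse_and_map(), returns a negative errno (including -EPROBE_DEFER) that the probe can simply propagate, which is what the mt7621 change above relies on. A hedged sketch of the idiom:

static int my_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* failure already reported by the core, except for probe deferral */

	/* ... request the interrupt and register the gpiochip ... */
	return 0;
}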
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index cf9bf3fcaee2..ebe1943b85dd 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -66,6 +66,7 @@
#define PCA_LATCH_INT (PCA_PCAL | PCA_INT)
#define PCA953X_TYPE BIT(12)
#define PCA957X_TYPE BIT(13)
+#define PCAL653X_TYPE BIT(14)
#define PCA_TYPE_MASK GENMASK(15, 12)
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
@@ -89,8 +90,10 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
{ "pca9698", 40 | PCA953X_TYPE, },
+ { "pcal6408", 8 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal6416", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal6524", 24 | PCA953X_TYPE | PCA_LATCH_INT, },
+ { "pcal6534", 34 | PCAL653X_TYPE | PCA_LATCH_INT, },
{ "pcal9535", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal9554b", 8 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal9555a", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
@@ -211,6 +214,10 @@ struct pca953x_chip {
struct regulator *regulator;
const struct pca953x_reg_config *regs;
+
+ u8 (*recalc_addr)(struct pca953x_chip *chip, int reg, int off);
+ bool (*check_reg)(struct pca953x_chip *chip, unsigned int reg,
+ u32 checkbank);
};
static int pca953x_bank_shift(struct pca953x_chip *chip)
@@ -288,18 +295,67 @@ static bool pca953x_check_register(struct pca953x_chip *chip, unsigned int reg,
return true;
}
+/*
+ * Unfortunately, whilst the PCAL6534 chip (and compatibles) broadly follow the
+ * same register layout as the PCAL6524, the spacing of the registers has been
+ * fundamentally altered by compacting them and thus does not obey the same
+ * rules, including being able to use bit shifting to determine bank. These
+ * chips hence need special handling here.
+ */
+static bool pcal6534_check_register(struct pca953x_chip *chip, unsigned int reg,
+ u32 checkbank)
+{
+ int bank;
+ int offset;
+
+ if (reg >= 0x30) {
+ /*
+ * Reserved block between 14h and 2Fh does not align on
+ * expected bank boundaries like other devices.
+ */
+ int temp = reg - 0x30;
+
+ bank = temp / NBANK(chip);
+ offset = temp - (bank * NBANK(chip));
+ bank += 8;
+ } else if (reg >= 0x54) {
+ /*
+ * Handle lack of reserved registers after output port
+ * configuration register to form a bank.
+ */
+ int temp = reg - 0x54;
+
+ bank = temp / NBANK(chip);
+ offset = temp - (bank * NBANK(chip));
+ bank += 16;
+ } else {
+ bank = reg / NBANK(chip);
+ offset = reg - (bank * NBANK(chip));
+ }
+
+ /* Register is not in the matching bank. */
+ if (!(BIT(bank) & checkbank))
+ return false;
+
+ /* Register is not within allowed range of bank. */
+ if (offset >= NBANK(chip))
+ return false;
+
+ return true;
+}
+
static bool pca953x_readable_register(struct device *dev, unsigned int reg)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
- bank = PCA953x_BANK_INPUT | PCA953x_BANK_OUTPUT |
- PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG;
- } else {
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
bank = PCA957x_BANK_INPUT | PCA957x_BANK_OUTPUT |
PCA957x_BANK_POLARITY | PCA957x_BANK_CONFIG |
PCA957x_BANK_BUSHOLD;
+ } else {
+ bank = PCA953x_BANK_INPUT | PCA953x_BANK_OUTPUT |
+ PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG;
}
if (chip->driver_data & PCA_PCAL) {
@@ -308,7 +364,7 @@ static bool pca953x_readable_register(struct device *dev, unsigned int reg)
PCAL9xxx_BANK_IRQ_STAT;
}
- return pca953x_check_register(chip, reg, bank);
+ return chip->check_reg(chip, reg, bank);
}
static bool pca953x_writeable_register(struct device *dev, unsigned int reg)
@@ -316,19 +372,19 @@ static bool pca953x_writeable_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
- bank = PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY |
- PCA953x_BANK_CONFIG;
- } else {
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
bank = PCA957x_BANK_OUTPUT | PCA957x_BANK_POLARITY |
PCA957x_BANK_CONFIG | PCA957x_BANK_BUSHOLD;
+ } else {
+ bank = PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY |
+ PCA953x_BANK_CONFIG;
}
if (chip->driver_data & PCA_PCAL)
bank |= PCAL9xxx_BANK_IN_LATCH | PCAL9xxx_BANK_PULL_EN |
PCAL9xxx_BANK_PULL_SEL | PCAL9xxx_BANK_IRQ_MASK;
- return pca953x_check_register(chip, reg, bank);
+ return chip->check_reg(chip, reg, bank);
}
static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
@@ -336,15 +392,15 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE)
- bank = PCA953x_BANK_INPUT;
- else
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
bank = PCA957x_BANK_INPUT;
+ else
+ bank = PCA953x_BANK_INPUT;
if (chip->driver_data & PCA_PCAL)
bank |= PCAL9xxx_BANK_IRQ_STAT;
- return pca953x_check_register(chip, reg, bank);
+ return chip->check_reg(chip, reg, bank);
}
static const struct regmap_config pca953x_i2c_regmap = {
@@ -389,9 +445,42 @@ static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off)
return regaddr;
}
+/*
+ * The PCAL6534 and compatible chips have altered bank alignment that doesn't
+ * fit within the bit shifting scheme used for other devices.
+ */
+static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
+{
+ int addr;
+ int pinctrl;
+
+ addr = (reg & PCAL_GPIO_MASK) * NBANK(chip);
+
+ switch (reg) {
+ case PCAL953X_OUT_STRENGTH:
+ case PCAL953X_IN_LATCH:
+ case PCAL953X_PULL_EN:
+ case PCAL953X_PULL_SEL:
+ case PCAL953X_INT_MASK:
+ case PCAL953X_INT_STAT:
+ case PCAL953X_OUT_CONF:
+ pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x20;
+ break;
+ case PCAL6524_INT_EDGE:
+ case PCAL6524_INT_CLR:
+ case PCAL6524_IN_STATUS:
+ case PCAL6524_OUT_INDCONF:
+ case PCAL6524_DEBOUNCE:
+ pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c;
+ break;
+ }
+
+ return pinctrl + addr + (off / BANK_SZ);
+}
+
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
+ u8 regaddr = chip->recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -409,7 +498,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long
static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
+ u8 regaddr = chip->recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -428,7 +517,7 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
+ u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -442,8 +531,8 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
+ u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
+ u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -463,7 +552,7 @@ exit:
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 inreg = pca953x_recalc_addr(chip, chip->regs->input, off);
+ u8 inreg = chip->recalc_addr(chip, chip->regs->input, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -480,7 +569,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
+ u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
mutex_lock(&chip->i2c_lock);
@@ -491,7 +580,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
+ u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -548,8 +637,10 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
unsigned int offset,
unsigned long config)
{
- u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset);
- u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset);
+ enum pin_config_param param = pinconf_to_config_param(config);
+
+ u8 pull_en_reg = chip->recalc_addr(chip, PCAL953X_PULL_EN, offset);
+ u8 pull_sel_reg = chip->recalc_addr(chip, PCAL953X_PULL_SEL, offset);
u8 bit = BIT(offset % BANK_SZ);
int ret;
@@ -563,9 +654,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
mutex_lock(&chip->i2c_lock);
/* Configure pull-up/pull-down */
- if (config == PIN_CONFIG_BIAS_PULL_UP)
+ if (param == PIN_CONFIG_BIAS_PULL_UP)
ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit);
- else if (config == PIN_CONFIG_BIAS_PULL_DOWN)
+ else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0);
else
ret = 0;
@@ -573,7 +664,7 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
goto exit;
/* Disable/Enable pull-up/pull-down */
- if (config == PIN_CONFIG_BIAS_DISABLE)
+ if (param == PIN_CONFIG_BIAS_DISABLE)
ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
else
ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
@@ -912,13 +1003,13 @@ static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
u8 regaddr;
int ret;
- regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ regaddr = chip->recalc_addr(chip, chip->regs->output, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip) - 1);
if (ret)
goto out;
- regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ regaddr = chip->recalc_addr(chip, chip->regs->direction, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip) - 1);
if (ret)
@@ -1037,6 +1128,14 @@ static int pca953x_probe(struct i2c_client *client,
regmap_config = &pca953x_i2c_regmap;
}
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCAL653X_TYPE) {
+ chip->recalc_addr = pcal6534_recalc_addr;
+ chip->check_reg = pcal6534_check_register;
+ } else {
+ chip->recalc_addr = pca953x_recalc_addr;
+ chip->check_reg = pca953x_check_register;
+ }
+
chip->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
@@ -1068,13 +1167,12 @@ static int pca953x_probe(struct i2c_client *client,
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
-
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
- chip->regs = &pca953x_regs;
- ret = device_pca95xx_init(chip, invert);
- } else {
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
chip->regs = &pca957x_regs;
ret = device_pca957x_init(chip, invert);
+ } else {
+ chip->regs = &pca953x_regs;
+ ret = device_pca95xx_init(chip, invert);
}
if (ret)
goto err_exit;
@@ -1125,14 +1223,14 @@ static int pca953x_regcache_sync(struct device *dev)
* The ordering between direction and output is important,
* sync these registers first and only then sync the rest.
*/
- regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ regaddr = chip->recalc_addr(chip, chip->regs->direction, 0);
ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
- regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ regaddr = chip->recalc_addr(chip, chip->regs->output, 0);
ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
@@ -1141,7 +1239,7 @@ static int pca953x_regcache_sync(struct device *dev)
#ifdef CONFIG_GPIO_PCA953X_IRQ
if (chip->driver_data & PCA_PCAL) {
- regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
+ regaddr = chip->recalc_addr(chip, PCAL953X_IN_LATCH, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip) - 1);
if (ret) {
@@ -1150,7 +1248,7 @@ static int pca953x_regcache_sync(struct device *dev)
return ret;
}
- regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
+ regaddr = chip->recalc_addr(chip, PCAL953X_INT_MASK, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip) - 1);
if (ret) {
@@ -1214,6 +1312,7 @@ static int pca953x_resume(struct device *dev)
#endif
/* convenience to stop overlong match-table lines */
+#define OF_653X(__nrgpio, __int) ((void *)(__nrgpio | PCAL653X_TYPE | __int))
#define OF_953X(__nrgpio, __int) (void *)(__nrgpio | PCA953X_TYPE | __int)
#define OF_957X(__nrgpio, __int) (void *)(__nrgpio | PCA957X_TYPE | __int)
@@ -1236,8 +1335,10 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca9575", .data = OF_957X(16, PCA_INT), },
{ .compatible = "nxp,pca9698", .data = OF_953X(40, 0), },
+ { .compatible = "nxp,pcal6408", .data = OF_953X(8, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal6416", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal6524", .data = OF_953X(24, PCA_LATCH_INT), },
+ { .compatible = "nxp,pcal6534", .data = OF_653X(34, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9535", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9554b", .data = OF_953X( 8, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9555a", .data = OF_953X(16, PCA_LATCH_INT), },
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index bb50335239ac..870910bb9dd3 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/regmap.h>
@@ -156,6 +157,12 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
unsigned long flags;
u32 data = input ? 0 : 1;
+
+ if (input)
+ pinctrl_gpio_direction_input(bank->pin_base + offset);
+ else
+ pinctrl_gpio_direction_output(bank->pin_base + offset);
+
raw_spin_lock_irqsave(&bank->slock, flags);
rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
raw_spin_unlock_irqrestore(&bank->slock, flags);
@@ -325,26 +332,15 @@ static void rockchip_irq_demux(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
- u32 pend;
+ unsigned long pending;
+ unsigned int irq;
dev_dbg(bank->dev, "got irq for bank %s\n", bank->name);
chained_irq_enter(chip, desc);
- pend = readl_relaxed(bank->reg_base + bank->gpio_regs->int_status);
-
- while (pend) {
- unsigned int irq, virq;
-
- irq = __ffs(pend);
- pend &= ~BIT(irq);
- virq = irq_find_mapping(bank->domain, irq);
-
- if (!virq) {
- dev_err(bank->dev, "unmapped irq %d\n", irq);
- continue;
- }
-
+ pending = readl_relaxed(bank->reg_base + bank->gpio_regs->int_status);
+ for_each_set_bit(irq, &pending, 32) {
dev_dbg(bank->dev, "handling irq %d\n", irq);
/*
@@ -378,7 +374,7 @@ static void rockchip_irq_demux(struct irq_desc *desc)
} while ((data & BIT(irq)) != (data_old & BIT(irq)));
}
- generic_handle_irq(virq);
+ generic_handle_domain_irq(bank->domain, irq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 443fe975bf13..e62ee7e56908 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -230,6 +230,7 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d)
tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
tc3589x_gpio->regs[REG_DIRECT][regoffset] |= mask;
+ gpiochip_disable_irq(gc, offset);
}
static void tc3589x_gpio_irq_unmask(struct irq_data *d)
@@ -240,17 +241,20 @@ static void tc3589x_gpio_irq_unmask(struct irq_data *d)
int regoffset = offset / 8;
int mask = BIT(offset % 8);
+ gpiochip_enable_irq(gc, offset);
tc3589x_gpio->regs[REG_IE][regoffset] |= mask;
tc3589x_gpio->regs[REG_DIRECT][regoffset] &= ~mask;
}
-static struct irq_chip tc3589x_gpio_irq_chip = {
+static const struct irq_chip tc3589x_gpio_irq_chip = {
.name = "tc3589x-gpio",
.irq_bus_lock = tc3589x_gpio_irq_lock,
.irq_bus_sync_unlock = tc3589x_gpio_irq_sync_unlock,
.irq_mask = tc3589x_gpio_irq_mask,
.irq_unmask = tc3589x_gpio_irq_unmask,
.irq_set_type = tc3589x_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
@@ -321,7 +325,7 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
tc3589x_gpio->chip.base = -1;
girq = &tc3589x_gpio->chip.irq;
- girq->chip = &tc3589x_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &tc3589x_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
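The tc3589x conversion above follows the immutable-irqchip pattern: the irq_chip becomes const with IRQCHIP_IMMUTABLE and GPIOCHIP_IRQ_RESOURCE_HELPERS, the mask/unmask paths take over the gpiochip_disable_irq()/gpiochip_enable_irq() bookkeeping that gpiolib used to inject, and the chip is attached with gpio_irq_chip_set_chip() instead of a plain pointer assignment. A condensed sketch with hypothetical hardware accessors (my_hw_mask/my_hw_unmask):

static void my_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	my_hw_mask(gc, hwirq);			/* hypothetical register write */
	gpiochip_disable_irq(gc, hwirq);
}

static void my_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	gpiochip_enable_irq(gc, hwirq);
	my_hw_unmask(gc, hwirq);		/* hypothetical register write */
}

static const struct irq_chip my_irq_chip = {
	.name		= "my-gpio",
	.irq_mask	= my_irq_mask,
	.irq_unmask	= my_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

	/* in probe, instead of girq->chip = &my_irq_chip: */
	gpio_irq_chip_set_chip(girq, &my_irq_chip);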
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 5046e51af8df..c1bb2c3ca6f2 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -465,8 +465,6 @@ static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
REG_GPIO_DEBEN1, 3);
}
-static int gpio_twl4030_remove(struct platform_device *pdev);
-
static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev,
struct twl4030_gpio_platform_data *pdata)
{
@@ -494,6 +492,18 @@ static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev,
return omap_twl_info;
}
+/* Cannot use as gpio_twl4030_probe() calls us */
+static int gpio_twl4030_remove(struct platform_device *pdev)
+{
+ struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&priv->gpio_chip);
+
+ /* REVISIT no support yet for deregistering all the IRQs */
+ WARN_ON(!is_module());
+ return 0;
+}
+
static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -590,18 +600,6 @@ out:
return ret;
}
-/* Cannot use as gpio_twl4030_probe() calls us */
-static int gpio_twl4030_remove(struct platform_device *pdev)
-{
- struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
-
- gpiochip_remove(&priv->gpio_chip);
-
- /* REVISIT no support yet for deregistering all the IRQs */
- WARN_ON(!is_module());
- return 0;
-}
-
static const struct of_device_id twl_gpio_match[] = {
{ .compatible = "ti,twl4030-gpio", },
{ },
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 386e69300332..676adf1f198a 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/ucb1400.h>
+#include <linux/gpio/driver.h>
static int ucb1400_gpio_dir_in(struct gpio_chip *gc, unsigned off)
{
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index 59fb10641598..e73885a4dc32 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -27,7 +27,8 @@ module_param_hw_array(base, uint, ioport, &num_ws16c48, 0);
MODULE_PARM_DESC(base, "WinSystems WS16C48 base addresses");
static unsigned int irq[MAX_NUM_WS16C48];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
/**
@@ -501,7 +502,7 @@ static struct isa_driver ws16c48_driver = {
},
};
-module_isa_driver(ws16c48_driver, num_ws16c48);
+module_isa_driver_with_irq(ws16c48_driver, num_ws16c48, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("WinSystems WS16C48 GPIO driver");
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9be1376f9a62..a7d2358736fe 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -32,9 +32,16 @@ MODULE_PARM_DESC(ignore_wake,
"controller@pin combos on which to ignore the ACPI wake flag "
"ignore_wake=controller@pin[,controller@pin[,...]]");
+static char *ignore_interrupt;
+module_param(ignore_interrupt, charp, 0444);
+MODULE_PARM_DESC(ignore_interrupt,
+ "controller@pin combos on which to ignore interrupt "
+ "ignore_interrupt=controller@pin[,controller@pin[,...]]");
+
struct acpi_gpiolib_dmi_quirk {
bool no_edge_events_on_boot;
char *ignore_wake;
+ char *ignore_interrupt;
};
/**
@@ -317,14 +324,15 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
return desc;
}
-static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in)
+static bool acpi_gpio_in_ignore_list(const char *ignore_list, const char *controller_in,
+ unsigned int pin_in)
{
const char *controller, *pin_str;
unsigned int pin;
char *endp;
int len;
- controller = ignore_wake;
+ controller = ignore_list;
while (controller) {
pin_str = strchr(controller, '@');
if (!pin_str)
@@ -348,7 +356,7 @@ static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin
return false;
err:
- pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_wake: %s\n", ignore_wake);
+ pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
return false;
}
@@ -360,7 +368,7 @@ static bool acpi_gpio_irq_is_wake(struct device *parent,
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;
- if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
+ if (acpi_gpio_in_ignore_list(ignore_wake, dev_name(parent), pin)) {
dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
return false;
}
@@ -427,6 +435,11 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
goto fail_unlock_irq;
}
+ if (acpi_gpio_in_ignore_list(ignore_interrupt, dev_name(chip->parent), pin)) {
+ dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
+ return AE_OK;
+ }
+
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
goto fail_unlock_irq;
@@ -741,6 +754,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->info.pin_config = agpio->pin_config;
lookup->info.debounce = agpio->debounce_timeout;
lookup->info.gpioint = gpioint;
+ lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE;
/*
* Polarity and triggering are only specified for GpioInt
@@ -987,10 +1001,11 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
}
/**
- * acpi_dev_gpio_irq_get_by() - Find GpioInt and translate it to Linux IRQ number
+ * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number
* @adev: pointer to a ACPI device to get IRQ from
* @name: optional name of GpioInt resource
* @index: index of GpioInt resource (starting from %0)
+ * @wake_capable: Set to true if the IRQ is wake capable
*
* If the device has one or more GpioInt resources, this function can be
* used to translate from the GPIO offset in the resource to the Linux IRQ
@@ -1002,9 +1017,13 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
* The function takes optional @name parameter. If the resource has a property
* name, then only those will be taken into account.
*
+ * The GPIO is considered wake capable if the GpioInt resource specifies
+ * SharedAndWake or ExclusiveAndWake.
+ *
* Return: Linux IRQ number (> %0) on success, negative errno on failure.
*/
-int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index)
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+ bool *wake_capable)
{
int idx, i;
unsigned int irq_flags;
@@ -1061,13 +1080,16 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind
dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
}
+ if (wake_capable)
+ *wake_capable = info.wake_capable;
+
return irq;
}
}
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get_by);
+EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by);
static acpi_status
acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
@@ -1563,6 +1585,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "INT33FF:01@0",
},
},
+ {
+ /*
+ * Interrupt storm caused from edge triggered floating pin
+ * Found in BIOS UX325UAZ.300
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216208
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@18",
+ },
+ },
{} /* Terminating entry */
};
@@ -1585,6 +1621,9 @@ static int __init acpi_gpio_setup_params(void)
if (ignore_wake == NULL && quirk && quirk->ignore_wake)
ignore_wake = quirk->ignore_wake;
+ if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
+ ignore_interrupt = quirk->ignore_interrupt;
+
return 0;
}
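The new parameter takes the same controller@pin list syntax as ignore_wake, so the DMI quirk above can also be applied by hand from the kernel command line, for example:

	gpiolib_acpi.ignore_interrupt=AMDI0030:00@18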
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index e476558d9471..1ac6816839db 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -18,6 +18,7 @@ struct acpi_device;
* @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @wake_capable: wake capability as provided by ACPI
* @debounce: debounce timeout as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
@@ -28,6 +29,7 @@ struct acpi_gpio_info {
int pin_config;
int polarity;
int triggering;
+ bool wake_capable;
unsigned int debounce;
unsigned int quirks;
};
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 92f185575e94..0cb6b468f364 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1497,6 +1497,21 @@ static int linereq_release(struct inode *inode, struct file *file)
return 0;
}
+#ifdef CONFIG_PROC_FS
+static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
+{
+ struct linereq *lr = file->private_data;
+ struct device *dev = &lr->gdev->dev;
+ u16 i;
+
+ seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
+
+ for (i = 0; i < lr->num_lines; i++)
+ seq_printf(out, "gpio-line:\t%d\n",
+ gpio_chip_hwgpio(lr->lines[i].desc));
+}
+#endif
+
static const struct file_operations line_fileops = {
.release = linereq_release,
.read = linereq_read,
@@ -1507,6 +1522,9 @@ static const struct file_operations line_fileops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = linereq_ioctl_compat,
#endif
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = linereq_show_fdinfo,
+#endif
};
static int linereq_create(struct gpio_device *gdev, void __user *ip)
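Given the format strings in the new show_fdinfo hook, reading /proc/<pid>/fdinfo/<fd> for a line-request file descriptor now also reports the owning chip and each requested line. A hypothetical example for a request of lines 3 and 7 on gpiochip0 (names and numbers invented for illustration):

	gpio-chip:	gpiochip0
	gpio-line:	3
	gpio-line:	7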
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a037b50bef33..0e4e1291604d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -289,6 +289,36 @@ int of_get_named_gpio_flags(const struct device_node *np, const char *list_name,
}
EXPORT_SYMBOL_GPL(of_get_named_gpio_flags);
+/* Converts gpio_lookup_flags into bitmask of GPIO_* values */
+static unsigned long of_convert_gpio_flags(enum of_gpio_flags flags)
+{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ lflags |= GPIO_ACTIVE_LOW;
+
+ if (flags & OF_GPIO_SINGLE_ENDED) {
+ if (flags & OF_GPIO_OPEN_DRAIN)
+ lflags |= GPIO_OPEN_DRAIN;
+ else
+ lflags |= GPIO_OPEN_SOURCE;
+ }
+
+ if (flags & OF_GPIO_TRANSITORY)
+ lflags |= GPIO_TRANSITORY;
+
+ if (flags & OF_GPIO_PULL_UP)
+ lflags |= GPIO_PULL_UP;
+
+ if (flags & OF_GPIO_PULL_DOWN)
+ lflags |= GPIO_PULL_DOWN;
+
+ if (flags & OF_GPIO_PULL_DISABLE)
+ lflags |= GPIO_PULL_DISABLE;
+
+ return lflags;
+}
+
/**
* gpiod_get_from_of_node() - obtain a GPIO from an OF node
* @node: handle of the OF node
@@ -308,26 +338,14 @@ struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
enum gpiod_flags dflags,
const char *label)
{
- unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ unsigned long lflags;
struct gpio_desc *desc;
- enum of_gpio_flags flags;
- bool active_low = false;
- bool single_ended = false;
- bool open_drain = false;
- bool transitory = false;
+ enum of_gpio_flags of_flags;
int ret;
- desc = of_get_named_gpiod_flags(node, propname,
- index, &flags);
-
- if (!desc || IS_ERR(desc)) {
+ desc = of_get_named_gpiod_flags(node, propname, index, &of_flags);
+ if (!desc || IS_ERR(desc))
return desc;
- }
-
- active_low = flags & OF_GPIO_ACTIVE_LOW;
- single_ended = flags & OF_GPIO_SINGLE_ENDED;
- open_drain = flags & OF_GPIO_OPEN_DRAIN;
- transitory = flags & OF_GPIO_TRANSITORY;
ret = gpiod_request(desc, label);
if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
@@ -335,27 +353,7 @@ struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
if (ret)
return ERR_PTR(ret);
- if (active_low)
- lflags |= GPIO_ACTIVE_LOW;
-
- if (single_ended) {
- if (open_drain)
- lflags |= GPIO_OPEN_DRAIN;
- else
- lflags |= GPIO_OPEN_SOURCE;
- }
-
- if (transitory)
- lflags |= GPIO_TRANSITORY;
-
- if (flags & OF_GPIO_PULL_UP)
- lflags |= GPIO_PULL_UP;
-
- if (flags & OF_GPIO_PULL_DOWN)
- lflags |= GPIO_PULL_DOWN;
-
- if (flags & OF_GPIO_PULL_DISABLE)
- lflags |= GPIO_PULL_DISABLE;
+ lflags = of_convert_gpio_flags(of_flags);
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
@@ -372,12 +370,12 @@ EXPORT_SYMBOL_GPL(gpiod_get_from_of_node);
* properties should be named "foo-gpios" so we have this special kludge for
* them.
*/
-static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *of_find_spi_gpio(struct device_node *np,
+ const char *con_id,
+ unsigned int idx,
enum of_gpio_flags *of_flags)
{
char prop_name[32]; /* 32 is max size of property name */
- const struct device_node *np = dev->of_node;
- struct gpio_desc *desc;
/*
* Hopefully the compiler stubs the rest of the function if this
@@ -393,8 +391,7 @@ static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id
/* Will be "gpio-sck", "gpio-mosi" or "gpio-miso" */
snprintf(prop_name, sizeof(prop_name), "%s-%s", "gpio", con_id);
- desc = of_get_named_gpiod_flags(np, prop_name, 0, of_flags);
- return desc;
+ return of_get_named_gpiod_flags(np, prop_name, idx, of_flags);
}
/*
@@ -402,13 +399,11 @@ static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id
* lines rather than "cs-gpios" like all other SPI hardware. Account for this
* with a special quirk.
*/
-static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
+static struct gpio_desc *of_find_spi_cs_gpio(struct device_node *np,
const char *con_id,
unsigned int idx,
- unsigned long *flags)
+ enum of_gpio_flags *of_flags)
{
- const struct device_node *np = dev->of_node;
-
if (!IS_ENABLED(CONFIG_SPI_MASTER))
return ERR_PTR(-ENOENT);
@@ -426,7 +421,7 @@ static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
* uses just "gpios" so translate to that when "cs-gpios" is
* requested.
*/
- return of_find_gpio(dev, NULL, idx, flags);
+ return of_get_named_gpiod_flags(np, "gpios", idx, of_flags);
}
/*
@@ -434,7 +429,9 @@ static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
* properties should be named "foo-gpios" so we have this special kludge for
* them.
*/
-static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *of_find_regulator_gpio(struct device_node *np,
+ const char *con_id,
+ unsigned int idx,
enum of_gpio_flags *of_flags)
{
/* These are the connection IDs we accept as legacy GPIO phandles */
@@ -443,8 +440,6 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
"wlf,ldo1ena", /* WM8994 */
"wlf,ldo2ena", /* WM8994 */
};
- const struct device_node *np = dev->of_node;
- struct gpio_desc *desc;
int i;
if (!IS_ENABLED(CONFIG_REGULATOR))
@@ -457,12 +452,12 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
if (i < 0)
return ERR_PTR(-ENOENT);
- desc = of_get_named_gpiod_flags(np, con_id, 0, of_flags);
- return desc;
+ return of_get_named_gpiod_flags(np, con_id, idx, of_flags);
}
-static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
+static struct gpio_desc *of_find_arizona_gpio(struct device_node *np,
const char *con_id,
+ unsigned int idx,
enum of_gpio_flags *of_flags)
{
if (!IS_ENABLED(CONFIG_MFD_ARIZONA))
@@ -471,17 +466,18 @@ static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
if (!con_id || strcmp(con_id, "wlf,reset"))
return ERR_PTR(-ENOENT);
- return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+ return of_get_named_gpiod_flags(np, con_id, idx, of_flags);
}
-static struct gpio_desc *of_find_usb_gpio(struct device *dev,
+static struct gpio_desc *of_find_usb_gpio(struct device_node *np,
const char *con_id,
+ unsigned int idx,
enum of_gpio_flags *of_flags)
{
/*
- * Currently this USB quirk is only for the Fairchild FUSB302 host which is using
- * an undocumented DT GPIO line named "fcs,int_n" without the compulsory "-gpios"
- * suffix.
+ * Currently this USB quirk is only for the Fairchild FUSB302 host
+ * which is using an undocumented DT GPIO line named "fcs,int_n"
+ * without the compulsory "-gpios" suffix.
*/
if (!IS_ENABLED(CONFIG_TYPEC_FUSB302))
return ERR_PTR(-ENOENT);
@@ -489,14 +485,28 @@ static struct gpio_desc *of_find_usb_gpio(struct device *dev,
if (!con_id || strcmp(con_id, "fcs,int_n"))
return ERR_PTR(-ENOENT);
- return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+ return of_get_named_gpiod_flags(np, con_id, idx, of_flags);
}
+typedef struct gpio_desc *(*of_find_gpio_quirk)(struct device_node *np,
+ const char *con_id,
+ unsigned int idx,
+ enum of_gpio_flags *of_flags);
+static const of_find_gpio_quirk of_find_gpio_quirks[] = {
+ of_find_spi_gpio,
+ of_find_spi_cs_gpio,
+ of_find_regulator_gpio,
+ of_find_arizona_gpio,
+ of_find_usb_gpio,
+ NULL
+};
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
+ const of_find_gpio_quirk *q;
struct gpio_desc *desc;
unsigned int i;
@@ -516,51 +526,14 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
break;
}
- if (gpiod_not_found(desc)) {
- /* Special handling for SPI GPIOs if used */
- desc = of_find_spi_gpio(dev, con_id, &of_flags);
- }
-
- if (gpiod_not_found(desc)) {
- /* This quirk looks up flags and all */
- desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
- if (!IS_ERR(desc))
- return desc;
- }
-
- if (gpiod_not_found(desc)) {
- /* Special handling for regulator GPIOs if used */
- desc = of_find_regulator_gpio(dev, con_id, &of_flags);
- }
-
- if (gpiod_not_found(desc))
- desc = of_find_arizona_gpio(dev, con_id, &of_flags);
-
- if (gpiod_not_found(desc))
- desc = of_find_usb_gpio(dev, con_id, &of_flags);
+ /* Properly named GPIO was not found, try workarounds */
+ for (q = of_find_gpio_quirks; gpiod_not_found(desc) && *q; q++)
+ desc = (*q)(dev->of_node, con_id, idx, &of_flags);
if (IS_ERR(desc))
return desc;
- if (of_flags & OF_GPIO_ACTIVE_LOW)
- *flags |= GPIO_ACTIVE_LOW;
-
- if (of_flags & OF_GPIO_SINGLE_ENDED) {
- if (of_flags & OF_GPIO_OPEN_DRAIN)
- *flags |= GPIO_OPEN_DRAIN;
- else
- *flags |= GPIO_OPEN_SOURCE;
- }
-
- if (of_flags & OF_GPIO_TRANSITORY)
- *flags |= GPIO_TRANSITORY;
-
- if (of_flags & OF_GPIO_PULL_UP)
- *flags |= GPIO_PULL_UP;
- if (of_flags & OF_GPIO_PULL_DOWN)
- *flags |= GPIO_PULL_DOWN;
- if (of_flags & OF_GPIO_PULL_DISABLE)
- *flags |= GPIO_PULL_DISABLE;
+ *flags = of_convert_gpio_flags(of_flags);
return desc;
}
@@ -618,16 +591,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
if (IS_ERR(desc))
return desc;
- if (xlate_flags & OF_GPIO_ACTIVE_LOW)
- *lflags |= GPIO_ACTIVE_LOW;
- if (xlate_flags & OF_GPIO_TRANSITORY)
- *lflags |= GPIO_TRANSITORY;
- if (xlate_flags & OF_GPIO_PULL_UP)
- *lflags |= GPIO_PULL_UP;
- if (xlate_flags & OF_GPIO_PULL_DOWN)
- *lflags |= GPIO_PULL_DOWN;
- if (xlate_flags & OF_GPIO_PULL_DISABLE)
- *lflags |= GPIO_PULL_DISABLE;
+ *lflags = of_convert_gpio_flags(xlate_flags);
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
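With the quirk table introduced above, adding another legacy-binding workaround reduces to one callback with the of_find_gpio_quirk signature plus an entry ahead of the terminating NULL. A hedged sketch (the "foo" connection ID is invented for illustration):

static struct gpio_desc *of_find_foo_gpio(struct device_node *np,
					  const char *con_id,
					  unsigned int idx,
					  enum of_gpio_flags *of_flags)
{
	/* Only kick in for the legacy connection ID this quirk exists for. */
	if (!con_id || strcmp(con_id, "foo"))
		return ERR_PTR(-ENOENT);

	return of_get_named_gpiod_flags(np, con_id, idx, of_flags);
}

/* ...and list of_find_foo_gpio in of_find_gpio_quirks[] before the NULL terminator. */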
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cc9c0a12259e..4756ea08894f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3799,6 +3799,72 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
}
/**
+ * fwnode_get_named_gpiod - obtain a GPIO from firmware node
+ * @fwnode: handle of the firmware node
+ * @propname: name of the firmware property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * This function can be used for drivers that get their configuration
+ * from opaque firmware.
+ *
+ * The function properly finds the corresponding GPIO using whatever is the
+ * underlying firmware interface and then makes sure that the GPIO
+ * descriptor is requested before it is returned to the caller.
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+static struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ struct gpio_desc *desc = ERR_PTR(-ENODEV);
+ int ret;
+
+ if (is_of_node(fwnode)) {
+ desc = gpiod_get_from_of_node(to_of_node(fwnode),
+ propname, index,
+ dflags,
+ label);
+ return desc;
+ } else if (is_acpi_node(fwnode)) {
+ struct acpi_gpio_info info;
+
+ desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
+ if (IS_ERR(desc))
+ return desc;
+
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Currently only ACPI takes this path */
+ ret = gpiod_request(desc, label);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+ if (ret < 0) {
+ gpiod_put(desc);
+ return ERR_PTR(ret);
+ }
+
+ blocking_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
+ return desc;
+}
+
+/**
* fwnode_gpiod_get_index - obtain a GPIO from firmware node
* @fwnode: handle of the firmware node
* @con_id: function within the GPIO consumer
@@ -4064,72 +4130,6 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
- * fwnode_get_named_gpiod - obtain a GPIO from firmware node
- * @fwnode: handle of the firmware node
- * @propname: name of the firmware property representing the GPIO
- * @index: index of the GPIO to obtain for the consumer
- * @dflags: GPIO initialization flags
- * @label: label to attach to the requested GPIO
- *
- * This function can be used for drivers that get their configuration
- * from opaque firmware.
- *
- * The function properly finds the corresponding GPIO using whatever is the
- * underlying firmware interface and then makes sure that the GPIO
- * descriptor is requested before it is returned to the caller.
- *
- * Returns:
- * On successful request the GPIO pin is configured in accordance with
- * provided @dflags.
- *
- * In case of error an ERR_PTR() is returned.
- */
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- struct gpio_desc *desc = ERR_PTR(-ENODEV);
- int ret;
-
- if (is_of_node(fwnode)) {
- desc = gpiod_get_from_of_node(to_of_node(fwnode),
- propname, index,
- dflags,
- label);
- return desc;
- } else if (is_acpi_node(fwnode)) {
- struct acpi_gpio_info info;
-
- desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
- if (IS_ERR(desc))
- return desc;
-
- acpi_gpio_update_gpiod_flags(&dflags, &info);
- acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
- } else
- return ERR_PTR(-EINVAL);
-
- /* Currently only ACPI takes this path */
- ret = gpiod_request(desc, label);
- if (ret)
- return ERR_PTR(ret);
-
- ret = gpiod_configure_flags(desc, propname, lflags, dflags);
- if (ret < 0) {
- gpiod_put(desc);
- return ERR_PTR(ret);
- }
-
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
-
- return desc;
-}
-EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
-
-/**
* gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO
* function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
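With fwnode_get_named_gpiod() now static, users outside gpiolib are expected to go through fwnode_gpiod_get_index() (or devm_fwnode_gpiod_get()), which builds the "<con_id>-gpios"-style property names on top of the same OF/ACPI dispatch shown above. A rough consumer-side sketch, with the "reset" function name and label illustrative only:

	struct gpio_desc *reset;

	/* Resolves e.g. "reset-gpios" in DT, or the ACPI GpioIo equivalent */
	reset = fwnode_gpiod_get_index(fwnode, "reset", 0,
				       GPIOD_OUT_LOW, "phy-reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);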
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 198ba846d34b..34f5a092c99e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -51,6 +51,18 @@ config DRM_DEBUG_MM
If in doubt, say "N".
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default y
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
depends on DRM && KUNIT
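With DRM_USE_DYNAMIC_DEBUG enabled, the drm.debug categories become dynamic-debug classes (see the DECLARE_DYNDBG_CLASSMAP() block added to amdgpu_drv.c further down), so individual categories can be flipped per module at run time instead of being gated by a single drm_debug_enabled() test. A hedged illustration of the consumer side; the control-file syntax assumes the dyndbg "class" keyword introduced by the same series:

	/*
	 * Under DRM_USE_DYNAMIC_DEBUG=y this becomes a dyndbg callsite in
	 * class DRM_UT_KMS and can be enabled at run time with e.g.
	 *   echo "class DRM_UT_KMS +p" > <debugfs>/dynamic_debug/control
	 * rather than by booting with drm.debug=0x4.
	 */
	drm_dbg_kms(drm, "enabling CRTC %u\n", crtc->index);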
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25d0ba310509..0b283e46f28b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -3,6 +3,8 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
+
drm-y := drm_aperture.o drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o \
drm_drv.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ae9371b172e3..8639a4f9c6e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -274,9 +274,6 @@ extern int amdgpu_vcnfw_log;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
-#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0)
-#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1)
-
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
@@ -1065,7 +1062,6 @@ struct amdgpu_device {
struct work_struct reset_work;
- uint32_t amdgpu_reset_level_mask;
bool job_hang;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 9e98f3866edc..0561812aa0a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -75,9 +75,6 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
adev->kfd.dev = kgd2kfd_probe(adev, vf);
-
- if (adev->kfd.dev)
- amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -137,7 +134,6 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
@@ -201,6 +197,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
adev_to_drm(adev), &gpu_resources);
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
+
INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
}
}
@@ -210,6 +208,7 @@ void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
if (adev->kfd.dev) {
kgd2kfd_device_exit(adev->kfd.dev);
adev->kfd.dev = NULL;
+ amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index 0b0a72ca5695..7e80caa05060 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -111,7 +111,7 @@ static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id)
lock_srbm(adev, mec, pipe, 0, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, regCPC_INT_CNTL),
+ WREG32_SOC15(GC, 0, regCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6066aebf491c..de61a85c4b02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1954,8 +1954,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return PTR_ERR(ent);
}
- debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask);
-
/* Register debugfs entries for amdgpu_ttm */
amdgpu_ttm_debugfs_init(adev);
amdgpu_debugfs_pm_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ab8f970b2849..e0445e8cc342 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2928,6 +2928,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ /*
+ * Per PMFW team's suggestion, the driver needs to handle gfxoff
+ * and df cstate feature disablement for gpu reset (e.g. Mode1Reset)
+ * scenarios. Add the missing df cstate disablement here.
+ */
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -5210,7 +5218,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->job = job;
reset_context->hive = hive;
-
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
@@ -5337,11 +5344,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
- if (r && r == -EAGAIN) {
- set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
- adev->asic_reset_res = 0;
+ if (r && r == -EAGAIN)
goto retry;
- }
if (!r && gpu_reset_for_dev_remove)
goto recover_end;
@@ -5777,7 +5781,6 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(adev, &reset_context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 23998f727c7f..1a06b8d724f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,8 +38,6 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -500,12 +498,6 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
-static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
- .destroy = drm_gem_fb_destroy,
- .create_handle = drm_gem_fb_create_handle,
- .dirty = drm_atomic_helper_dirtyfb,
-};
-
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1108,10 +1100,8 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- if (drm_drv_uses_atomic_modeset(dev))
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
- else
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 16f6a313335e..3c9fecdd6b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,6 +38,8 @@
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
#include <linux/cc_platform.h>
+#include <linux/fb.h>
+#include <linux/dynamic_debug.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -186,6 +188,18 @@ int amdgpu_vcnfw_log;
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
.delayed_reset_work = __DELAYED_WORK_INITIALIZER(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 46c99331d7f1..cd968e781077 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -72,7 +72,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e6a9b9fc9e0b..2e8f6cd7a729 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -688,13 +688,16 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
* num of amdgpu_vm_pt entries.
*/
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
- bp->destroy = &amdgpu_bo_vm_destroy;
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+ /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+ * is initialized.
+ */
+ bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ccebd8e2a2d8..a4b47e1bd111 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1950,7 +1950,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
}
@@ -2268,6 +2267,25 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev)) {
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ if (adev->asic_type == CHIP_IP_DISCOVERY) {
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 10):
+ return true;
+ default:
+ return false;
+ }
+ }
+
return adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_ARCTURUS ||
@@ -2311,11 +2329,6 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
!amdgpu_ras_asic_supported(adev))
return;
- /* If driver run on sriov guest side, only enable ras for aldebaran */
- if (amdgpu_sriov_vf(adev) &&
- adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2))
- return;
-
if (!adev->gmc.xgmi.connected_to_cpu) {
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
dev_info(adev->dev, "MEM ECC is active.\n");
@@ -2877,9 +2890,9 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
err_data.err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
- if(!err_data.err_addr) {
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record in mca notifier!\n");
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error record in mca notifier!\n");
return NOTIFY_DONE;
}
@@ -2889,7 +2902,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
if (adev->umc.ras &&
adev->umc.ras->convert_ras_error_address)
adev->umc.ras->convert_ras_error_address(adev,
- &err_data, 0, ch_inst, umc_inst, m->addr);
+ &err_data, m->addr, ch_inst, umc_inst);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 9da5ead50c90..f778466bb9db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -37,8 +37,6 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
- adev->amdgpu_reset_level_mask = 0x1;
-
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_init(adev);
@@ -76,12 +74,6 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
{
struct amdgpu_reset_handler *reset_handler = NULL;
- if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
- return -ENOSYS;
-
- if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
- return -ENOSYS;
-
if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
@@ -98,12 +90,6 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
int ret;
struct amdgpu_reset_handler *reset_handler = NULL;
- if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
- return -ENOSYS;
-
- if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
- return -ENOSYS;
-
if (adev->reset_cntl)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index f5318fedf2f0..f4a501ff87d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -30,8 +30,7 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
- AMDGPU_SKIP_MODE2_RESET = 2,
- AMDGPU_RESET_FOR_DEVICE_REMOVE = 3,
+ AMDGPU_RESET_FOR_DEVICE_REMOVE = 2,
};
struct amdgpu_reset_context {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3e316b013fd9..d3558c34d406 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -405,9 +405,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
- if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY))
- return false;
-
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 3949b7e3907f..ea5278f094c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -222,8 +222,10 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
adev->sdma.instance[instance].fw->data;
version_major = le16_to_cpu(header->header_version_major);
- if ((duplicate && instance) || (!duplicate && version_major > 1))
- return -EINVAL;
+ if ((duplicate && instance) || (!duplicate && version_major > 1)) {
+ err = -EINVAL;
+ goto out;
+ }
err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
if (err)
@@ -272,7 +274,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
break;
default:
- return -EINVAL;
+ err = -EINVAL;
}
}
@@ -283,3 +285,24 @@ out:
}
return err;
}
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue) {
+ sdma = &adev->sdma.instance[i].page;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+ sdma = &adev->sdma.instance[i].ring;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+}
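This helper centralizes the "is this ring currently backing TTM's buffer funcs" check that every SDMA generation used to open-code against a hard-coded instance count; the per-ASIC *_gfx_stop()/*_page_stop() hunks below simply call it before disabling their rings. A condensed sketch of the resulting caller pattern, with the function name standing in for any of the converted generations:

static void sdma_vX_0_gfx_stop(struct amdgpu_device *adev)
{
	int i;

	/* Detach TTM from whichever SDMA ring was providing buffer funcs */
	amdgpu_sdma_unset_buffer_funcs_helper(adev);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		/* per-instance RB/IB disable via the usual register writes */
	}
}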
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d2d88279fefb..7d99205c2e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -128,4 +128,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
char *fw_name, u32 instance, bool duplicate);
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b1c455329023..57277b1cf183 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -424,8 +424,9 @@ error:
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
- uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+ u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
struct amdgpu_res_cursor cursor;
+ u64 end;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
@@ -434,12 +435,21 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
return false;
amdgpu_res_first(mem, 0, mem_size, &cursor);
+ end = cursor.start + cursor.size;
+ while (cursor.remaining) {
+ amdgpu_res_next(&cursor, cursor.size);
- /* ttm_resource_ioremap only supports contiguous memory */
- if (cursor.size != mem_size)
- return false;
+ if (!cursor.remaining)
+ break;
+
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (end != cursor.start)
+ return false;
+
+ end = cursor.start + cursor.size;
+ }
- return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
+ return end <= adev->gmc.visible_vram_size;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 2fb4951a6433..e46439274f3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -22,8 +22,6 @@
#define __AMDGPU_UMC_H__
#include "amdgpu_ras.h"
-#define UMC_INVALID_ADDR 0x1ULL
-
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
* is the index of 4KB block
@@ -54,9 +52,8 @@ struct amdgpu_umc_ras {
void (*err_cnt_init)(struct amdgpu_device *adev);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
void (*convert_ras_error_address)(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- uint32_t umc_reg_offset, uint32_t ch_inst,
- uint32_t umc_inst, uint64_t mca_addr);
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst);
void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e4af40b9a8aa..9c765b04aae3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -726,6 +726,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ /* VF MMIO access (except mailbox range) from CPU
+ * will be blocked during sriov runtime
+ */
+ adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index d94c31e68a14..49c4347d154c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -31,6 +31,7 @@
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */
/* flags for indirect register access path supported by rlcg for sriov */
#define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28)
@@ -297,6 +298,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
+#define amdgpu_sriov_vf_mmio_access_protection(adev) \
+((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
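The new cap bit and the amdgpu_sriov_vf_mmio_access_protection() macro give later code a cheap way to ask whether CPU MMIO writes are blocked during SR-IOV runtime; the amdgpu_vm.c hunk that follows is the first user. Condensed from that hunk, the intended check looks roughly like:

	/* Prefer SDMA over CPU for page-table updates when VF MMIO is blocked */
	if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    !amdgpu_sriov_vf_mmio_access_protection(adev))
		adev->vm_manager.vm_update_mode = AMDGPU_VM_USE_CPU_FOR_COMPUTE;
	else
		adev->vm_manager.vm_update_mode = 0;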
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 83b0c5d86e48..2291aa14d888 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2338,7 +2338,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
#ifdef CONFIG_X86_64
if (amdgpu_vm_update_mode == -1) {
- if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ /* For ASICs with VF MMIO access protection,
+ * avoid using the CPU for VM table updates
+ */
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ !amdgpu_sriov_vf_mmio_access_protection(adev))
adev->vm_manager.vm_update_mode =
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 2b0669c464f6..69e105fa41f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
DMA_RESV_USAGE_BOOKKEEP);
}
- if (fence && !p->immediate)
+ if (fence && !p->immediate) {
+ /*
+ * Most hw generations now have a separate queue for page table
+ * updates, but when the queue is shared with userspace we need
+ * the extra CPU round trip to correctly flush the TLB.
+ */
+ set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
swap(*fence, f);
+ }
dma_fence_put(f);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5647f13b98d4..cbca9866645c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
*/
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 251109723ab6..671ca5a0f208 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1571,7 +1571,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
/* Enable trap for each kfd vmid. */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL));
+ data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
}
soc21_grbm_select(adev, 0, 0, 0, 0);
@@ -5076,6 +5076,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 846ccb6cf07d..66dfb574cc7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -186,6 +186,10 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned int i;
+ unsigned char hub_ip = 0;
+
+ hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
+ GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
/*
@@ -199,8 +203,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
if (use_semaphore) {
for (i = 0; i < adev->usec_timeout; i++) {
/* a read return value of 1 means semaphore acquire */
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng);
+ tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+ hub->eng_distance * eng, hub_ip);
if (tmp & 0x1)
break;
udelay(1);
@@ -210,12 +214,12 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+ WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
/* Wait for ACK with a delay.*/
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
- hub->eng_distance * eng);
+ tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
+ hub->eng_distance * eng, hub_ip);
tmp &= 1 << vmid;
if (tmp)
break;
@@ -229,8 +233,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
* add semaphore release after invalidation,
* write with 0 means semaphore release
*/
- WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng, 0);
+ WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+ hub->eng_distance * eng, 0, hub_ip);
/* Issue additional private vm invalidation to MMHUB */
if ((vmhub != AMDGPU_GFXHUB_0) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 5cec6b259b7f..fef7d020bc5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1156,6 +1156,42 @@ static int mes_v11_0_sw_fini(void *handle)
return 0;
}
+static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ int i;
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0);
+
+ /* disable the queue if it's active */
+ if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ }
+ data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 0);
+ data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_HIT, 1);
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+
+ adev->mes.ring.sched.ready = false;
+}
+
static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
uint32_t tmp;
@@ -1207,6 +1243,9 @@ failure:
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
+ if (adev->mes.ring.sched.ready)
+ mes_v11_0_kiq_dequeue_sched(adev);
+
mes_v11_0_enable(adev, false);
return 0;
}
@@ -1262,9 +1301,6 @@ failure:
static int mes_v11_0_hw_fini(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->mes.ring.sched.ready = false;
return 0;
}
@@ -1296,7 +1332,8 @@ static int mes_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!amdgpu_in_reset(adev))
+ if (!amdgpu_in_reset(adev) &&
+ (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index a2f04b249132..12906ba74462 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -290,7 +290,6 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index a977f0027928..e07757eea7ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -317,7 +317,6 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index fd14fa9b9cd7..288c414babdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -529,7 +529,6 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6bdffdc1c0b9..c52d246a1d96 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2584fa3cb13e..486d9b5c1b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7241a9fb0121..1122bd4eae98 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -915,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].ring;
+ int i;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -957,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].page;
-
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -1431,11 +1417,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
-
/* unhalt engine */
temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -1954,8 +1935,11 @@ static int sdma_v4_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index c05c3eebde4c..d4d9f196db83 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -584,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1460,8 +1456,11 @@ static int sdma_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f136fec7b4f4..809eca54fc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -414,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
- struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
- struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1) ||
- (adev->mman.buffer_funcs_ring == sdma2) ||
- (adev->mman.buffer_funcs_ring == sdma3))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1357,8 +1349,11 @@ static int sdma_v5_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_2_ctx_switch_enable(adev, false);
sdma_v5_2_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index db51230163c5..da3beb0bf2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -398,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -415,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -846,7 +839,8 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_cntl =
order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
@@ -1317,8 +1311,11 @@ static int sdma_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f675111ace20..4d5e718540aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
u32 rb_cntl;
unsigned i;
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
index 7aa570c1ce4a..81a6d5b94987 100644
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -31,12 +31,23 @@
#include "amdgpu_psp.h"
#include "amdgpu_xgmi.h"
+static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
+{
+#if 0
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) &&
+ adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))
+ return true;
+#endif
+ return false;
+}
+
static struct amdgpu_reset_handler *
sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_reset_handler *handler;
- struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
if (reset_context->method != AMD_RESET_METHOD_NONE) {
list_for_each_entry(handler, &reset_ctl->reset_handlers,
@@ -44,15 +55,13 @@ sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
if (handler->reset_method == reset_context->method)
return handler;
}
- } else {
- list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ }
+
+ if (sienna_cichlid_is_mode2_default(reset_ctl)) {
+ list_for_each_entry (handler, &reset_ctl->reset_handlers,
handler_list) {
- if (handler->reset_method == AMD_RESET_METHOD_MODE2 &&
- adev->pm.fw_version >= 0x3a5500 &&
- !amdgpu_sriov_vf(adev)) {
- reset_context->method = AMD_RESET_METHOD_MODE2;
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2)
return handler;
- }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 183024d7c184..e3b2b6b4f1a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1211,6 +1211,20 @@ static int soc15_common_sw_fini(void *handle)
return 0;
}
+static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ /* sdma doorbell range is programmed by hypervisor */
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ true, adev->doorbell_index.sdma_engine[i] << 1,
+ adev->doorbell_index.sdma_doorbell_range);
+ }
+ }
+}
+
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1230,6 +1244,13 @@ static int soc15_common_hw_init(void *handle)
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
+ /* HW doorbell routing policy: doorbell writes not
+ * in the SDMA/IH/MM/ACV range are routed to CP, so
+ * we need to init the SDMA doorbell range prior
+ * to CP ip block init and ring test. IH init already
+ * happens before CP.
+ */
+ soc15_sdma_doorbell_range_init(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 16b757664a35..e08044008186 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -423,6 +423,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
return false;
default:
return true;
@@ -629,13 +630,18 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
case IP_VERSION(11, 0, 3):
adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
- AMD_CG_SUPPORT_JPEG_MGCG;
+ AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 939cb203f7ad..f17d297b594b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -327,10 +327,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -343,10 +342,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
+ amdgpu_umc_fill_error_record(err_data, err_addr,
retired_page, channel_index, umc_inst);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index a0d19b768346..5d5d031c9e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -187,20 +187,51 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint32_t channel_index;
+ uint64_t soc_pa, retired_page, column;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ /* translate umc channel address to soc pa, 3 parts are included */
+ soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* The umc channel bits are not original values, they are hashed */
+ SET_CHANNEL_HASH(channel_index, soc_pa);
+
+ /* clear [C4 C3 C2] in soc physical address */
+ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+
+ /* shift R14 bit */
+ retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+ }
+}
+
static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -209,42 +240,15 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -453,81 +457,40 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset, uint32_t ch_inst,
- uint32_t umc_inst, uint64_t mca_addr)
+ uint32_t umc_inst)
{
uint32_t mc_umc_status_addr;
- uint32_t channel_index;
- uint64_t mc_umc_status = 0, mc_umc_addrt0;
- uint64_t err_addr, soc_pa, retired_page, column;
+ uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
- if (mca_addr == UMC_INVALID_ADDR) {
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- mc_umc_addrt0 =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
- if (mc_umc_status == 0)
- return;
+ if (mc_umc_status == 0)
+ return;
- if (!err_data->err_addr) {
- /* clear umc status */
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
- return;
- }
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
}
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
- /* calculate error address if ue/ce error is detected */
- if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) ||
- mca_addr != UMC_INVALID_ADDR) {
- if (mca_addr == UMC_INVALID_ADDR) {
- err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
- err_addr =
- REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- } else {
- err_addr = mca_addr;
- }
+ /* calculate error address if ue error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ err_addr =
+ REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1 ||
- mca_addr != UMC_INVALID_ADDR) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
- if (mca_addr == UMC_INVALID_ADDR)
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}
static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
@@ -549,7 +512,7 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
umc_v6_7_query_error_address(adev,
err_data,
umc_reg_offset, ch_inst,
- umc_inst, UMC_INVALID_ADDR);
+ umc_inst);
}
}
@@ -590,5 +553,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
- .convert_ras_error_address = umc_v6_7_query_error_address,
+ .convert_ras_error_address = umc_v6_7_convert_error_address,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index a8cbda81828d..91235df54e22 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -208,7 +208,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
{
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
- uint32_t channel_index;
+ uint64_t mc_umc_addrt0, na_err_addr_base;
+ uint64_t na_err_addr, retired_page_addr;
+ uint32_t channel_index, addr_lsb, col = 0;
+ int ret = 0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -229,13 +232,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
umc_inst * adev->umc.channel_inst_num +
ch_inst];
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
- uint32_t addr_lsb;
- uint64_t mc_umc_addrt0;
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
@@ -243,32 +243,24 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
/* the lowest lsb bits should be ignored */
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
-
err_addr &= ~((0x1ULL << addr_lsb) - 1);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
- uint64_t na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
- uint64_t na_err_addr, retired_page_addr;
- uint32_t col = 0;
- int ret = 0;
-
- /* loop for all possibilities of [C6 C5] in normal address. */
- for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
- na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
- /* Mapping normal error address to retired soc physical address. */
- ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
- na_err_addr, &retired_page_addr);
- if (ret) {
- dev_err(adev->dev, "Failed to map pa from umc na.\n");
- break;
- }
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
- retired_page_addr);
- amdgpu_umc_fill_error_record(err_data, na_err_addr,
- retired_page_addr, channel_index, umc_inst);
+ na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+ /* loop for all possibilities of [C6 C5] in normal address. */
+ for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+ na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+ /* Mapping normal error address to retired soc physical address. */
+ ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+ na_err_addr, &retired_page_addr);
+ if (ret) {
+ dev_err(adev->dev, "Failed to map pa from umc na.\n");
+ break;
}
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+ retired_page_addr);
+ amdgpu_umc_fill_error_record(err_data, na_err_addr,
+ retired_page_addr, channel_index, umc_inst);
}
}
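For readers unfamiliar with the retirement scheme above, here is a minimal, self-contained sketch of the column-enumeration idea: because the [C6 C5] bits of the normalized error address are ambiguous, every combination is generated and each resulting candidate page is retired. The bit position and loop bound below are illustrative stand-ins, not the driver's UMC_V8_10_* constants.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only; the real UMC_V8_10_* constants differ. */
#define NA_C5_BIT            8
#define NA_COL_2BITS_COMBOS  4

int main(void)
{
	uint64_t err_addr = 0x1234500ULL;
	/* clear the two column bits to get the base normalized address */
	uint64_t na_base = err_addr & ~(0x3ULL << NA_C5_BIT);
	uint32_t col;

	/* try every [C6 C5] combination and report each candidate address */
	for (col = 0; col < NA_COL_2BITS_COMBOS; col++) {
		uint64_t na = na_base | ((uint64_t)col << NA_C5_BIT);
		printf("candidate normalized address: 0x%llx\n",
		       (unsigned long long)na);
	}
	return 0;
}
```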
@@ -338,6 +330,31 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
}
}
+static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
+ struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+ ecc_ctrl_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
+ ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+ umc_reg_offset) * 4);
+
+ return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
+}
+
+static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t umc_reg_offset = 0;
+
+ /* If fatal error is enabled in umc node0 instance0 channel0, the
+  * device is considered to be in fatal error mode (i.e. not in
+  * poison mode)
+  */
+ umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
+ return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+}
+
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -348,4 +365,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
.hw_ops = &umc_v8_10_ras_hw_ops,
},
.err_cnt_init = umc_v8_10_err_cnt_init,
+ .query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index f35253e0eaa6..b717fdaa46e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -108,20 +108,35 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v8_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint64_t retired_page;
+ uint32_t channel_index;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+}
+
static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, retired_page;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -130,24 +145,15 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -324,14 +330,12 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
uint32_t umc_inst)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
- uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ uint64_t mc_umc_status, err_addr, mc_umc_addrt0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
-
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
@@ -343,10 +347,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -354,16 +357,8 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
err_addr &= ~((0x1ULL << lsb) - 1);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
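As context for the refactor above, this is a minimal, self-contained sketch of the three-part address combine that the new umc_v8_7_convert_error_address() helper centralizes: a block-address part, a channel part, and the offset within a 256B block. The macro bodies below are simplified placeholders, not the driver's real ADDR_OF_* / OFFSET_IN_* definitions, which encode the actual UMC address layout.

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder layouts for illustration only. */
#define ADDR_OF_4KB_BLOCK(addr)     (((addr) >> 12) << 20)
#define ADDR_OF_256B_BLOCK(ch)      ((uint64_t)(ch) << 8)
#define OFFSET_IN_256B_BLOCK(addr)  ((addr) & 0xffULL)

/* Combine block address, channel index and in-block offset into one PA. */
static uint64_t convert_error_address(uint64_t err_addr, uint32_t channel_index)
{
	return ADDR_OF_4KB_BLOCK(err_addr) |
	       ADDR_OF_256B_BLOCK(channel_index) |
	       OFFSET_IN_256B_BLOCK(err_addr);
}

int main(void)
{
	printf("retired page: 0x%llx\n",
	       (unsigned long long)convert_error_address(0x12345ffULL, 3));
	return 0;
}
```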
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index c70c026c9a93..2797029bd500 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -223,7 +223,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- lock_page(page);
+ zone_device_page_init(page);
}
static void
@@ -410,7 +410,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
- struct migrate_vma migrate;
+ struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
dma_addr_t *scratch;
void *buf;
@@ -666,7 +666,7 @@ out_oom:
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end,
- uint32_t trigger)
+ uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -674,7 +674,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long cpages = 0;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
- struct migrate_vma migrate;
+ struct migrate_vma migrate = { 0 };
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@@ -697,6 +697,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.src = buf;
migrate.dst = migrate.src + npages;
+ migrate.fault_page = fault_page;
scratch = (dma_addr_t *)(migrate.dst + npages);
kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
@@ -764,7 +765,7 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
- uint32_t trigger)
+ uint32_t trigger, struct page *fault_page)
{
struct amdgpu_device *adev;
struct vm_area_struct *vma;
@@ -805,7 +806,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger);
+ r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
+ fault_page);
if (r < 0) {
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
@@ -849,7 +851,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
- r = svm_migrate_vram_to_ram(prange, mm, trigger);
+ r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@@ -950,7 +952,8 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
}
r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
+ vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
r, prange->svms, prange, prange->start, prange->last);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index b3f0754b32fa..a5d7e6d22264 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -43,7 +43,7 @@ enum MIGRATION_COPY_DIR {
int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
struct mm_struct *mm, uint32_t trigger);
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
- uint32_t trigger);
+ uint32_t trigger, struct page *fault_page);
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 26b53b6d673e..4f6390f3236e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -333,7 +333,8 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f5913ba22174..64fdf63093a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2913,13 +2913,15 @@ retry_write_locked:
*/
if (prange->actual_loc)
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ NULL);
else
r = 0;
}
} else {
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
@@ -3278,7 +3280,8 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
return 0;
if (!best_loc) {
- r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+ r = svm_migrate_vram_to_ram(prange, mm,
+ KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
@@ -3339,7 +3342,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_TTM_EVICTION);
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);
if (!r && prange->actual_loc)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4c73727e0b7d..c053cb79cd06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1110,7 +1110,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb[i] = &fb_info->fb[i];
switch (adev->ip_versions[DCE_HWIP][0]) {
- case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
@@ -7478,15 +7479,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
- dm_set_vupdate_irq(new_state->base.crtc, true);
- drm_crtc_vblank_get(new_state->base.crtc);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
- dm_set_vupdate_irq(new_state->base.crtc, false);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -8242,23 +8243,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
- /* Count number of newly disabled CRTCs for dropping PM refs later. */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (old_crtc_state->active && !new_crtc_state->active)
- crtc_disable_count++;
-
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
- /* For freesync config update on crtc state and params for irq */
- update_stream_irq_parameters(dm, dm_new_crtc_state);
-
- /* Handle vrr on->off / off->on transitions */
- amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
- dm_new_crtc_state);
- }
-
/**
* Enable interrupts for CRTCs that are newly enabled or went through
* a modeset. It was intentionally deferred until after the front end
@@ -8268,16 +8252,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
- bool configure_crc = false;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+ struct crc_rd_work *crc_rd_wrk;
+#endif
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ crc_rd_wrk = dm->crc_rd_wrk;
#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (new_crtc_state->active &&
(!old_crtc_state->active ||
@@ -8285,16 +8282,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
- configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
@@ -8306,14 +8306,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
- }
-
- if (configure_crc)
if (amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src))
DRM_DEBUG_DRIVER("Failed to configure crc source");
-#endif
+ }
}
+#endif
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
@@ -9392,10 +9390,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
- if (!pre_validate_dsc(state, &dm_state, vars)) {
- ret = -EINVAL;
- goto fail;
- }
}
#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -9529,6 +9523,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ if (!pre_validate_dsc(state, &dm_state, vars)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+#endif
+
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 8ca10ab3dfc1..26291db0a3cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -60,11 +60,15 @@ static bool link_supports_psrsu(struct dc_link *link)
*/
void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
- if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP)) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
- if (link->type == dc_connection_none)
+ if (link->type == dc_connection_none) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
if (link->dpcd_caps.psr_info.psr_version == 0) {
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 53b077b40d72..ee0456b5e14e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -51,13 +51,6 @@
#define LAST_RECORD_TYPE 0xff
#define SMU9_SYSPLL0_ID 0
-struct i2c_id_config_access {
- uint8_t bfI2C_LineMux:4;
- uint8_t bfHW_EngineID:3;
- uint8_t bfHW_Capable:1;
- uint8_t ucAccess;
-};
-
static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
struct atom_i2c_record *record,
struct graphics_object_i2c_info *info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d30d1d9d67e..650f3b4b562e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -179,7 +179,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
} else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, 126);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
@@ -206,7 +206,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 897105d1c111..ef0795b14a1f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -339,29 +339,24 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
if (!clk_mgr->smu_present)
return;
- if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
- (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
- support = DCN_ZSTATE_SUPPORT_DISALLOW;
-
-
// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
// Arg[16] = Disallow Z9 -> new bit
switch (support) {
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 9;
+ param = (1 << 10) | (1 << 9) | (1 << 8);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 8;
+ param = 0;
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 0x00010008;
+ param = (1 << 10);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index f0f3f66629cc..1c612ccf1944 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,7 +156,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
- unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
@@ -180,27 +180,28 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
/* DCFCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
- &num_levels);
- num_dcfclk_levels = num_levels;
+ &num_entries_per_clk->num_dcfclk_levels);
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_socclk_levels);
+
/* DTBCLK */
if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
- &num_levels);
- num_dtbclk_levels = num_levels;
+ &num_entries_per_clk->num_dtbclk_levels);
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
- &num_levels);
- num_dispclk_levels = num_levels;
+ &num_entries_per_clk->num_dispclk_levels);
+ num_levels = num_entries_per_clk->num_dispclk_levels;
- if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+ if (num_entries_per_clk->num_dcfclk_levels &&
+ num_entries_per_clk->num_dtbclk_levels &&
+ num_entries_per_clk->num_dispclk_levels)
clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
@@ -333,6 +334,21 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (enter_display_off == safe_to_lower)
dcn30_smu_set_num_of_displays(clk_mgr, display_count);
+ clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
+
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+
+ if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
+ clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+
+ /* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support) {
+ /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
+ }
+ }
+
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
@@ -352,7 +368,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
@@ -361,27 +376,25 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
- clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+ /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
+ if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
+ update_fclk = true;
+ }
- /* To disable FCLK P-state switching, send FCLK_PSTATE_NOTSUPPORTED message to PMFW */
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
- }
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+ /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -390,21 +403,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
update_uclk = true;
}
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
- }
-
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
@@ -632,7 +635,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
@@ -648,22 +651,34 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
/* Get current memclk states, update bounding box */
static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
unsigned int num_levels;
if (!clk_mgr->smu_present)
return;
- /* Refresh memclk states */
+ /* Refresh memclk and fclk states */
dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_memclk_levels);
+
+ dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
+ &num_entries_per_clk->num_fclk_levels);
+
+ if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
+ num_levels = num_entries_per_clk->num_memclk_levels;
+ } else {
+ num_levels = num_entries_per_clk->num_fclk_levels;
+ }
+
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
if (clk_mgr->dpm_present && !num_levels)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 258ba5a872b1..997ab031f816 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1734,10 +1734,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
struct dc_state *old_state;
+ bool subvp_prev_use = false;
dc_z10_restore(dc);
dc_allow_idle_optimizations(dc, false);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* Check old context for SubVP */
+ subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1777,6 +1787,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
if (result != DC_OK) {
@@ -1794,6 +1807,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
+
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -2927,6 +2946,12 @@ static bool update_planes_and_stream_state(struct dc *dc,
dc_resource_state_copy_construct(
dc->current_state, context);
+ /* For each full update, remove all existing phantom pipes first.
+ * Ensures that we have enough pipes for newly added MPO planes
+ */
+ if (dc->res_pool->funcs->remove_phantom_pipes)
+ dc->res_pool->funcs->remove_phantom_pipes(dc, context);
+
/*remove old surfaces from context */
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
@@ -3334,8 +3359,14 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ }
+
return;
}
@@ -3495,6 +3526,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
@@ -3542,6 +3576,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
bool force_minimal_pipe_splitting = false;
+ uint32_t i;
*is_plane_addition = false;
@@ -3573,6 +3608,36 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
}
}
+ /* For SubVP pipe split case when adding MPO video
+ * we need to add a minimal transition. In this case
+ * there will be 2 streams (1 main stream, 1 phantom
+ * stream).
+ */
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 2 &&
+ stream->mall_stream_config.type == SUBVP_MAIN) {
+ bool is_pipe_split = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
+ (dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
+ dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
+ is_pipe_split = true;
+ break;
+ }
+ }
+
+ /* determine if minimal transition is required due to SubVP*/
+ if (surface_count > 0 && is_pipe_split) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
return force_minimal_pipe_splitting;
}
@@ -3582,6 +3647,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_context = dc_create_state(dc);
enum pipe_split_policy tmp_mpc_policy;
bool temp_dynamic_odm_policy;
+ bool temp_subvp_policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
@@ -3596,6 +3662,9 @@ static bool commit_minimal_transition_state(struct dc *dc,
temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
dc->debug.enable_single_display_2to1_odm_policy = false;
+ temp_subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+
dc_resource_state_copy_construct(transition_base_context, transition_context);
//commit minimal state
@@ -3624,6 +3693,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
dc->debug.pipe_split_policy = tmp_mpc_policy;
dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
+ dc->debug.force_disable_subvp = temp_subvp_policy;
if (ret != DC_OK) {
/*this should never happen*/
@@ -4587,6 +4657,37 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
+ *****************************************************************************
+ * Function: dc_process_dmub_dpia_hpd_int_enable
+ *
+ * @brief
+ * Submits dpia hpd int enable command to dmub via inbox message
+ *
+ * @param
+ * [in] dc: dc structure
+ * [in] hpd_int_enable: 1 for hpd int enable, 0 to disable
+ *
+ * @return
+ * None
+ *****************************************************************************
+ */
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+ cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+ cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+ dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+
+ DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+}
+
+/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
*/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3d19fb92333b..d7b1ace6328a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1307,7 +1307,10 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- // Init dc_panel_config
+ /* Init dc_panel_config by HW config */
+ if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+ dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+ /* Pickup base DM settings */
dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
// Override dc_panel_config if system has specific settings
dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
@@ -3143,7 +3146,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- if (allow_active && link->type == dc_connection_none) {
+ if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
// Don't enter PSR if panel is not connected
return false;
}
@@ -3375,8 +3378,8 @@ bool dc_link_setup_psr(struct dc_link *link,
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_11_0_1:
- if(!dc->debug.disable_z10)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
+ if (dc->debug.disable_z10)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
break;
default:
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index c57df45e83ff..1254d38f1778 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -944,6 +944,23 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
return status;
}
+static enum dc_status dpcd_128b_132b_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum dc_status status = core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ sizeof(link_training_setting->dpcd_lane_settings));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ return status;
+}
+
+
enum dc_status dpcd_set_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
@@ -964,16 +981,6 @@ enum dc_status dpcd_set_lane_settings(
link_training_setting->link_settings.lane_count);
if (is_repeater(link_training_setting, offset)) {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- offset,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
@@ -985,14 +992,6 @@ enum dc_status dpcd_set_lane_settings(
link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
@@ -2023,7 +2022,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
result = DP_128b_132b_LT_FAILED;
} else {
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dpcd_set_lane_settings(link, lt_settings, DPRX);
+ dpcd_128b_132b_set_lane_settings(link, lt_settings);
}
loop_count++;
}
@@ -5090,6 +5089,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
(dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
ASSERT(0);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
@@ -5098,6 +5098,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
if (is_lttpr_present)
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
return is_lttpr_present;
}
@@ -5134,6 +5135,7 @@ void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
} else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
*override = LTTPR_MODE_NON_LTTPR;
}
+ DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
}
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -5146,22 +5148,34 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
if (vbios_lttpr_aware) {
- if (vbios_lttpr_force_non_transparent)
+ if (vbios_lttpr_force_non_transparent) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
return LTTPR_MODE_NON_TRANSPARENT;
- else
+ } else {
+ DC_LOG_DC("chose LTTPR_MODE_TRANSPARENT by default because VBIOS did not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE to 1.\n");
return LTTPR_MODE_TRANSPARENT;
+ }
}
if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- link->dc->caps.extended_aux_timeout_support)
+ link->dc->caps.extended_aux_timeout_support) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT because dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A is set to 1 and extended AUX timeout is supported.\n");
return LTTPR_MODE_NON_TRANSPARENT;
+ }
+ DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
return LTTPR_MODE_NON_LTTPR;
}
enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
{
- return dp_is_lttpr_present(link) ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
+ enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
+
+ if (dp_is_lttpr_present(link))
+ mode = LTTPR_MODE_NON_TRANSPARENT;
+
+ DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
+ return mode;
}
static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
@@ -5179,9 +5193,10 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
link->dc, link->link_enc->transmitter);
if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
- cmd.cable_id.header.ret_status == 1)
+ cmd.cable_id.header.ret_status == 1) {
cable_id->raw = cmd.cable_id.data.output_raw;
-
+ DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+ }
return cmd.cable_id.header.ret_status == 1;
}
@@ -5228,6 +5243,7 @@ static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout
lttpr_present = dp_is_lttpr_present(link) ||
(!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support);
+ DC_LOG_DC("lttpr_present = %d.\n", lttpr_present ? 1 : 0);
/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to
* be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
@@ -5795,7 +5811,7 @@ void detect_edp_sink_caps(struct dc_link *link)
* Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
*/
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- (link->dc->debug.optimize_edp_link_rate ||
+ (link->panel_config.ilr.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -6744,7 +6760,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
ASSERT(link || crtc_timing); // invalid input
if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
- !link->dc->debug.optimize_edp_link_rate)
+ !link->panel_config.ilr.optimize_edp_link_rate)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8ee0d946bb2f..fd8db482e56f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1747,7 +1747,6 @@ bool dc_remove_plane_from_context(
for (i = 0; i < stream_status->plane_count; i++) {
if (stream_status->plane_states[i] == plane_state) {
-
dc_plane_state_release(stream_status->plane_states[i]);
break;
}
@@ -3683,4 +3682,56 @@ bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
(stream->timing.h_sync_width % 2 == 0);
}
return divisible;
+}
+
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ struct pipe_ctx *sec_top, *sec_bottom, *sec_next, *sec_prev;
+ const struct resource_pool *pool = dc->res_pool;
+
+ sec_top = sec_pipe->top_pipe;
+ sec_bottom = sec_pipe->bottom_pipe;
+ sec_next = sec_pipe->next_odm_pipe;
+ sec_prev = sec_pipe->prev_odm_pipe;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->top_pipe = sec_top;
+ sec_pipe->bottom_pipe = sec_bottom;
+ sec_pipe->next_odm_pipe = sec_next;
+ sec_pipe->prev_odm_pipe = sec_prev;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
+ if (odm) {
+ if (!sec_pipe->top_pipe)
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ else
+ sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+#endif
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
+#endif
+ }
+
+ return true;
}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index ae13887756bf..38d71b5c1f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -276,6 +276,8 @@ static void program_cursor_attributes(
}
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
@@ -382,6 +384,8 @@ static void program_cursor_position(
}
dc->hwss.set_cursor_position(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
if (pipe_to_program)
@@ -520,9 +524,9 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
/* remove writeback info for disabled writeback pipes from stream */
- for (i = 0, j = 0; i < stream->num_wb_info && j < MAX_DWB_PIPES; i++) {
+ for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
- if (i != j)
+ if (j < i)
/* trim the array */
stream->writeback_info[j] = stream->writeback_info[i];
j++;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 2ecf36e6329b..bfc5474c0f4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.205"
+#define DC_VER "3.2.207"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -821,7 +821,6 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool disable_fams;
- bool optimize_edp_link_rate; /* eDP ILR */
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
@@ -1192,6 +1191,8 @@ struct dc_plane_state {
enum dc_irq_source irq_source;
struct kref refcount;
struct tg_color visual_confirm_color;
+
+ bool is_statically_allocated;
};
struct dc_plane_info {
@@ -1611,6 +1612,9 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t mst_alloc_slots,
uint8_t *mst_slots_in_use);
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 89d7d3fd3321..0541e87e4f38 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -30,6 +30,7 @@
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
+#include "cursor_reg_cache.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -780,7 +781,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
// Store the original watermark value for this SubVP config so we can lower it when the
// MCLK switch starts
wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
- dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 / 1000;
+ (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;
cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
}
@@ -880,3 +881,147 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
diag_data.is_cw0_enabled,
diag_data.is_cw6_enabled);
}
+
+static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state != NULL) {
+ if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ return false;
+ }
+
+ if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
+ pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
+ return true;
+
+ return false;
+}
+
+static void dc_build_cursor_update_payload0(
+ struct pipe_ctx *pipe_ctx, uint8_t p_idx,
+ struct dmub_cmd_update_cursor_payload0 *payload)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ unsigned int panel_inst = 0;
+
+ if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
+ pipe_ctx->stream->link, &panel_inst))
+ return;
+
+ /* Payload: Cursor Rect is built from position & attribute
+ * x & y are obtained from position
+ */
+ payload->cursor_rect.x = hubp->cur_rect.x;
+ payload->cursor_rect.y = hubp->cur_rect.y;
+ /* w & h are obtained from attribute */
+ payload->cursor_rect.width = hubp->cur_rect.w;
+ payload->cursor_rect.height = hubp->cur_rect.h;
+
+ payload->enable = hubp->pos.cur_ctl.bits.cur_enable;
+ payload->pipe_idx = p_idx;
+ payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ payload->panel_inst = panel_inst;
+}
+
+static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv,
+ union dmub_rb_cmd *cmd)
+{
+ dc_dmub_srv_cmd_queue(dmub_srv, cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+}
+
+static void dc_build_cursor_position_update_payload0(
+ struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw;
+ pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
+ pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
+ pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;
+
+ /* dpp */
+ pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
+ pl->position_cfg.pipe_idx = p_idx;
+}
+
+static void dc_build_cursor_attribute_update_payload1(
+ struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
+ pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw;
+ pl_A->aHubp.size.raw = hubp->att.size.raw;
+ pl_A->aHubp.settings.raw = hubp->att.settings.raw;
+
+ /* dpp */
+ pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
+}
+
+/**
+ * ***************************************************************************************
+ *  dc_send_update_cursor_info_to_dmu: Populate the DMCUB cursor update info command
+ *
+ *  This function stores the cursor-related information and passes it to DMUB.
+ *
+ *  @param [in] pCtx: pipe context
+ *  @param [in] pipe_idx: pipe index
+ *
+ *  @return: void
+ *
+ * ***************************************************************************************
+ */
+
+void dc_send_update_cursor_info_to_dmu(
+ struct pipe_ctx *pCtx, uint8_t pipe_idx)
+{
+ union dmub_rb_cmd cmd = { 0 };
+ union dmub_cmd_update_cursor_info_data *update_cursor_info =
+ &cmd.update_cursor_info.update_cursor_info_data;
+
+ if (!dc_dmub_should_update_cursor_data(pCtx))
+ return;
+	/*
+	 * Since we use multi_cmd_pending for the dmub command, the 2nd command is
+	 * only assigned to store the cursor attributes info.
+	 * The 1st command can be viewed as 2 parts: one is for PSR/Replay data, the
+	 * other is to store the cursor position info.
+	 *
+	 * The command header type must be the same if using multi_cmd_pending.
+	 * Besides, while processing the 2nd command in DMU, the sub type is unused,
+	 * so it is meaningless to pass a sub type header with a different type.
+	 */
+
+ {
+ /* Build Payload#0 Header */
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes =
+ sizeof(cmd.update_cursor_info.update_cursor_info_data);
+		cmd.update_cursor_info.header.multi_cmd_pending = 1; /* 1st of the combined DMUB commands */
+
+ /* Prepare Payload */
+ dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0);
+
+ dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx,
+ pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+		/* Send update_cursor_info to queue */
+ dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+ {
+ /* Build Payload#1 Header */
+ memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data));
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
+ cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */
+
+ dc_build_cursor_attribute_update_payload1(
+ &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
+ pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+
+		/* Queue the 2nd update_cursor_info command and flush both to DMU */
+ dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+}
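
For illustration of the multi_cmd_pending sequence used above, here is a minimal standalone sketch of the two-command pattern. The struct layouts and helper functions are simplified placeholders, not the real DMUB definitions: the first command carries the position payload and is queued with multi_cmd_pending = 1; the second carries the attributes, clears the flag, and triggers execution.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the DMUB command structures (not the real layouts). */
struct fake_cmd_header {
	uint8_t type;
	uint8_t multi_cmd_pending;   /* 1: more commands of this type follow, 0: last command */
	uint16_t payload_bytes;
};

struct fake_cursor_cmd {
	struct fake_cmd_header header;
	uint32_t payload[8];         /* position data in cmd #1, attributes in cmd #2 */
};

/* Placeholder ring-buffer helpers standing in for the queue/execute/wait calls. */
static void fake_queue(const struct fake_cursor_cmd *cmd)
{
	printf("queued: type=%u pending=%u bytes=%u\n",
	       (unsigned)cmd->header.type,
	       (unsigned)cmd->header.multi_cmd_pending,
	       (unsigned)cmd->header.payload_bytes);
}

static void fake_execute_and_wait(void)
{
	printf("executed queued commands and waited for idle\n");
}

int main(void)
{
	struct fake_cursor_cmd cmd;

	/* Command #1: cursor position, marked as part of a multi-command burst. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.header.type = 1;               /* stand-in for an UPDATE_CURSOR_INFO type */
	cmd.header.multi_cmd_pending = 1;  /* more commands of the same type follow */
	cmd.header.payload_bytes = sizeof(cmd.payload);
	fake_queue(&cmd);

	/* Command #2: cursor attributes, same type, last in the burst. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.header.type = 1;
	cmd.header.multi_cmd_pending = 0;  /* firmware processes the burst after this one */
	cmd.header.payload_bytes = sizeof(cmd.payload);
	fake_queue(&cmd);
	fake_execute_and_wait();

	return 0;
}
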
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 7e438345b1a8..d34f5563df2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -88,4 +88,5 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
+void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index bf5f9e2773bc..caf0c7af2d0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -138,6 +138,10 @@ struct dc_panel_config {
bool disable_dsc_edp;
unsigned int force_dsc_edp_policy;
} dsc;
+ /* eDP ILR */
+ struct ilr {
+ bool optimize_edp_link_rate; /* eDP ILR */
+ } ilr;
};
/*
* A link contains one or more sinks and their connected status.
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 32782ef9ef77..140297c8ff55 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -942,10 +942,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_RET_ERROR_ENGINE_ACQUIRE:
case AUX_RET_ERROR_UNKNOWN:
default:
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: Failure: operation_result=%d",
- (int)operation_result);
goto fail;
}
}
@@ -953,14 +949,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
fail:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
- "dce_aux_transfer_with_retries: FAILURE");
+ "%s: Failure: operation_result=%d",
+ __func__,
+ (int)operation_result);
if (!payload_reply)
payload->reply = NULL;
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
- WPP_BIT_FLAG_DC_ERROR,
- "AUX transaction failed. Result: %d",
- operation_result);
-
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 897f412f539e..b9765b3899e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -469,6 +469,7 @@ void dpp1_set_cursor_position(
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, cur_en);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 72521749c01d..11e4c4e46947 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2244,6 +2244,9 @@ void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -2254,13 +2257,21 @@ void dcn10_enable_timing_synchronization(
for (i = 0; i < group_size; i++) {
if (grouped_pipes[i]->stream == NULL)
continue;
+
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream->vblank_synchronized = false;
}
- for (i = 1; i < group_size; i++)
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
grouped_pipes[i]->stream_res.tg,
grouped_pipes[0]->stream_res.tg->inst);
+ }
DC_SYNC_INFO("Waiting for trigger\n");
@@ -2268,12 +2279,21 @@ void dcn10_enable_timing_synchronization(
* synchronized. Look at last pipe programmed to reset.
*/
- wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
- for (i = 1; i < group_size; i++)
+ if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
grouped_pipes[i]->stream_res.tg);
+ }
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -3005,6 +3025,7 @@ void dcn10_prepare_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3027,8 +3048,11 @@ void dcn10_prepare_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3041,6 +3065,7 @@ void dcn10_optimize_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3064,8 +3089,11 @@ void dcn10_optimize_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3344,127 +3372,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool dcn10_dmub_should_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct dc_debug_options *debug)
-{
- if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- return false;
-
- if (dcn10_can_pipe_disable_cursor(pipe_ctx))
- return false;
-
- if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
- return true;
-
- return false;
-}
-
-static void dcn10_dmub_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct hubp *hubp,
- const struct dc_cursor_mi_param *param,
- const struct dc_cursor_position *cur_pos,
- const struct dc_cursor_attributes *cur_attr)
-{
- union dmub_rb_cmd cmd;
- struct dmub_cmd_update_cursor_info_data *update_cursor_info;
- const struct dc_cursor_position *pos;
- const struct dc_cursor_attributes *attr;
- int src_x_offset = 0;
- int src_y_offset = 0;
- int x_hotspot = 0;
- int cursor_height = 0;
- int cursor_width = 0;
- uint32_t cur_en = 0;
- unsigned int panel_inst = 0;
-
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
-
- if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
- return;
- /**
- * if cur_pos == NULL means the caller is from cursor_set_attribute
- * then driver use previous cursor position data
- * if cur_attr == NULL means the caller is from cursor_set_position
- * then driver use previous cursor attribute
- * if cur_pos or cur_attr is not NULL then update it
- */
- if (cur_pos != NULL)
- pos = cur_pos;
- else
- pos = &hubp->curs_pos;
-
- if (cur_attr != NULL)
- attr = cur_attr;
- else
- attr = &hubp->curs_attr;
-
- if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst))
- return;
-
- src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
- x_hotspot = pos->x_hotspot;
- cursor_height = (int)attr->height;
- cursor_width = (int)attr->width;
- cur_en = pos->enable ? 1:0;
-
- // Rotated cursor width/height and hotspots tweaks for offset calculation
- if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- swap(cursor_height, cursor_width);
- if (param->rotation == ROTATION_ANGLE_90) {
- src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
- }
- } else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
-
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
- }
-
- if (src_x_offset >= (int)param->viewport.width)
- cur_en = 0; /* not visible beyond right edge*/
-
- if (src_x_offset + cursor_width <= 0)
- cur_en = 0; /* not visible beyond left edge*/
-
- if (src_y_offset >= (int)param->viewport.height)
- cur_en = 0; /* not visible beyond bottom edge*/
-
- if (src_y_offset + cursor_height <= 0)
- cur_en = 0; /* not visible beyond top edge*/
-
- // Cursor bitmaps have different hotspot values
- // There's a possibility that the above logic returns a negative value, so we clamp them to 0
- if (src_x_offset < 0)
- src_x_offset = 0;
- if (src_y_offset < 0)
- src_y_offset = 0;
-
- memset(&cmd, 0x0, sizeof(cmd));
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes =
- sizeof(cmd.update_cursor_info.update_cursor_info_data);
- update_cursor_info = &cmd.update_cursor_info.update_cursor_info_data;
- update_cursor_info->cursor_rect.x = src_x_offset + param->viewport.x;
- update_cursor_info->cursor_rect.y = src_y_offset + param->viewport.y;
- update_cursor_info->cursor_rect.width = attr->width;
- update_cursor_info->cursor_rect.height = attr->height;
- update_cursor_info->enable = cur_en;
- update_cursor_info->pipe_idx = pipe_ctx->pipe_idx;
- update_cursor_info->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
- update_cursor_info->panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(pipe_ctx->stream->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(pipe_ctx->stream->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(pipe_ctx->stream->ctx->dmub_srv);
-}
-
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3699,7 +3606,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
}
- dcn10_dmub_update_cursor_data(pipe_ctx, hubp, &param, &pos_cpy, NULL);
hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
@@ -3707,25 +3613,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
- struct dc_cursor_mi_param param = { 0 };
-
- /**
- * If enter PSR without cursor attribute update
- * the cursor attribute of dmub_restore_plane
- * are initial value. call dmub to exit PSR and
- * restore plane then update cursor attribute to
- * avoid override with initial value
- */
- if (pipe_ctx->plane_state != NULL) {
- param.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- param.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz;
- param.viewport = pipe_ctx->plane_res.scl_data.viewport;
- param.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz;
- param.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert;
- param.rotation = pipe_ctx->plane_state->rotation;
- param.mirror = pipe_ctx->plane_state->horizontal_mirror;
- dcn10_dmub_update_cursor_data(pipe_ctx, pipe_ctx->plane_res.hubp, &param, NULL, attributes);
- }
pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.hubp, attributes);
@@ -3810,28 +3697,14 @@ void dcn10_calc_vupdate_position(
uint32_t *start_line,
uint32_t *end_line)
{
- const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
- int vline_int_offset_from_vupdate =
- pipe_ctx->stream->periodic_interrupt.lines_offset;
- int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- int start_position;
-
- if (vline_int_offset_from_vupdate > 0)
- vline_int_offset_from_vupdate--;
- else if (vline_int_offset_from_vupdate < 0)
- vline_int_offset_from_vupdate++;
-
- start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- if (start_position >= 0)
- *start_line = start_position;
+ if (vupdate_pos >= 0)
+ *start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
else
- *start_line = dc_crtc_timing->v_total + start_position - 1;
-
- *end_line = *start_line + 2;
-
- if (*end_line >= dc_crtc_timing->v_total)
- *end_line = 2;
+ *start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
}
static void dcn10_cal_vline_position(
@@ -3840,23 +3713,27 @@ static void dcn10_cal_vline_position(
uint32_t *start_line,
uint32_t *end_line)
{
- switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
- case START_V_UPDATE:
- dcn10_calc_vupdate_position(
- dc,
- pipe_ctx,
- start_line,
- end_line);
- break;
- case START_V_SYNC:
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
+
+ if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
+ if (vline_pos > 0)
+ vline_pos--;
+ else if (vline_pos < 0)
+ vline_pos++;
+
+ vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+ if (vline_pos >= 0)
+ *start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
+ else
+ *start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
// vsync is line 0 so start_line is just the requested line offset
- *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset;
- *end_line = *start_line + 2;
- break;
- default:
+ *start_line = vline_pos;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else
ASSERT(0);
- break;
- }
}
void dcn10_setup_periodic_interrupt(
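
As a quick check of the wrap-around math introduced in dcn10_calc_vupdate_position() and dcn10_cal_vline_position(), the following standalone snippet applies the same formulas outside the driver; the v_total and offset values are made-up examples, not taken from real timings.

#include <stdio.h>

/* Same wrap logic as the new vline position path, lifted out for illustration. */
static void calc_lines(int vline_pos, int v_total, int *start_line, int *end_line)
{
	if (vline_pos >= 0)
		*start_line = vline_pos - ((vline_pos / v_total) * v_total);
	else
		*start_line = vline_pos + ((-vline_pos / v_total) + 1) * v_total - 1;
	*end_line = (*start_line + 2) % v_total;
}

int main(void)
{
	int start, end;

	calc_lines(40, 1125, &start, &end);    /* offset inside the frame: 40, 42 */
	printf("start=%d end=%d\n", start, end);

	calc_lines(-3, 1125, &start, &end);    /* negative offset wraps back: 1121, 1123 */
	printf("start=%d end=%d\n", start, end);

	calc_lines(1124, 1125, &start, &end);  /* end_line wraps past v_total: 1124, 1 */
	printf("start=%d end=%d\n", start, end);

	return 0;
}
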
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index ea7739255119..33d780218790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -207,10 +207,7 @@ void optc1_program_timing(
/* In case of V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
* OTG_V_TOTAL_MIN are equal to V_TOTAL.
*/
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, v_total);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, v_total);
+ optc->funcs->set_vtotal_min_max(optc, v_total, v_total);
/* v_sync_start = 0, v_sync_end = v_sync_width */
v_sync_end = patched_crtc_timing.v_sync_width;
@@ -649,13 +646,6 @@ uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
void optc1_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t regval = 0;
-
- regval = REG_READ(OTG_CONTROL);
-
- /* otg is not running, do not need to be locked */
- if ((regval & 0x1) == 0x0)
- return;
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
@@ -663,12 +653,10 @@ void optc1_lock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 1);
/* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) {
-
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
- }
}
void optc1_unlock(struct timing_generator *optc)
@@ -679,16 +667,6 @@ void optc1_unlock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 0);
}
-bool optc1_is_locked(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t locked;
-
- REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
-
- return (locked == 1);
-}
-
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
@@ -941,11 +919,7 @@ void optc1_set_drr(
}
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
-
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
@@ -964,11 +938,7 @@ void optc1_set_drr(
OTG_V_TOTAL_MAX_SEL, 0,
OTG_FORCE_LOCK_ON_EVENT, 0);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, 0);
-
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, 0);
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
}
@@ -1583,11 +1553,11 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = NULL,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.set_test_pattern = optc1_set_test_pattern,
.program_stereo = optc1_program_stereo,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 6323ca6dc3b3..88ac5f6f4c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -654,7 +654,6 @@ void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking);
bool optc1_is_blanked(struct timing_generator *optc);
-bool optc1_is_locked(struct timing_generator *optc);
void optc1_program_blank_color(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 831080b9eb87..56d30baf12df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1336,6 +1336,21 @@ static noinline void dcn10_resource_construct_fp(
}
}
+static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+{
+ int i;
+
+ if (clks->num_levels == 0)
+ return false;
+
+ for (i = 0; i < clks->num_levels; i++)
+ /* Ensure that the result is sane */
+ if (clks->data[i].clocks_in_khz == 0)
+ return false;
+
+ return true;
+}
+
static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@@ -1345,6 +1360,9 @@ static bool dcn10_resource_construct(
int j;
struct dc_context *ctx = dc->ctx;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
+ bool res;
ctx->dc_bios->regs = &bios_regs;
@@ -1523,15 +1541,53 @@ static bool dcn10_resource_construct(
&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
dc->debug.az_endpoint_mute_only = false;
- DC_FP_START();
- if (!dc->debug.disable_pplib_clock_request)
- dcn_bw_update_from_pplib(dc);
+
+ if (!dc->debug.disable_pplib_clock_request) {
+ /*
+ * TODO: This is not the proper way to obtain
+ * fabric_and_dram_bandwidth, should be min(fclk, memclk).
+ */
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&fclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_fclks(dc, &fclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&dcfclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+ }
+
dcn_bw_sync_calcs_and_dml(dc);
if (!dc->debug.disable_pplib_wm_range) {
dc->res_pool = &pool->base;
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_START();
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
+ DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
- DC_FP_END();
{
struct irq_service_init_data init_data;
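
The validation added above (query the clock table, then reject empty tables or zero entries before handing them to the bandwidth code) can be summarized by this standalone sketch; the struct and the sample values are simplified placeholders rather than the real dm_pp types.

#include <stdbool.h>
#include <stdio.h>

#define MAX_LEVELS 8

/* Simplified stand-in for dm_pp_clock_levels_with_voltage. */
struct clock_levels {
	int num_levels;
	unsigned int clocks_in_khz[MAX_LEVELS];
};

/* Mirrors verify_clock_values(): an empty table or any zero entry is rejected. */
static bool clocks_look_sane(const struct clock_levels *clks)
{
	int i;

	if (clks->num_levels == 0)
		return false;

	for (i = 0; i < clks->num_levels; i++)
		if (clks->clocks_in_khz[i] == 0)
			return false;

	return true;
}

int main(void)
{
	struct clock_levels good = { 3, { 400000, 800000, 1200000 } };
	struct clock_levels bad  = { 2, { 400000, 0 } };

	/* Only a table that passes the check would be fed to the bandwidth code. */
	printf("good table sane: %d\n", clocks_look_sane(&good));
	printf("bad table sane:  %d\n", clocks_look_sane(&bad));

	return 0;
}
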
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b1ec0e6f7f58..4996d2810edb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -617,6 +617,17 @@ void hubp2_cursor_set_attributes(
CURSOR0_DST_Y_OFFSET, 0,
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
}
void hubp2_dmdata_set_attributes(
@@ -1033,6 +1044,25 @@ void hubp2_cursor_set_position(
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
+ /* Cursor Position Register Config */
+ hubp->pos.cur_ctl.bits.cur_enable = cur_en;
+ hubp->pos.position.bits.x_pos = pos->x;
+ hubp->pos.position.bits.y_pos = pos->y;
+ hubp->pos.hot_spot.bits.x_hot = x_hotspot;
+ hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+ hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
+ /* Cursor Rectangle Cache
+ * Cursor bitmaps have different hotspot values
+	 * There's a possibility that the above logic returns negative values,
+	 * so we clamp them to 0
+ */
+ if (src_x_offset < 0)
+ src_x_offset = 0;
+ if (src_y_offset < 0)
+ src_y_offset = 0;
+	/* Save the necessary cursor x, y position; w, h are saved in the attribute func. */
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e1d271fe9e64..d732b6f031a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1862,24 +1862,6 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpcc_pipe;
-
- if (pipe->vtp_locked) {
- dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp);
- pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true);
- pipe->vtp_locked = false;
-
- for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* If an active, non-phantom pipe is being transitioned into a phantom
@@ -2018,6 +2000,10 @@ void dcn20_optimize_bandwidth(
context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ /* increase compbuf size */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2033,9 +2019,6 @@ void dcn20_optimize_bandwidth(
pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
}
}
- /* increase compbuf size */
- if (hubbub->funcs->program_compbuf_size)
- hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
}
bool dcn20_update_bandwidth(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 0340fdd3f5fb..a08c335b7383 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -529,6 +529,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5752271f22df..c5e200d09038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,15 +67,9 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active, prefetch_done;
+ uint32_t riommu_active;
int i;
- REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
-
- if (prefetch_done) {
- hubbub->riommu_active = true;
- return;
- }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 7cb35bb1c0f1..887081472c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -657,7 +657,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .optimize_edp_link_rate = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -677,6 +676,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
@@ -1367,6 +1372,11 @@ static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
+static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
#define CTX ctx
#define REG(reg_name) \
@@ -1408,6 +1418,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
+ .get_panel_config_defaults = dcn21_get_panel_config_defaults,
};
static bool dcn21_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 4a668d6563df..e5b7ef7422b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -372,6 +372,10 @@ void dpp3_set_cursor_attributes(
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
+
+ dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
+ dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en;
+ dpp_base->att.cur0_ctl.bits.mode = color_format;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 1782b9c26cf4..892d3c4d01a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -319,13 +319,13 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
@@ -366,4 +366,3 @@ void dcn30_timing_generator_init(struct optc *optc1)
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 3a3b2ac791c7..020f512e9690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1655,6 +1655,9 @@ noinline bool dcn30_internal_validate_bw(
if (!pipes)
return false;
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ context->bw_ctx.dml.vba.VoltageLevel = 0;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
@@ -1873,6 +1876,7 @@ noinline bool dcn30_internal_validate_bw(
if (repopulate_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 559e563d5bc1..f04595b750ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -852,7 +852,7 @@ static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
vmid->masks = &vmid_masks;
}
- hubbub3->num_vmid = res_cap_dcn301.num_vmid;
+ hubbub3->num_vmid = res_cap_dcn301.num_vmid;
return &hubbub3->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 52fb2bf3d578..814f401db3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -197,7 +197,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
uint32_t h_back_porch;
uint32_t h_width;
uint32_t v_height;
- unsigned long long v_freq;
+ uint64_t v_freq;
uint8_t misc0 = 0;
uint8_t misc1 = 0;
uint8_t hsp;
@@ -360,7 +360,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
- v_freq = hw_crtc_timing.pix_clk_100hz * 100;
+ v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100;
/* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
*
@@ -436,32 +436,28 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
{
struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
uint32_t dmdata_packet_enabled = 0;
- bool sdp_stream_enable = false;
- if (info_frame->vsc.valid) {
+ if (info_frame->vsc.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
0, /* packetIndex */
&info_frame->vsc,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->spd.valid) {
+
+ if (info_frame->spd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
2, /* packetIndex */
&info_frame->spd,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->hdrsmd.valid) {
+
+ if (info_frame->hdrsmd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
3, /* packetIndex */
&info_frame->hdrsmd,
true);
- sdp_stream_enable = true;
- }
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 2f7404a97479..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -201,7 +201,6 @@ void optc31_set_drr(
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
-
} else {
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
@@ -260,7 +259,6 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 8c1a6fb36306..fddc21a5a04c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -888,9 +888,8 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .optimize_edp_link_rate = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -911,6 +910,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1803,6 +1808,11 @@ validate_out:
return out;
}
+static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1829,6 +1839,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn31_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 0d2ffb692957..7e773bf7b895 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -262,7 +262,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
-void enc314_stream_encoder_dp_blank(
+static void enc314_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 24ec71cbd3e3..d0ad72caead2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -881,7 +881,8 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_z10 = true, /*hw not support it*/
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
@@ -914,7 +915,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.seamless_boot_odm_combine = true
};
@@ -936,6 +936,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1675,6 +1681,11 @@ static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
DC_FP_END();
}
+static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1697,6 +1708,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn314_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index eebb42c9ddd6..58746c437554 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.psr_power_use_phy_fsm = 0,
};
@@ -907,6 +906,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1708,6 +1713,11 @@ static int dcn315_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1721,7 +1731,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.panel_cntl_create = dcn31_panel_cntl_create,
.validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
- .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1734,6 +1744,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn315_get_panel_config_defaults,
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index f4b52a35ad84..6b40a11ac83a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -906,6 +905,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1710,6 +1715,11 @@ static int dcn316_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1736,6 +1746,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn316_get_panel_config_defaults,
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index fdae6aa89908..076969d928af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -150,12 +150,6 @@ static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
}
-void enc32_set_dig_output_mode(struct link_encoder *enc, uint8_t pix_per_container)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container);
-}
-
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -186,7 +180,6 @@ static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.is_in_alt_mode = dcn32_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn32_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn32_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index 749a1e8cb811..bbcfce06bec0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,8 +53,4 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
-void enc32_set_dig_output_mode(
- struct link_encoder *enc,
- uint8_t pix_per_container);
-
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 0e9dce414641..d19fc93dbc75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -243,6 +243,39 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+static bool is_h_timing_divisible_by_2(const struct dc_crtc_timing *timing)
+{
+	/* Math borrowed from the function of the same name in inc/resource;
+	 * checks whether the horizontal timing is divisible by 2.
+	 */
+
+ bool divisible = false;
+ uint16_t h_blank_start = 0;
+ uint16_t h_blank_end = 0;
+
+ if (timing) {
+ h_blank_start = timing->h_total - timing->h_front_porch;
+ h_blank_end = h_blank_start - timing->h_addressable;
+
+ /* HTOTAL, Hblank start/end, and Hsync start/end all must be
+ * divisible by 2 in order for the horizontal timing params
+ * to be considered divisible by 2. Hsync start is always 0.
+ */
+ divisible = (timing->h_total % 2 == 0) &&
+ (h_blank_start % 2 == 0) &&
+ (h_blank_end % 2 == 0) &&
+ (timing->h_sync_width % 2 == 0);
+ }
+ return divisible;
+}
+
+static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_timing *timing)
+{
+	/* Should be functionally the same as dcn32_is_dp_dig_pixel_rate_div_policy for DP encoders */
+ return is_h_timing_divisible_by_2(timing) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy;
+}
+
static void enc32_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -259,7 +292,7 @@ static void enc32_stream_encoder_dp_unblank(
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
}
@@ -355,7 +388,7 @@ static void enc32_dp_set_dsc_config(struct stream_encoder *enc,
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode);
+ REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
}
/* this function read dsc related register fields to be logged later in dcn10_log_hw_state
@@ -378,24 +411,6 @@ static void enc32_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
-static void enc32_stream_encoder_reset_fifo(struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t fifo_enabled;
-
- REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &fifo_enabled);
-
- if (fifo_enabled == 0) {
- /* reset DIG resync FIFO */
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
- /* TODO: fix timeout when wait for DIG_FIFO_RESET_DONE */
- //REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 1, 100);
- udelay(1);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 1, 100);
- }
-}
-
static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -425,8 +440,6 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc32_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
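
The divisibility test added above reduces to checking that h_total, both horizontal blank edges, and the sync width are all even. Below is a standalone rendition with made-up timing values, assuming a simplified stand-in for the dc_crtc_timing fields it reads.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the dc_crtc_timing fields used by the check. */
struct timing_example {
	uint16_t h_total;
	uint16_t h_front_porch;
	uint16_t h_addressable;
	uint16_t h_sync_width;
};

/* Same idea as is_h_timing_divisible_by_2(): every horizontal edge must be even. */
static bool h_timing_divisible_by_2(const struct timing_example *t)
{
	uint16_t h_blank_start = t->h_total - t->h_front_porch;
	uint16_t h_blank_end = h_blank_start - t->h_addressable;

	return (t->h_total % 2 == 0) &&
	       (h_blank_start % 2 == 0) &&
	       (h_blank_end % 2 == 0) &&
	       (t->h_sync_width % 2 == 0);
}

int main(void)
{
	/* 1920-wide example timing: everything even, so the div-by-2 policy may apply. */
	struct timing_example even = { 2200, 88, 1920, 44 };
	/* An odd front porch makes h_blank_start odd, so the policy is skipped. */
	struct timing_example odd  = { 2200, 87, 1920, 44 };

	printf("even timing divisible: %d\n", h_timing_divisible_by_2(&even));
	printf("odd timing divisible:  %d\n", h_timing_divisible_by_2(&odd));

	return 0;
}
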
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 250d9a341cf6..ecd041a446d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -71,7 +71,9 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL5, DP, id), \
SRI(DP_SEC_CNTL6, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
@@ -93,7 +95,7 @@
SRI(DIG_FIFO_CTRL0, DIG, id)
-#define SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)\
+#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, mask_sh),\
@@ -106,6 +108,7 @@
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
@@ -244,15 +247,6 @@
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh),\
- SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh)
-#else
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)
-#endif
-
void dcn32_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
index 9db1323e1933..176b1537d2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -47,6 +47,7 @@
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index f6d3da475835..9fbb72369c10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -936,6 +936,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.program_watermarks = hubbub32_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub32_init_watermarks,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 2038cbda33f7..ac1c6458dd55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -79,6 +79,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp)
uint32_t reg_val;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ /* For phantom pipe enable, disable GSL */
+ REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, 0);
REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, 1);
reg_val = REG_READ(DCHUBP_CNTL);
if (reg_val) {
@@ -179,12 +181,12 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_init = hubp3_init,
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
- .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
- .hubp_set_flip_int = hubp1_set_flip_int
+ .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index a750343ca521..cf5bd9713f54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -206,8 +206,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- uint8_t i;
- int j;
+ int i, j;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
uint32_t cursor_size = 0;
@@ -630,10 +629,9 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
params = &dpp_base->degamma_params;
}
- result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
+ dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
- if (result &&
- pipe_ctx->stream_res.opp &&
+ if (pipe_ctx->stream_res.opp &&
pipe_ctx->stream_res.opp->ctx &&
hws->funcs.set_mcm_luts)
result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state);
@@ -991,6 +989,10 @@ void dcn32_init_hw(struct dc *dc)
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
}
+
+ /* Enable support for ODM and windowed MPO if policy flag is set */
+ if (dc->debug.enable_single_display_2to1_odm_policy)
+ dc->config.enable_windowed_mpo_odm = true;
}
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
@@ -1145,23 +1147,25 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
true);
}
- // Don't program pixel clock after link is already enabled
-/* if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- &pipe_ctx->pll_settings)) {
- BREAK_TO_DEBUGGER();
- }*/
+ if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
- if (pipe_ctx->stream_res.dsc)
update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);
+
+		/* If the pipe is no longer used for ODM, its DSC block needs to be disconnected */
+ if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
+ current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
+ struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
+ /* disconnect DSC block from stream */
+ dsc->funcs->dsc_disconnect(dsc);
+ }
+ }
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;
// For phantom pipes, use the same programming as the main pipes
@@ -1189,7 +1193,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -1226,7 +1230,6 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
struct pipe_ctx *odm_pipe;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
uint32_t pix_per_cycle = 1;
params.opp_cnt = 1;
@@ -1245,7 +1248,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
pipe_ctx->stream_res.tg->inst);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) {
params.timing.pix_clk_100hz /= 2;
pix_per_cycle = 2;
}
@@ -1262,6 +1265,9 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
@@ -1394,7 +1400,7 @@ bool dcn32_dsc_pg_status(
break;
}
- return pwr_status == 0 ? true : false;
+ return pwr_status == 0;
}
void dcn32_update_dsc_pg(struct dc *dc,
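A standalone sketch (not the driver code) of the divider choice this file's hunks adjust: on the plain DP path k1 stays at 1 and k2 drops from 4 to 2 when either ODM 2:1 combine or the DIG pixel-rate-div policy applies, and the policy itself is now gated on the horizontal timing being divisible by 2. All names below are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the k2 selection in dcn32_calculate_dccg_k1_k2_values()
 * for the non-DSC, non-420 DP branch. */
static int choose_k2_div(bool odm_2to1, bool dig_div_policy, bool h_timing_div_by_2)
{
	/* The policy only counts when the horizontal timing is divisible by 2,
	 * mirroring the new check added to dcn32_is_dp_dig_pixel_rate_div_policy(). */
	bool policy = dig_div_policy && h_timing_div_by_2;

	return (odm_2to1 || policy) ? 2 : 4;	/* PIXEL_RATE_DIV_BY_2 vs _BY_4 */
}

int main(void)
{
	printf("odm 2:1, no policy     -> k2 = %d\n", choose_k2_div(true, false, false));
	printf("policy, h not div by 2 -> k2 = %d\n", choose_k2_div(false, true, false));
	printf("policy, h div by 2     -> k2 = %d\n", choose_k2_div(false, true, true));
	return 0;
}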
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index ec3989d37086..2b33eeb213e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -151,7 +151,7 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
- 1, 100000);
+ 1, 150000);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 05de97ea855f..a88dd7b3d1c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1680,6 +1680,8 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+ phantom_plane->is_phantom = true;
+
dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
curr_pipe = curr_pipe->bottom_pipe;
@@ -1749,6 +1751,10 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
pipe->stream->mall_stream_config.type = SUBVP_NONE;
pipe->stream->mall_stream_config.paired_stream = NULL;
}
+
+ if (pipe->plane_state) {
+ pipe->plane_state->is_phantom = false;
+ }
}
return removed_pipe;
}
@@ -1798,14 +1804,39 @@ bool dcn32_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ struct mall_temp_config mall_temp_config;
+
+ /* To handle FreeSync properly, set the FreeSync DML parameters
+ * to their default state for the first stage of validation
+ */
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+ context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+
DC_LOGGER_INIT(dc->ctx->logger);
+ /* For fast validation, there are situations where a shallow copy of
+ * the dc->current_state is created for the validation. In this case
+ * we want to save and restore the mall config because we always
+ * teardown subvp at the beginning of validation (and don't attempt
+ * to add it back if it's fast validation). If we don't restore the
+ * subvp config in cases of fast validation + shallow copy of the
+ * dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ */
+ if (fast_validate) {
+ memset(&mall_temp_config, 0, sizeof(mall_temp_config));
+ dcn32_save_mall_state(dc, context, &mall_temp_config);
+ }
+
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
+ if (fast_validate)
+ dcn32_restore_mall_state(dc, context, &mall_temp_config);
+
if (pipe_cnt == 0)
goto validate_out;
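A minimal sketch of the save → validate → restore flow this hunk introduces, using simplified stand-in types; the real driver caches each pipe's mall_stream_config and is_phantom flag via dcn32_save_mall_state()/dcn32_restore_mall_state().

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_PIPES 6

/* Hypothetical, simplified stand-ins for the driver's per-pipe MALL state. */
struct pipe_state { int mall_config; bool is_phantom; };
struct state { struct pipe_state pipes[MAX_PIPES]; };
struct mall_backup { struct pipe_state pipes[MAX_PIPES]; };

static void save_mall(const struct state *ctx, struct mall_backup *bk)
{
	memcpy(bk->pipes, ctx->pipes, sizeof(bk->pipes));
}

static void restore_mall(struct state *ctx, const struct mall_backup *bk)
{
	memcpy(ctx->pipes, bk->pipes, sizeof(bk->pipes));
}

static bool validate_bandwidth(struct state *ctx, bool fast_validate)
{
	struct mall_backup bk;

	if (fast_validate)
		save_mall(ctx, &bk);	/* validation always tears SubVP down... */

	memset(ctx->pipes, 0, sizeof(ctx->pipes));	/* ...stand-in for the internal pass */

	if (fast_validate)
		restore_mall(ctx, &bk);	/* ...so put the config back afterwards */
	return true;
}

int main(void)
{
	struct state ctx = { .pipes[0] = { .mall_config = 1, .is_phantom = true } };

	validate_bandwidth(&ctx, true);
	printf("pipe0 mall=%d phantom=%d\n", ctx.pipes[0].mall_config, ctx.pipes[0].is_phantom);
	return 0;
}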
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 55945cca2260..f76120e67c16 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -45,6 +45,17 @@
extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
+/* Temp struct used to save and restore MALL config
+ * during validation.
+ *
+ * TODO: Move MALL config into dc_state instead of stream struct
+ * to avoid needing to save/restore.
+ */
+struct mall_temp_config {
+ struct mall_stream_config mall_stream_config[MAX_PIPES];
+ bool is_phantom_plane[MAX_PIPES];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -108,6 +119,8 @@ bool dcn32_subvp_in_use(struct dc *dc,
bool dcn32_mpo_in_use(struct dc_state *context);
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+
struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -120,6 +133,15 @@ void dcn32_determine_det_override(struct dc *dc,
void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
+
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index a2a70a1572b7..d51d0c40ae5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -233,6 +233,23 @@ bool dcn32_mpo_in_use(struct dc_state *context)
return false;
}
+
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
+ return true;
+ }
+ return false;
+}
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -363,3 +380,74 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
} else
dcn32_determine_det_override(dc, context, pipes);
}
+
+/**
+ * *******************************************************************************************
+ * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases
+ *
+ * This function saves the MALL (SubVP) config for fast validation cases. For fast validation,
+ * there are situations where a shallow copy of the dc->current_state is created for the
+ * validation. In this case we want to save and restore the mall config because we always
+ * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
+ * fast validation). If we don't restore the subvp config in cases of fast validation +
+ * shallow copy of the dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ *
+ * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
+ * validation. We don't expect this to happen in fast_validation=1 cases.
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [out]: temp_config: struct used to cache the existing MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
+
+ if (pipe->plane_state)
+ temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
+ }
+}
+
+/**
+ * *******************************************************************************************
+ * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases
+ *
+ * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in/out]: context: New DC state to be programmed, restore MALL state into here
+ * @param [in]: temp_config: struct that has the cached MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
+
+ if (pipe->plane_state)
+ pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 49682a31ecbd..fa9b6603cfd3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -91,7 +91,6 @@ static const struct link_encoder_funcs dcn321_link_enc_funcs = {
.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn321_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index aed0f689cbbf..61087f2385a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -94,8 +94,6 @@
#include "dcn20/dcn20_vmid.h"
#define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
enum dcn321_clk_src_array_id {
DCN321_CLK_SRC_PLL0,
@@ -1606,7 +1604,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1656,7 +1654,7 @@ static bool dcn321_resource_construct(
#undef REG_STRUCT
#define REG_STRUCT dccg_regs
- dccg_regs_init();
+ dccg_regs_init();
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index cb81ed2fbd53..ca7d24000621 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -34,7 +34,7 @@ dml_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+ifneq ($(call gcc-min-version, 70100),y)
IS_OLD_GCC = 1
endif
endif
@@ -77,7 +77,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index d46adc849d2a..e73f089c84bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1444,81 +1444,67 @@ unsigned int dcn_find_dcfclk_suits_all(
return dcf_clk;
}
-static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks)
{
- int i;
-
- if (clks->num_levels == 0)
- return false;
-
- for (i = 0; i < clks->num_levels; i++)
- /* Ensure that the result is sane */
- if (clks->data[i].clocks_in_khz == 0)
- return false;
+ unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
- return true;
+ ASSERT(fclks->num_levels);
+
+ vmin0p65_idx = 0;
+ vmid0p72_idx = fclks->num_levels -
+ (fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+ vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+ vmax0p9_idx = fclks->num_levels - 1;
+
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmid0p72_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vnom0p8_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmax0p9_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
}
-void dcn_bw_update_from_pplib(struct dc *dc)
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks)
{
- struct dc_context *ctx = dc->ctx;
- struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
- bool res;
- unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
-
- /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
-
- if (res)
- res = verify_clock_values(&fclks);
-
- if (res) {
- ASSERT(fclks.num_levels);
-
- vmin0p65_idx = 0;
- vmid0p72_idx = fclks.num_levels -
- (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
- vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
- vmax0p9_idx = fclks.num_levels - 1;
-
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
- 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
-
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
-
- if (res)
- res = verify_clock_values(&dcfclks);
+ if (dcfclks->num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = dcfclks->data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = dcfclks->data[dcfclks->num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = dcfclks->data[dcfclks->num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = dcfclks->data[dcfclks->num_levels - 1].clocks_in_khz / 1000.0;
+ }
+}
- if (res && dcfclks.num_levels >= 3) {
- dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz)
+{
+ *min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+ *min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ *socclk_khz = dc->dcn_soc->socclk * 1000;
}
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz)
{
struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
- int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
if (dc->res_pool->pp_smu)
@@ -1526,10 +1512,6 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
- min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
- socclk_khz = dc->dcn_soc->socclk * 1000;
-
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
* Memory clock member variables for Watermarks calculations for each
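A worked sketch of the index arithmetic in the new dcn_bw_update_from_pplib_fclks(): with N reported fclk levels, the vmin/vmid/vnom/vmax rows map to indices 0, N-3, N-2 and N-1, clamped when fewer than three levels exist. Standalone example, not the driver code.

#include <stdio.h>

/* Mirror of the level-index selection used for the fabric/DRAM bandwidth rows. */
static void pick_indices(unsigned n, unsigned *vmin, unsigned *vmid, unsigned *vnom, unsigned *vmax)
{
	*vmin = 0;
	*vmid = n - (n > 2 ? 3 : (n > 1 ? 2 : 1));
	*vnom = n - (n > 1 ? 2 : 1);
	*vmax = n - 1;
}

int main(void)
{
	for (unsigned n = 1; n <= 4; n++) {
		unsigned a, b, c, d;

		pick_indices(n, &a, &b, &c, &d);
		printf("levels=%u -> vmin=%u vmid=%u vnom=%u vmax=%u\n", n, a, b, c, d);
	}
	return 0;
}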
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index b6e99eefe869..7dd0845d1bd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -292,6 +292,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
.num_chans = 4,
+ .dummy_pstate_latency_us = 10.0
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -459,13 +460,30 @@ void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
}
}
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
+ /* For 315, pstate change is only supported if possible in vactive */
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[context->bw_ctx.dml.vba.VoltageLevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_vactive)
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+ else
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
+ }
+}
+
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx;
+ int i, pipe_idx, active_dpp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@@ -486,72 +504,6 @@ void dcn31_calculate_wm_and_dlg_fp(
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-#if 0 // TODO
- /* Set B:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set C:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Set D:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
-
/* Set A:
* All clocks min required
*
@@ -568,16 +520,17 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- /* TODO: remove: */
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- /* end remove*/
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ active_dpp_count++;
+
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -594,6 +547,9 @@ void dcn31_calculate_wm_and_dlg_fp(
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ /* For 31x APUs, pstate change is only supported if possible in vactive or if there are no active DPPs */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -739,7 +695,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
else
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
}
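A tiny sketch of the support condition added to dcn31_calculate_wm_and_dlg_fp(): p-state change is reported as supported only when DML says the switch fits in vactive, or when no pipe has an active plane (active_dpp_count == 0). Hypothetical standalone code.

#include <stdbool.h>
#include <stdio.h>

enum clock_change { CHANGE_VACTIVE, CHANGE_VBLANK, CHANGE_UNSUPPORTED };

static bool pstate_supported(enum clock_change dml_result, int active_dpp_count)
{
	return dml_result == CHANGE_VACTIVE || active_dpp_count == 0;
}

int main(void)
{
	printf("%d\n", pstate_supported(CHANGE_VBLANK, 2));	/* 0: vblank-only with planes */
	printf("%d\n", pstate_supported(CHANGE_VBLANK, 0));	/* 1: no active planes */
	printf("%d\n", pstate_supported(CHANGE_VACTIVE, 2));	/* 1: fits in vactive */
	return 0;
}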
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 4372f17b55d4..fd58b2561ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -35,6 +35,7 @@ void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 8dfe639b6508..b612edb14417 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -43,6 +43,8 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN31_MAX_DSC_IMAGE_WIDTH 5184
#define DCN31_MAX_FMT_420_BUFFER_WIDTH 4096
+#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_15_MAX_DET_SIZE 384
// For DML-C changes that haven't been propagated to VBA yet
//#define __DML_VBA_ALLOW_DELTA__
@@ -3775,6 +3777,17 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
+static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
+{
+ int i, total_pipes = 0;
+ for (i = 0; i < NumberOfActivePlanes; i++)
+ total_pipes += NoOfDPPThisState[i];
+ *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+ if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
+ *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
+}
+
+
void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
@@ -4533,6 +4546,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
+ if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
+ PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
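A worked sketch of the arithmetic in the new PatchDETBufferSizeInKByte(): the configured return buffer, minus the 128 KB minimum compressed buffer, is carved into equal per-pipe slices rounded down to a 64 KB multiple, then capped at 384 KB. The pipe counts and 1792 KB buffer below are assumed example values.

#include <stdio.h>

#define MIN_COMPBUF_KB 128	/* DCN3_15_MIN_COMPBUF_SIZE_KB */
#define MAX_DET_KB     384	/* DCN3_15_MAX_DET_SIZE */

static unsigned det_size_kb(unsigned return_buf_kb, unsigned total_pipes)
{
	unsigned det = ((return_buf_kb - MIN_COMPBUF_KB) / 64 / total_pipes) * 64;

	return det > MAX_DET_KB ? MAX_DET_KB : det;
}

int main(void)
{
	for (unsigned pipes = 2; pipes <= 4; pipes++)
		printf("%u pipes -> DET %u KB\n", pipes, det_size_kb(1792, pipes));
	return 0;
}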
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 0571700f53f9..819de0f11012 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -243,7 +243,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
-/**
+/*
* Finds dummy_latency_index when MCLK switching using firmware based
* vblank stretch is enabled. This function will iterate through the
* table of dummy pstate latencies until the lowest value that allows
@@ -290,15 +290,14 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
- *
- * This function must be called AFTER the phantom pipes are added to context
- * and run through DML (so that the DLG params for the phantom pipes can be
- * populated), and BEFORE we program the timing for the phantom pipes.
- *
* @dc: [in] current dc state
* @context: [in] new dc state
* @pipes: [in] DML pipe params array
* @pipe_cnt: [in] DML pipe count
+ *
+ * This function must be called AFTER the phantom pipes are added to context
+ * and run through DML (so that the DLG params for the phantom pipes can be
+ * populated), and BEFORE we program the timing for the phantom pipes.
*/
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
struct dc_state *context,
@@ -331,8 +330,9 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
}
/**
- * *******************************************************************************************
- * dcn32_predict_pipe_split: Predict if pipe split will occur for a given DML pipe
+ * dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
+ * @context: [in] New DC state to be programmed
+ * @pipe_e2e: [in] DML pipe end to end context
*
* This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
* ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
@@ -343,12 +343,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
* - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
* MPC is required
*
- * @param [in]: context: New DC state to be programmed
- * @param [in]: pipe_e2e: DML pipe end to end context
- *
- * @return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
- *
- * *******************************************************************************************
+ * Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
*/
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e)
@@ -504,7 +499,14 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
}
/**
- * dcn32_set_phantom_stream_timing: Set timing params for the phantom stream
+ * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
+ * @dc: current dc state
+ * @context: new dc state
+ * @ref_pipe: Main pipe for the phantom stream
+ * @phantom_stream: target phantom stream state
+ * @pipes: DML pipe params
+ * @pipe_cnt: number of DML pipes
+ * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*
* Set timing params of the phantom stream based on calculated output from DML.
* This function first gets the DML pipe index using the DC pipe index, then
@@ -517,13 +519,6 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
* that separately.
*
* - Set phantom backporch = vstartup of main pipe
- *
- * @dc: current dc state
- * @context: new dc state
- * @ref_pipe: Main pipe for the phantom stream
- * @pipes: DML pipe params
- * @pipe_cnt: number of DML pipes
- * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*/
void dcn32_set_phantom_stream_timing(struct dc *dc,
struct dc_state *context,
@@ -592,16 +587,14 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
}
/**
- * dcn32_get_num_free_pipes: Calculate number of free pipes
+ * dcn32_get_num_free_pipes - Calculate number of free pipes
+ * @dc: current dc state
+ * @context: new dc state
*
* This function assumes that a "used" pipe is a pipe that has
* both a stream and a plane assigned to it.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * Number of free pipes available in the context
+ * Return: Number of free pipes available in the context
*/
static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
{
@@ -625,7 +618,10 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
}
/**
- * dcn32_assign_subvp_pipe: Function to decide which pipe will use Sub-VP.
+ * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
+ * @dc: current dc state
+ * @context: new dc state
+ * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
*
* We enter this function if we are Sub-VP capable (i.e. enough pipes available)
* and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
@@ -639,12 +635,7 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
* for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
* support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
*
- * @param dc: current dc state
- * @param context: new dc state
- * @param index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
- *
- * Return:
- * True if a valid pipe assignment was found for Sub-VP. Otherwise false.
+ * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
*/
static bool dcn32_assign_subvp_pipe(struct dc *dc,
struct dc_state *context,
@@ -711,7 +702,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
}
/**
- * dcn32_enough_pipes_for_subvp: Function to check if there are "enough" pipes for SubVP.
+ * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
+ * @dc: current dc state
+ * @context: new dc state
*
* This function returns true if there are enough free pipes
* to create the required phantom pipes for any given stream
@@ -723,9 +716,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* pipe which can be used as the phantom pipe for the non pipe
* split pipe.
*
- * @dc: current dc state
- * @context: new dc state
- *
* Return:
* True if there are enough free pipes to assign phantom pipes to at least one
* stream that does not already have phantom pipes assigned. Otherwise false.
@@ -764,7 +754,9 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
}
/**
- * subvp_subvp_schedulable: Determine if SubVP + SubVP config is schedulable
+ * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Find longest microschedule length (in us) between the two SubVP pipes
@@ -772,11 +764,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
* pipes still allows for the maximum microschedule to fit in the active
* region for both pipes.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + SubVP config is schedulable, false otherwise
+ * Return: True if the SubVP + SubVP config is schedulable, false otherwise
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -836,7 +824,10 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_drr_schedulable: Determine if SubVP + DRR config is schedulable
+ * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
+ * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
@@ -845,12 +836,7 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
* 3.If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame))
* then report the configuration as supported
*
- * @dc: current dc state
- * @context: new dc state
- * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
- *
- * Return:
- * bool - True if the SubVP + DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + DRR config is schedulable, false otherwise
*/
static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe)
{
@@ -914,7 +900,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
/**
- * subvp_vblank_schedulable: Determine if SubVP + VBLANK config is schedulable
+ * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
@@ -922,11 +910,7 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
* then report the configuration as supported
* 3. If the VBLANK display is DRR, then take the DRR static schedulability path
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
*/
static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -1003,20 +987,18 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_validate_static_schedulability: Check which SubVP case is calculated and handle
- * static analysis based on the case.
+ * subvp_validate_static_schedulability - Check which SubVP case is calculated
+ * and handle static analysis based on the case.
+ * @dc: current dc state
+ * @context: new dc state
+ * @vlevel: Voltage level calculated by DML
*
* Three cases:
* 1. SubVP + SubVP
* 2. SubVP + VBLANK (DRR checked internally)
* 3. SubVP + VACTIVE (currently unsupported)
*
- * @dc: current dc state
- * @context: new dc state
- * @vlevel: Voltage level calculated by DML
- *
- * Return:
- * bool - True if statically schedulable, false otherwise
+ * Return: True if statically schedulable, false otherwise
*/
static bool subvp_validate_static_schedulability(struct dc *dc,
struct dc_state *context,
@@ -1115,7 +1097,8 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
*/
if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
- !dcn32_mpo_in_use(context) && (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
+ (*vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
@@ -1597,6 +1580,9 @@ bool dcn32_internal_validate_bw(struct dc *dc,
/*MPC split rules will handle this case*/
pipe->bottom_pipe->top_pipe = NULL;
} else {
+ /* when merging ODM pipes, the bottom MPC pipe must now point to
+ * the previous ODM pipe and its associated stream assets
+ */
if (pipe->prev_odm_pipe->bottom_pipe) {
/* 3 plane MPO*/
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
@@ -1606,6 +1592,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
}
+
+ memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
}
}
@@ -1781,6 +1769,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
unsigned int dummy_latency_index = 0;
@@ -1816,7 +1805,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] !=
dm_dram_clock_change_unsupported;
}
@@ -1902,6 +1891,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
+ }
+
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
min_dram_speed_mts_margin = 160;
@@ -2275,7 +2268,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
return 0;
}
-/**
+/*
* dcn32_update_bw_bounding_box
*
* This would override some dcn3_2 ip_or_soc initial parameters hardcoded from
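A minimal sketch of why the watermark hunk above splits DCFCLK into two variables: the baseline validation DCFCLK is kept for normal programming, while the value from the firmware-based MCLK-switching re-validation is only used when that feature actually ends up enabled. Hypothetical names and clock values.

#include <stdbool.h>
#include <stdio.h>

static double pick_wm_dcfclk(double from_validation, double from_fw_mclk_switch,
			     bool fw_based_mclk_switching)
{
	/* Use the re-validated clock only if the FW-based switch is active. */
	return fw_based_mclk_switching ? from_fw_mclk_switch : from_validation;
}

int main(void)
{
	printf("%.0f MHz\n", pick_wm_dcfclk(1200.0, 900.0, false));	/* 1200 */
	printf("%.0f MHz\n", pick_wm_dcfclk(1200.0, 900.0, true));	/* 900  */
	return 0;
}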
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 75be1e1ce543..5b91660a6496 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -733,6 +733,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.FCLKChangeLatency, v->UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
+ memset(&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, 0, sizeof(DmlPipe));
+
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dppclk = mode_lib->vba.DPPCLK[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dispclk = mode_lib->vba.DISPCLK;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
@@ -2252,9 +2254,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
- || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0
- || mode_lib->vba.DSCInputBitPerComponent[k] >
- mode_lib->vba.MaximumDSCBitsPerComponent)) {
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)
+ || mode_lib->vba.DSCInputBitPerComponent[k] > mode_lib->vba.MaximumDSCBitsPerComponent) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
@@ -2330,16 +2331,15 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
- if (mode_lib->vba.OutputMultistreamId[k] == j && mode_lib->vba.OutputMultistreamEn[k]
+ if (mode_lib->vba.OutputMultistreamId[k] == j
&& mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
}
}
if ((mode_lib->vba.Output[k] == dm_edp || mode_lib->vba.Output[k] == dm_hdmi)) {
- if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.OutputMultistreamEn[k])
+ if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
-
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == j)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
@@ -2478,8 +2478,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]);
}
- m = 0;
-
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActiveSurfaces - 1; j++) {
@@ -2856,8 +2854,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- m = 0;
-
//Calculate Return BW
for (i = 0; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
@@ -3618,11 +3614,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ModeIsSupported = mode_lib->vba.ModeSupport[i][0] == true
|| mode_lib->vba.ModeSupport[i][1] == true;
- if (mode_lib->vba.ModeSupport[i][0] == true) {
+ if (mode_lib->vba.ModeSupport[i][0] == true)
MaximumMPCCombine = 0;
- } else {
+ else
MaximumMPCCombine = 1;
- }
}
}
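The reworked DSC condition earlier in this file can be read as plain boolean logic: a bits-per-component value is rejected when it is neither 8, 10 nor 12, or when it exceeds the reported maximum; the original form folded the greater-than-max test into the negated group, changing its meaning. A small standalone truth-table sketch:

#include <stdbool.h>
#include <stdio.h>

static bool bpc_unsupported(double bpc, double max_bpc)
{
	bool not_a_valid_value = !(bpc == 8.0 || bpc == 10.0 || bpc == 12.0);

	return not_a_valid_value || bpc > max_bpc;
}

int main(void)
{
	printf("%d\n", bpc_unsupported(8.0, 12.0));	/* 0: valid and within max */
	printf("%d\n", bpc_unsupported(12.0, 10.0));	/* 1: valid value but above max */
	printf("%d\n", bpc_unsupported(9.0, 12.0));	/* 1: not 8/10/12 */
	return 0;
}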
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index f5400eda07a5..4125d3d111d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -114,6 +114,7 @@ void dml_init_instance(struct display_mode_lib *lib,
break;
case DML_PROJECT_DCN31:
case DML_PROJECT_DCN31_FPGA:
+ case DML_PROJECT_DCN315:
lib->funcs = dml31_funcs;
break;
case DML_PROJECT_DCN314:
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index b1878a1440e2..3d643d50c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -40,6 +40,7 @@ enum dml_project {
DML_PROJECT_DCN21,
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
+ DML_PROJECT_DCN315,
DML_PROJECT_DCN31_FPGA,
DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8919a2092ac5..9498105c98ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -39,6 +39,8 @@
#include "panel_cntl.h"
#define MAX_CLOCK_SOURCES 7
+#define MAX_SVP_PHANTOM_STREAMS 2
+#define MAX_SVP_PHANTOM_PLANES 2
void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
uint32_t controller_id);
@@ -232,6 +234,7 @@ struct resource_funcs {
unsigned int index);
bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
+ void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
};
struct audio_support{
@@ -438,7 +441,6 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
- bool vtp_locked;
};
/* Data used for dynamic link encoder assignment.
@@ -492,6 +494,8 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
+ unsigned int legacy_svp_drr_stream_index;
+ bool legacy_svp_drr_stream_index_valid;
};
union bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..9e4ddc985240 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -628,8 +628,23 @@ unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
struct dc_clocks *clocks);
-void dcn_bw_update_from_pplib(struct dc *dc);
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz);
+
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks);
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks);
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index d9f1b0a4fbd4..591ab1389e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -95,10 +95,23 @@ struct clk_limit_table_entry {
unsigned int wck_ratio;
};
+struct clk_limit_num_entries {
+ unsigned int num_dcfclk_levels;
+ unsigned int num_fclk_levels;
+ unsigned int num_memclk_levels;
+ unsigned int num_socclk_levels;
+ unsigned int num_dtbclk_levels;
+ unsigned int num_dispclk_levels;
+ unsigned int num_dppclk_levels;
+ unsigned int num_phyclk_levels;
+ unsigned int num_phyclk_d18_levels;
+};
+
/* This table is contiguous */
struct clk_limit_table {
struct clk_limit_table_entry entries[MAX_NUM_DPM_LVL];
- unsigned int num_entries;
+ struct clk_limit_num_entries num_entries_per_clk;
+ unsigned int num_entries; /* highest populated dpm level for backward compatibility */
};
struct wm_range_table_entry {
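A small sketch of how the split bookkeeping above could be consumed, with simplified stand-in types and assumed example counts: per-clock DPM depths live in num_entries_per_clk, while num_entries keeps its legacy single-count meaning for existing consumers.

#include <stdio.h>

/* Simplified stand-ins for the clk_mgr bookkeeping split introduced here. */
struct clk_limit_num_entries {
	unsigned num_dcfclk_levels, num_fclk_levels, num_memclk_levels, num_socclk_levels;
};
struct clk_limit_table {
	struct clk_limit_num_entries num_entries_per_clk;
	unsigned num_entries;	/* legacy single count */
};

int main(void)
{
	/* Assumed example: each clock domain may report a different DPM depth. */
	struct clk_limit_table t = {
		.num_entries_per_clk = { .num_dcfclk_levels = 4, .num_fclk_levels = 3,
					 .num_memclk_levels = 4, .num_socclk_levels = 2 },
		.num_entries = 4,
	};

	printf("memclk levels: %u (legacy num_entries: %u)\n",
	       t.num_entries_per_clk.num_memclk_levels, t.num_entries);
	return 0;
}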
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
new file mode 100644
index 000000000000..45645f9fd86c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2022 Advanced Micro Devices, Inc. All rights reserved. */
+
+#ifndef __DAL_CURSOR_CACHE_H__
+#define __DAL_CURSOR_CACHE_H__
+
+union reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_hubp {
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_position_cfg {
+ struct {
+ uint32_t x_pos: 16;
+ uint32_t y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union reg_hot_spot_cfg {
+ struct {
+ uint32_t x_hot: 16;
+ uint32_t y_hot: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+struct cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+
+struct cursor_rect {
+ uint32_t x;
+ uint32_t y;
+ uint32_t w;
+ uint32_t h;
+};
+
+union reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attribute_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attributes_cfg {
+ struct cursor_attribute_cache_hubp aHubp;
+ struct cursor_attribute_cache_dpp aDpp;
+};
+
+#endif
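A standalone sketch of the bitfield/raw union pattern the new header relies on: fields are set through .bits and the assembled 32-bit value is compared or programmed through .raw, which is what lets a cached cursor state be diffed against the next request. The field layout is copied from the header above; the mode value and usage are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

union cursor_control {
	struct {
		uint32_t cur_enable: 1;
		uint32_t reser0: 3;
		uint32_t cur_2x_magnify: 1;
		uint32_t reser1: 3;
		uint32_t mode: 3;
		uint32_t reser2: 5;
		uint32_t pitch: 2;
		uint32_t reser3: 6;
		uint32_t line_per_chunk: 5;
		uint32_t reser4: 3;
	} bits;
	uint32_t raw;
};

int main(void)
{
	union cursor_control cached = { .raw = 0 }, next = { .raw = 0 };

	next.bits.cur_enable = 1;
	next.bits.mode = 2;		/* hypothetical mode value */
	next.bits.line_per_chunk = 4;

	/* A register write can be skipped when the packed values match. */
	printf("next raw = 0x%08x, %s\n", next.raw,
	       cached.raw == next.raw ? "skip write" : "program register");
	return 0;
}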
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 3ef7faa92052..dcb80c4747b0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -28,6 +28,7 @@
#define __DAL_DPP_H__
#include "transform.h"
+#include "cursor_reg_cache.h"
union defer_reg_writes {
struct {
@@ -58,6 +59,9 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+
+ struct cursor_position_cache_dpp pos;
+ struct cursor_attribute_cache_dpp att;
};
struct dpp_input_csc_matrix {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 44c4578193a3..d5ea7545583e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -27,6 +27,7 @@
#define __DAL_HUBP_H__
#include "mem_input.h"
+#include "cursor_reg_cache.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -65,6 +66,10 @@ struct hubp {
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
bool power_gated;
+
+ struct cursor_position_cache_hubp pos;
+ struct cursor_attribute_cache_hubp att;
+ struct cursor_rect cur_rect;
};
struct surface_flip_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 72eef7a5ed83..25a1df45b264 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -209,7 +209,6 @@ struct timing_generator_funcs {
void (*set_blank)(struct timing_generator *tg,
bool enable_blanking);
bool (*is_blanked)(struct timing_generator *tg);
- bool (*is_locked)(struct timing_generator *tg);
void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index c37d1141febe..5040836f404d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -230,4 +230,10 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 7d3147175ca2..153a88381f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -111,7 +111,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 9522fe0b36c9..4f7f99156897 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -37,7 +37,7 @@ void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
-void virtual_disable_link_output(struct dc_link *link,
+static void virtual_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index f34c45b19fcb..eb5b7eb292ef 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -248,6 +248,7 @@ struct dmub_srv_hw_params {
bool disable_dpia;
bool usb4_cm_version;
bool fw_in_system_memory;
+ bool dpia_hpd_int_enable_supported;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 5d1aadade8a5..7a8f61517424 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -400,8 +400,9 @@ union dmub_fw_boot_options {
uint32_t diag_env: 1; /* 1 if diagnostic environment */
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
+ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
- uint32_t reserved : 17; /**< reserved */
+ uint32_t reserved : 16; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
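The hunk above claims one bit for dpia_hpd_int_enable_supported and shrinks the adjacent reserved field from 17 to 16 bits so the flags still fit in a single 32-bit word. A hedged sketch of how that invariant can be checked at compile time; the union below uses invented field names, not the real DMUB definitions.

#include <stdint.h>

/* Illustrative boot-options layout; field names are made up. */
union boot_options {
	struct {
		uint32_t feature_a : 1;
		uint32_t feature_b : 1;
		uint32_t new_flag  : 1;   /* newly claimed bit          */
		uint32_t reserved  : 29;  /* shrunk to make room for it */
	} bits;
	uint32_t all;
};

/* Fails to compile if the bitfields ever grow past one 32-bit word. */
_Static_assert(sizeof(union boot_options) == sizeof(uint32_t),
	       "boot_options must stay exactly one 32-bit word");

int main(void) { return 0; }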
@@ -728,6 +729,12 @@ enum dmub_cmd_type {
/**
* Command type used for all VBIOS interface commands.
*/
+
+ /**
+ * Command type used to set DPIA HPD interrupt state
+ */
+ DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+
DMUB_CMD__VBIOS = 128,
};
@@ -760,11 +767,6 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
-enum dmub_cmd_header_sub_type {
- DMUB_CMD__SUB_TYPE_GENERAL = 0,
- DMUB_CMD__SUB_TYPE_CURSOR_POSITION = 1
-};
-
#pragma pack(push, 1)
/**
@@ -1261,6 +1263,14 @@ struct dmub_rb_cmd_set_mst_alloc_slots {
};
/**
+ * DMUB command structure for DPIA HPD int enable control.
+ */
+struct dmub_rb_cmd_dpia_hpd_int_enable {
+ struct dmub_cmd_header header; /* header */
+ uint32_t enable; /* dpia hpd interrupt enable */
+};
+
+/**
* struct dmub_rb_cmd_dpphy_init - DPPHY init.
*/
struct dmub_rb_cmd_dpphy_init {
@@ -2089,7 +2099,99 @@ struct dmub_rb_cmd_update_dirty_rect {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
-struct dmub_cmd_update_cursor_info_data {
+union dmub_reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_hubp {
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_position_cfg {
+ struct {
+ uint32_t cur_x_pos: 16;
+ uint32_t cur_y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union dmub_reg_hot_spot_cfg {
+ struct {
+ uint32_t hot_x: 16;
+ uint32_t hot_y: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union dmub_reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+union dmub_reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_position_cfg {
+ struct dmub_cursor_position_cache_hubp pHubp;
+ struct dmub_cursor_position_cache_dpp pDpp;
+ uint8_t pipe_idx;
+ /*
+ * Padding is required. To be 4 Bytes Aligned.
+ */
+ uint8_t padding[3];
+};
+
+struct dmub_cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union dmub_reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+struct dmub_cursor_attribute_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_attributes_cfg {
+ struct dmub_cursor_attribute_cache_hubp aHubp;
+ struct dmub_cursor_attribute_cache_dpp aDpp;
+};
+
+struct dmub_cmd_update_cursor_payload0 {
/**
* Cursor dirty rects.
*/
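The dmub_cursor_position_cfg struct above pads pipe_idx out to a 4-byte boundary by hand. Making the padding explicit keeps the size of a structure shared with firmware deterministic rather than compiler-dependent; a small illustrative sketch, with placeholder sizes and field names:

#include <stdint.h>

/* Illustrative command payload; mirrors the explicit-padding idiom above. */
struct position_cfg {
	uint32_t hubp_image;   /* stand-ins for the cached register images */
	uint32_t dpp_image;
	uint8_t  pipe_idx;
	uint8_t  padding[3];   /* spelled out so the size is not left to the compiler */
};

/* Driver and firmware must agree on the exact size of shared structs. */
_Static_assert(sizeof(struct position_cfg) == 12, "unexpected payload size");

int main(void) { return 0; }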
@@ -2116,6 +2218,20 @@ struct dmub_cmd_update_cursor_info_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * Cursor Position Register.
+ * Registers contains Hubp & Dpp modules
+ */
+ struct dmub_cursor_position_cfg position_cfg;
+};
+
+struct dmub_cmd_update_cursor_payload1 {
+ struct dmub_cursor_attributes_cfg attribute_cfg;
+};
+
+union dmub_cmd_update_cursor_info_data {
+ struct dmub_cmd_update_cursor_payload0 payload0;
+ struct dmub_cmd_update_cursor_payload1 payload1;
};
/**
* Definition of a DMUB_CMD__UPDATE_CURSOR_INFO command.
@@ -2128,7 +2244,7 @@ struct dmub_rb_cmd_update_cursor_info {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
- struct dmub_cmd_update_cursor_info_data update_cursor_info_data;
+ union dmub_cmd_update_cursor_info_data update_cursor_info_data;
};
/**
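Turning dmub_cmd_update_cursor_info_data into a union of payload0 and payload1 means the two alternative layouts share one buffer: a C union is only as large as its biggest member, not the sum of them, so the command size stays bounded by the larger payload. A standalone demonstration of that sizing property, with placeholder struct contents:

#include <stdint.h>
#include <stdio.h>

struct payload0 { uint32_t words[12]; };  /* e.g. position + dirty rects */
struct payload1 { uint32_t words[7];  };  /* e.g. attribute images       */

union update_data {
	struct payload0 p0;
	struct payload1 p1;
};

int main(void)
{
	/* The union is as large as its biggest member, not the sum. */
	printf("p0=%zu p1=%zu union=%zu\n",
	       sizeof(struct payload0), sizeof(struct payload1),
	       sizeof(union update_data));   /* prints p0=48 p1=28 union=48 */
	return 0;
}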
@@ -2825,11 +2941,7 @@ struct dmub_rb_cmd_get_visual_confirm_color {
struct dmub_optc_state {
uint32_t v_total_max;
uint32_t v_total_min;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
uint32_t tg_inst;
- uint32_t enable_manual_trigger;
- uint32_t clear_force_vsync;
};
struct dmub_rb_cmd_drr_update {
@@ -3235,6 +3347,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__QUERY_HPD_STATE command.
*/
struct dmub_rb_cmd_query_hpd_state query_hpd;
+ /**
+ * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
+ */
+ struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index c7bd7e216710..c90b9ee42e12 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -350,6 +350,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
+ boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 04f7656906ca..447a0ec9cbe2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1692,7 +1692,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
- build_coefficients(&coeff, true);
+ build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
i = 0;
while (i != hw_points_num + 1) {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
index b798cf5a2c39..38adde3cae5a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
@@ -29,5 +29,7 @@
#define regMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 2
#define regMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
#define regMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 2
+#define regUMCCH0_0_GeccCtrl 0x0053
+#define regUMCCH0_0_GeccCtrl_BASE_IDX 2
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
index bd99b431247f..4dbec524f943 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
@@ -90,5 +90,8 @@
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+//UMCCH0_0_GeccCtrl
+#define UMCCH0_0_GeccCtrl__UCFatalEn__SHIFT 0xd
+#define UMCCH0_0_GeccCtrl__UCFatalEn_MASK 0x00002000L
#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index e85364dff4e0..5cb3e8634739 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -262,8 +262,9 @@ struct kfd2kgd_calls {
uint32_t queue_id);
int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
- uint32_t reset_type, unsigned int timeout,
- uint32_t pipe_id, uint32_t queue_id);
+ enum kfd_preempt_type reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 948cc75376f8..236657eece47 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3362,11 +3362,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized)
return 0;
+ INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
if (adev->pm.dpm_enabled == 0)
return 0;
- INIT_LIST_HEAD(&adev->pm.pm_attr_list);
-
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
hwmon_groups);
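Moving INIT_LIST_HEAD ahead of the dpm_enabled check means pm_attr_list is a valid (empty) list no matter which early return the function takes. A plausible motivation, not stated in the patch itself, is that a teardown path walks the list unconditionally and must not see an uninitialised head. The ordering in a tiny standalone model:

#include <stdio.h>

struct node { struct node *next, *prev; };

static struct node attr_list;      /* next/prev stay NULL until list_init() runs */
static int dpm_enabled;            /* 0 here, mirroring the early-return case    */

static void list_init(struct node *head) { head->next = head->prev = head; }

static int sysfs_init(void)
{
	list_init(&attr_list);     /* initialise first, before any early return */
	if (!dpm_enabled)
		return 0;          /* bail out; the list is still well-formed   */
	/* ... attribute nodes would be added here ... */
	return 0;
}

static void sysfs_fini(void)
{
	/* Safe even though init returned early: an empty list terminates at once. */
	for (struct node *n = attr_list.next; n != &attr_list; n = n->next)
		printf("removing attribute node\n");
}

int main(void)
{
	sysfs_init();
	sysfs_fini();
	return 0;
}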
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8fd0782a2b20..f5e08b60f66e 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1384,13 +1384,16 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
static void kv_dpm_disable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
+ int err;
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- amdgpu_kv_smc_bapm_enable(adev, false);
+ err = amdgpu_kv_smc_bapm_enable(adev, false);
+ if (err)
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e4fcbf8a7eb5..7ef7e81525a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -3603,7 +3603,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
return -EINVAL);
PP_ASSERT_WITH_CODE(
- (smu7_power_state->performance_level_count <=
+ (smu7_power_state->performance_level_count <
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
return -EINVAL);
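Here, and in the identical vega10_hwmgr.c change below, the guard is tightened from <= to <. The patch does not spell out the reason, but a strict bound is what is needed whenever the counted entries still have room to grow inside a fixed-size table; as a generic illustration, with an invented limit and loop values:

#include <stdio.h>

#define MAX_LEVELS 8

static int levels[MAX_LEVELS];

/* Appends one level; with "count <= MAX_LEVELS" a caller holding count == 8
 * would pass the guard and write one element past the end of the array. */
static int add_level(int *count, int value)
{
	if (!(*count < MAX_LEVELS)) {
		fprintf(stderr, "performance levels exceed limit\n");
		return -1;
	}
	levels[(*count)++] = value;
	return 0;
}

int main(void)
{
	int count = 0;

	for (int i = 0; i < 10; i++)
		if (add_level(&count, i))
			break;              /* stops cleanly at the 9th attempt */
	printf("stored %d levels\n", count);
	return 0;
}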
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 99bfe5efe171..c8c9fb827bda 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3155,7 +3155,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
return -1);
PP_ASSERT_WITH_CODE(
- (vega10_ps->performance_level_count <=
+ (vega10_ps->performance_level_count <
hwmgr->platform_descriptor.
hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index 190af79f3236..dad3e3741a4e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t duty100, duty;
- uint64_t tmp64;
+ uint32_t current_rpm;
+ uint32_t percent = 0;
- duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
- CG_FDO_CTRL1, FMAX_DUTY100);
- duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
- CG_THERMAL_STATUS, FDO_PWM_DUTY);
+ if (hwmgr->thermal_controller.fanInfo.bNoFan)
+ return 0;
- if (!duty100)
- return -EINVAL;
+ if (vega10_get_current_rpm(hwmgr, &current_rpm))
+ return -1;
+
+ if (hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanRPM != 0)
+ percent = current_rpm * 255 /
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanRPM;
- tmp64 = (uint64_t)duty * 255;
- do_div(tmp64, duty100);
- *speed = MIN((uint32_t)tmp64, 255);
+ *speed = MIN(percent, 255);
return 0;
}
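The rewritten getter above no longer reads duty-cycle registers; it derives the PWM value from the measured fan RPM as current_rpm * 255 / usMaxFanRPM, clamped to 255. The same arithmetic as a standalone sketch, with invented RPM figures:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static uint32_t rpm_to_pwm(uint32_t current_rpm, uint32_t max_rpm)
{
	uint32_t percent = 0;

	if (max_rpm != 0)                        /* avoid dividing by zero */
		percent = current_rpm * 255 / max_rpm;

	return MIN(percent, 255);                /* clamp: RPM may overshoot the rated max */
}

int main(void)
{
	/* e.g. 1650 RPM on a fan rated for 3300 RPM -> 127 (~50% duty) */
	printf("%u\n", rpm_to_pwm(1650, 3300));
	/* overshooting the rated maximum still reports full scale (255) */
	printf("%u\n", rpm_to_pwm(3600, 3300));
	return 0;
}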
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 13c5c7f1ecb9..4fe75dd2b329 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1314,8 +1314,8 @@ static int smu_smc_hw_setup(struct smu_context *smu)
ret = smu_enable_thermal_alert(smu);
if (ret) {
- dev_err(adev->dev, "Failed to enable thermal alert!\n");
- return ret;
+ dev_err(adev->dev, "Failed to enable thermal alert!\n");
+ return ret;
}
ret = smu_notify_display_change(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index ae2d337158f3..f77401709d83 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 5
+#define PMFW_DRIVER_IF_VERSION 7
typedef struct {
int32_t value;
@@ -163,8 +163,8 @@ typedef struct {
uint16_t DclkFrequency; //[MHz]
uint16_t MemclkFrequency; //[MHz]
uint16_t spare; //[centi]
- uint16_t UvdActivity; //[centi]
uint16_t GfxActivity; //[centi]
+ uint16_t UvdActivity; //[centi]
uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC
uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC
@@ -199,6 +199,19 @@ typedef struct {
uint16_t DeviceState;
uint16_t CurTemp; //[centi-Celsius]
uint16_t spare2;
+
+ uint16_t AverageGfxclkFrequency;
+ uint16_t AverageFclkFrequency;
+ uint16_t AverageGfxActivity;
+ uint16_t AverageSocclkFrequency;
+ uint16_t AverageVclkFrequency;
+ uint16_t AverageVcnActivity;
+ uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads
+ uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes
+ uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower
+ uint16_t AverageCorePower; //Filtered of [sum of CorePower[8]])
+ uint16_t AverageCoreC0Residency[8]; //Filtered of [average C0 residency % per core]
+ uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing.
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 9d62ea2af132..8f72202aea8e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,7 +28,7 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
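The firmware-interface header warns that the interface version must be bumped whenever a structure in it changes, and this patch moves it from 5 to 7 while updating SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 above to 0x07 so both sides stay in step. A generic sketch of why such a version gate matters when driver and firmware exchange packed structures; the struct and the check below are illustrative, not the actual SMU code:

#include <stdint.h>
#include <stdio.h>

#define DRIVER_IF_VERSION 7     /* must track the firmware-interface header */

/* Pretend metrics blob as exported by firmware, version first. */
struct fw_metrics {
	uint32_t if_version;
	uint16_t gfx_activity;  /* field order is only meaningful when versions match */
	uint16_t uvd_activity;
};

static int parse_metrics(const struct fw_metrics *m)
{
	if (m->if_version != DRIVER_IF_VERSION) {
		fprintf(stderr, "if version mismatch: fw %u, driver %d\n",
			m->if_version, DRIVER_IF_VERSION);
		return -1;      /* refuse to interpret an unknown layout */
	}
	printf("gfx %d%%, uvd %d%%\n", m->gfx_activity, m->uvd_activity);
	return 0;
}

int main(void)
{
	struct fw_metrics ok  = { 7, 42, 10 };
	struct fw_metrics old = { 5, 42, 10 };

	parse_metrics(&ok);
	parse_metrics(&old);
	return 0;
}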
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 445005571f76..9cd005131f56 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2242,9 +2242,17 @@ static void arcturus_get_unique_id(struct smu_context *smu)
static int arcturus_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
+ struct amdgpu_device *adev = smu->adev;
uint32_t smu_version;
int ret;
+ /*
+ * Arcturus does not need the cstate disablement
+ * prerequisite for gpu reset.
+ */
+ if (amdgpu_in_reset(adev) || adev->in_suspend)
+ return 0;
+
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret) {
dev_err(smu->adev->dev, "Failed to get smu version!\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 619aee51b123..d30ec3005ea1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1640,6 +1640,15 @@ static bool aldebaran_is_baco_supported(struct smu_context *smu)
static int aldebaran_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
+ struct amdgpu_device *adev = smu->adev;
+
+ /*
+ * Aldebaran does not need the cstate disablement
+ * prerequisite for gpu reset.
+ */
+ if (amdgpu_in_reset(adev) || adev->in_suspend)
+ return 0;
+
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 93fffdbab4f0..c4552ade8d44 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -211,7 +211,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
return 0;
if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)))
return 0;
/* override pptable_id from driver parameter */
@@ -454,9 +455,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
dev_info(adev->dev, "override pptable id %d\n", pptable_id);
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
- pptable_id = 6666;
}
/* force using vbios pptable in sriov mode */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1d454485e0d9..29529328152d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -119,6 +119,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -1753,6 +1754,15 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DFCstateControl,
+ state,
+ NULL);
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -1822,6 +1832,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_0_set_mp1_state,
+ .set_df_cstate = smu_v13_0_0_set_df_cstate,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c422bf8a09b1..c4102cfb734c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -121,6 +121,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1587,6 +1588,16 @@ static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
return true;
}
+
+static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DFCstateControl,
+ state,
+ NULL);
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1649,6 +1660,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
+ .set_df_cstate = smu_v13_0_7_set_df_cstate,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 9f055d9710ea..16565a0a5da6 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
+#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -40,6 +41,18 @@
#include "drm_dp_helper_internal.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
struct dp_aux_backlight {
struct backlight_device *base;
struct drm_dp_aux *aux;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index e3142c8142b3..61c29ce74b03 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -435,7 +435,7 @@ int drmm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, funcs && funcs->destroy))
return -EINVAL;
- ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+ ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 457448cc60f7..7d86020b5244 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -32,6 +32,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -50,6 +51,18 @@
#include "drm_crtc_helper_internal.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
/**
* DOC: overview
*
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index f783d4963d4b..5b93c11895bb 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -23,14 +23,13 @@
* Rob Clark <robdclark@gmail.com>
*/
-#define DEBUG /* for pr_debug() */
-
#include <linux/stdarg.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
@@ -40,7 +39,7 @@
* __drm_debug: Enable debug output.
* Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
*/
-unsigned int __drm_debug;
+unsigned long __drm_debug;
EXPORT_SYMBOL(__drm_debug);
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
@@ -52,7 +51,30 @@ MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug cat
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, __drm_debug, int, 0600);
+
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+module_param_named(debug, __drm_debug, ulong, 0600);
+#else
+/* classnames must match vals of enum drm_debug_category */
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
+static struct ddebug_class_param drm_debug_bitmap = {
+ .bits = &__drm_debug,
+ .flags = "p",
+ .map = &drm_debug_classes,
+};
+module_param_cb(debug, &param_ops_dyndbg_classes, &drm_debug_bitmap, 0600);
+#endif
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
@@ -162,7 +184,8 @@ EXPORT_SYMBOL(__drm_printfn_info);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
{
- pr_debug("%s %pV", p->prefix, vaf);
+ /* pr_debug callsite decorations are unhelpful here */
+ printk(KERN_DEBUG "%s %pV", p->prefix, vaf);
}
EXPORT_SYMBOL(__drm_printfn_debug);
@@ -256,15 +279,16 @@ void drm_dev_printk(const struct device *dev, const char *level,
}
EXPORT_SYMBOL(drm_dev_printk);
-void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
- const char *format, ...)
+void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
+ enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!drm_debug_enabled(category))
+ if (!__drm_debug_enabled(category))
return;
+ /* we know we are printing for either syslog, tracefs, or both */
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
@@ -278,14 +302,14 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
va_end(args);
}
-EXPORT_SYMBOL(drm_dev_dbg);
+EXPORT_SYMBOL(__drm_dev_dbg);
-void __drm_dbg(enum drm_debug_category category, const char *format, ...)
+void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!drm_debug_enabled(category))
+ if (!__drm_debug_enabled(category))
return;
va_start(args, format);
@@ -297,7 +321,7 @@ void __drm_dbg(enum drm_debug_category category, const char *format, ...)
va_end(args);
}
-EXPORT_SYMBOL(__drm_dbg);
+EXPORT_SYMBOL(___drm_dbg);
void __drm_err(const char *format, ...)
{
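When CONFIG_DRM_USE_DYNAMIC_DEBUG is not set, the debug parameter remains a plain bitmask, widened here from unsigned int to unsigned long so the same variable can also back the dyndbg class map; each DRM_UT_* category corresponds to one bit, as the MODULE_PARM_DESC text above spells out. A minimal userspace model of that category gating (the category numbering below is illustrative only):

#include <stdio.h>

/* Illustrative category indices; one bit per category, as in the bitmask
 * described by MODULE_PARM_DESC above. */
enum debug_category { UT_CORE, UT_DRIVER, UT_KMS, UT_ATOMIC };

static unsigned long debug_mask;           /* what the module parameter would set */

static int debug_enabled(enum debug_category cat)
{
	return (debug_mask & (1UL << cat)) != 0;
}

#define dbg(cat, fmt, ...)                                   \
	do {                                                 \
		if (debug_enabled(cat))                      \
			fprintf(stderr, fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	debug_mask = 1UL << UT_KMS;              /* enable a single category */
	dbg(UT_KMS, "kms message shows up\n");
	dbg(UT_CORE, "core message is filtered out\n");
	return 0;
}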
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index bf33c3084cb4..a971590b8132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -4,7 +4,6 @@
// Author: Inki Dae <inki.dae@samsung.com>
// Author: Andrzej Hajda <a.hajda@samsung.com>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index f84d39762a72..ca127ff797f7 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -142,8 +142,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
if (ret)
drm_warn(dev, "Failed to update vram location.\n");
- hv->dirt_needed = true;
-
ret = hyperv_mode_config_init(hv);
if (ret)
goto err_free_mmio;
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
index 76a182a9a765..013a7829182d 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -208,7 +208,7 @@ static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg
VM_PKT_DATA_INBAND, 0);
if (ret)
- drm_err(&hv->dev, "Unable to send packet via vmbus\n");
+ drm_err_ratelimited(&hv->dev, "Unable to send packet via vmbus; error %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 5fbd2ae95869..2b73f5ff0d02 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -120,7 +120,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
- dotclock = pipe_config->port_clock * 2 / 3;
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3);
else
dotclock = pipe_config->port_clock;
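For 12bpc HDMI the dot clock is port_clock * 2 / 3, and the change above swaps plain integer division for DIV_ROUND_CLOSEST so the result is rounded rather than always truncated. A quick standalone comparison; the clock value is just an example, and the macro matches the kernel's behaviour for positive operands:

#include <stdio.h>

/* Same rounding rule as the kernel's DIV_ROUND_CLOSEST for positive values. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int port_clock = 445600;                              /* kHz, example value */

	int truncated = port_clock * 2 / 3;                   /* 297066 */
	int rounded   = DIV_ROUND_CLOSEST(port_clock * 2, 3); /* 297067 */

	printf("truncated=%d rounded=%d\n", truncated, rounded);
	return 0;
}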
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index dd008ba8afe3..461c62c88413 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -8130,6 +8130,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
+static int max_dotclock(struct drm_i915_private *i915)
+{
+ int max_dotclock = i915->max_dotclk_freq;
+
+ /* icl+ might use bigjoiner */
+ if (DISPLAY_VER(i915) >= 11)
+ max_dotclock *= 2;
+
+ return max_dotclock;
+}
+
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -8167,6 +8178,13 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
+ /*
+ * Reject clearly excessive dotclocks early to
+ * avoid having to worry about huge integers later.
+ */
+ if (mode->clock > max_dotclock(dev_priv))
+ return MODE_CLOCK_HIGH;
+
/* Transcoder timing limits */
if (DISPLAY_VER(dev_priv) >= 11) {
hdisplay_max = 16384;
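The new max_dotclock() helper above lets mode validation reject overly fast modes up front; on display version 11+ the platform limit is doubled because, per the comment, bigjoiner can split one output across two pipes. A compact sketch of the same early-reject shape, using made-up platform limits:

#include <stdio.h>

enum mode_status { MODE_OK, MODE_CLOCK_HIGH };

/* Illustrative platform description, not real i915 values. */
struct platform { int display_ver; int max_dotclk_khz; };

static int max_dotclock(const struct platform *p)
{
	int max = p->max_dotclk_khz;

	if (p->display_ver >= 11)   /* two joined pipes can each carry half the rate */
		max *= 2;
	return max;
}

static enum mode_status mode_valid(const struct platform *p, int mode_clock_khz)
{
	/* reject clearly excessive clocks before any further computation */
	if (mode_clock_khz > max_dotclock(p))
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}

int main(void)
{
	struct platform icl = { .display_ver = 11, .max_dotclk_khz = 652800 };

	printf("%d\n", mode_valid(&icl, 1188000));   /* within 2x limit: MODE_OK         */
	printf("%d\n", mode_valid(&icl, 1400000));   /* above 2x limit: MODE_CLOCK_HIGH  */
	return 0;
}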
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index c86e5d4ee016..1dddd6abd77b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -26,10 +26,17 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
u32 alignment;
int ret;
+ /*
+ * We are not syncing against the binding (and potential migrations)
+ * below, so this vm must never be async.
+ */
+ GEM_WARN_ON(vm->bind_async_flags);
+
if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
@@ -37,29 +44,48 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (!ret) {
+ for_i915_gem_ww(&ww, ret, true) {
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ continue;
+
+ if (HAS_LMEM(dev_priv)) {
+ unsigned int flags = obj->flags;
+
+ /*
+ * For this type of buffer we need to able to read from the CPU
+ * the clear color value found in the buffer, hence we need to
+ * ensure it is always in the mappable part of lmem, if this is
+ * a small-bar device.
+ */
+ if (intel_fb_rc_ccs_cc_plane(fb) >= 0)
+ flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0,
+ flags);
+ if (ret)
+ continue;
+ }
+
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- i915_gem_object_unlock(obj);
- }
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
+ if (ret)
+ continue;
- vma = i915_vma_instance(obj, vm, view);
- if (IS_ERR(vma))
- goto err;
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ continue;
+ }
- if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind_unlocked(vma);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ continue;
}
- }
- ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+ ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+ if (ret)
+ continue;
+ }
if (ret) {
vma = ERR_PTR(ret);
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 9def8d9fade6..d4cce627d7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -116,34 +116,56 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
}
}
+static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ EDP_PSR_ERROR(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ EDP_PSR_MASK(intel_dp->psr.transcoder);
+}
+
static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder trans_shift;
i915_reg_t imr_reg;
u32 mask, val;
- /*
- * gen12+ has registers relative to transcoder and one per transcoder
- * using the same bit definition: handle it as TRANSCODER_EDP to force
- * 0 shift in bit definition
- */
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- mask = EDP_PSR_ERROR(trans_shift);
+ mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
- mask |= EDP_PSR_POST_EXIT(trans_shift) |
- EDP_PSR_PRE_ENTRY(trans_shift);
+ mask |= psr_irq_post_exit_bit_get(intel_dp) |
+ psr_irq_pre_entry_bit_get(intel_dp);
- /* Warning: it is masking/setting reserved bits too */
val = intel_de_read(dev_priv, imr_reg);
- val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val &= ~psr_irq_mask_get(intel_dp);
val |= ~mask;
intel_de_write(dev_priv, imr_reg, val);
}
@@ -191,25 +213,21 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ktime_t time_ns = ktime_get();
- enum transcoder trans_shift;
i915_reg_t imr_reg;
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
- if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
intel_dp->psr.last_exit = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR exit completed\n",
@@ -226,7 +244,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
}
- if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
u32 val;
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
@@ -243,7 +261,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* or unset irq_aux_error.
*/
val = intel_de_read(dev_priv, imr_reg);
- val |= EDP_PSR_ERROR(trans_shift);
+ val |= psr_irq_psr_error_bit_get(intel_dp);
intel_de_write(dev_priv, imr_reg, val);
schedule_work(&intel_dp->psr.work);
@@ -1194,14 +1212,12 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* first time that PSR HW tries to activate so lets keep PSR disabled
* to avoid any rendering problems.
*/
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12)
val = intel_de_read(dev_priv,
TRANS_PSR_IIR(intel_dp->psr.transcoder));
- val &= EDP_PSR_ERROR(0);
- } else {
+ else
val = intel_de_read(dev_priv, EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
- }
+ val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 01b0932757ed..18178b01375e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1710,10 +1710,22 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
modifier == I915_FORMAT_MOD_4_TILED ||
modifier == I915_FORMAT_MOD_Yf_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0bcde53c50c6..1e29b1e6d186 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1387,14 +1387,8 @@ kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
*/
for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
- bool skip = false;
- if (exit)
- skip = intel_context_set_exiting(ce);
- else if (!persistent)
- skip = intel_context_exit_nonpersistent(ce, NULL);
-
- if (skip)
+ if ((exit || !persistent) && intel_context_revoke(ce))
continue; /* Already marked. */
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cd75b0ca2555..845023c14eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2424,7 +2424,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_int() % num_vcs_engines(dev_priv);
+ prandom_u32_max(num_vcs_engines(dev_priv));
return file_priv->bsd_engine;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 7ff9c7877bec..369006c5317f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -653,6 +653,41 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
enum intel_region_id id)
{
+ return __i915_gem_object_migrate(obj, ww, id, obj->flags);
+}
+
+/**
+ * __i915_gem_object_migrate - Migrate an object to the desired region id, with
+ * control of the extra flags
+ * @obj: The object to migrate.
+ * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
+ * not be successful in evicting other objects to make room for this object.
+ * @id: The region id to migrate to.
+ * @flags: The object flags. Normally just obj->flags.
+ *
+ * Attempt to migrate the object to the desired memory region. The
+ * object backend must support migration and the object may not be
+ * pinned, (explicitly pinned pages or pinned vmas). The object must
+ * be locked.
+ * On successful completion, the object will have pages pointing to
+ * memory in the new region, but an async migration task may not have
+ * completed yet, and to accomplish that, i915_gem_object_wait_migration()
+ * must be called.
+ *
+ * Note: the @ww parameter is not used yet, but included to make sure
+ * callers put some effort into obtaining a valid ww ctx if one is
+ * available.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
+ * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
+ * -EBUSY if the object is pinned.
+ */
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ enum intel_region_id id,
+ unsigned int flags)
+{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_memory_region *mr;
@@ -672,7 +707,7 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
return 0;
}
- return obj->ops->migrate(obj, mr);
+ return obj->ops->migrate(obj, mr, flags);
}
/**
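i915_gem_object_migrate() now forwards obj->flags to the new __i915_gem_object_migrate(), which takes the flags explicitly so callers such as the framebuffer-pinning path earlier in this patch can clear I915_BO_ALLOC_GPU_ONLY before migrating. The double-underscore wrapper shape in brief; the names below are generic stand-ins, not the i915 API:

#include <stdio.h>

struct object { unsigned int flags; };

/* Core implementation: the caller supplies the flags explicitly. */
static int __migrate(struct object *obj, int region, unsigned int flags)
{
	(void)obj;  /* a real implementation would act on the object */
	printf("migrate to region %d with flags 0x%x\n", region, flags);
	return 0;
}

/* Convenience wrapper: the common case just reuses the object's own flags. */
static int migrate(struct object *obj, int region)
{
	return __migrate(obj, region, obj->flags);
}

int main(void)
{
	struct object obj = { .flags = 0x5 };

	migrate(&obj, 1);                        /* default behaviour         */
	__migrate(&obj, 1, obj.flags & ~0x4);    /* caller masks out one flag */
	return 0;
}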
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7317d4102955..1723af9b0f6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -608,6 +608,10 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
enum intel_region_id id);
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ enum intel_region_id id,
+ unsigned int flags);
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
enum intel_region_id id);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 40305e2bcd49..d0d6772e6f36 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -107,7 +107,8 @@ struct drm_i915_gem_object_ops {
* pinning or for as long as the object lock is held.
*/
int (*migrate)(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr);
+ struct intel_memory_region *mr,
+ unsigned int flags);
void (*release)(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e3fc38dd5db0..4f861782c3e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -848,9 +848,10 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
}
static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr)
+ struct intel_memory_region *mr,
+ unsigned int flags)
{
- return __i915_ttm_migrate(obj, mr, obj->flags);
+ return __i915_ttm_migrate(obj, mr, flags);
}
static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8423df021b71..d4398948f016 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -426,12 +426,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
- const unsigned long end = addr + len;
+ VMA_ITERATOR(vmi, mm, addr);
struct vm_area_struct *vma;
- int ret = -EFAULT;
mmap_read_lock(mm);
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ for_each_vma_range(vmi, vma, addr + len) {
/* Check for holes, note that we also update the addr below */
if (vma->vm_start > addr)
break;
@@ -439,16 +438,13 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
break;
- if (vma->vm_end >= end) {
- ret = 0;
- break;
- }
-
addr = vma->vm_end;
}
mmap_read_unlock(mm);
- return ret;
+ if (vma)
+ return -EFAULT;
+ return 0;
}
/*
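The rewritten probe_range() above walks every VMA that overlaps [addr, addr + len) and bails out as soon as it meets a hole in front of a VMA or a PFNMAP/MIXEDMAP mapping. The same idea reduced to pure hole detection over a sorted array of ranges, as a standalone model:

#include <stdio.h>

struct range { unsigned long start, end; };   /* stand-in for a VMA */

/* Returns 0 if [addr, addr + len) is fully covered by the sorted,
 * non-overlapping ranges, -1 if any part of it is unmapped. */
static int probe_range(const struct range *r, int n,
		       unsigned long addr, unsigned long len)
{
	unsigned long end = addr + len;

	for (int i = 0; i < n && r[i].start < end; i++) {
		if (r[i].start > addr)
			return -1;            /* hole in front of this range */
		addr = r[i].end;              /* advance the cursor */
	}
	return addr >= end ? 0 : -1;          /* -1 if the tail is uncovered */
}

int main(void)
{
	struct range vmas[] = { { 0x1000, 0x3000 }, { 0x3000, 0x6000 } };

	printf("%d\n", probe_range(vmas, 2, 0x1000, 0x4000)); /* 0: fully covered      */
	printf("%d\n", probe_range(vmas, 2, 0x0800, 0x1000)); /* -1: hole at the front */
	return 0;
}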
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 654a092ed3d6..e94365b08f1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -614,13 +614,12 @@ bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
return ret;
}
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
- struct i915_request *rq)
+bool intel_context_revoke(struct intel_context *ce)
{
bool ret = intel_context_set_exiting(ce);
if (ce->ops->revoke)
- ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+ ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 8e2d70630c49..be09fb2e883a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -329,8 +329,7 @@ static inline bool intel_context_set_exiting(struct intel_context *ce)
return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
- struct i915_request *rq);
+bool intel_context_revoke(struct intel_context *ce);
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 30cf5c3369d9..2049a00417af 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -1275,10 +1275,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- if (!retained_ptes)
+ if (!retained_ptes) {
+ /*
+ * Clear the bound flags of the vma resource to allow
+ * ptes to be repopulated.
+ */
+ vma->resource->bound_flags = 0;
vma->ops->bind_vma(vm, NULL, vma->resource,
obj ? obj->cache_level : 0,
was_bound);
+ }
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c6ebe2781076..152244d7f62a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -207,6 +207,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
+ /* Bypass LLC - Uncached (EHL+) */ \
+ MOCS_ENTRY(16, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_1_UC), \
+ /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+ MOCS_ENTRY(17, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 22ba66e48a9b..1db59eeb34db 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -684,7 +684,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
* Corner case where requests were sitting in the priority list or a
* request resubmitted after the context was banned.
*/
- if (unlikely(intel_context_is_banned(ce))) {
+ if (unlikely(!intel_context_is_schedulable(ce))) {
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(ce->engine);
return 0;
@@ -870,15 +870,15 @@ static int guc_wq_item_append(struct intel_guc *guc,
struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
- int ret = 0;
+ int ret;
- if (likely(!intel_context_is_banned(ce))) {
- ret = __guc_wq_item_append(rq);
+ if (unlikely(!intel_context_is_schedulable(ce)))
+ return 0;
- if (unlikely(ret == -EBUSY)) {
- guc->stalled_request = rq;
- guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
- }
+ ret = __guc_wq_item_append(rq);
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
}
return ret;
@@ -897,7 +897,7 @@ static bool multi_lrc_submit(struct i915_request *rq)
* submitting all the requests generated in parallel.
*/
return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
- intel_context_is_banned(ce);
+ !intel_context_is_schedulable(ce);
}
static int guc_dequeue_one_context(struct intel_guc *guc)
@@ -966,7 +966,7 @@ register_context:
struct intel_context *ce = request_to_scheduling_context(last);
if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
- !intel_context_is_banned(ce))) {
+ intel_context_is_schedulable(ce))) {
ret = try_context_registration(ce, false);
if (unlikely(ret == -EPIPE)) {
goto deadlk;
@@ -1576,7 +1576,7 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
{
struct intel_engine_cs *engine = __context_to_physical_engine(ce);
- if (intel_context_is_banned(ce))
+ if (!intel_context_is_schedulable(ce))
return;
GEM_BUG_ON(!intel_context_is_pinned(ce));
@@ -4424,12 +4424,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- if (likely(!intel_context_is_banned(ce))) {
+ if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
} else {
drm_info(&guc_to_gt(guc)->i915->drm,
- "Ignoring context reset notification of banned context 0x%04X on %s",
+ "Ignoring context reset notification of exiting context 0x%04X on %s",
ce->guc_id.id, ce->engine->name);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b81a6d35a7b..076c779f776a 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -240,13 +240,13 @@ static void free_resource(struct intel_vgpu *vgpu)
}
static int alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
+ const struct intel_vgpu_config *conf)
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned long request, avail, max, taken;
const char *item;
- if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+ if (!conf->low_mm || !conf->high_mm || !conf->fence) {
gvt_vgpu_err("Invalid vGPU creation params\n");
return -EINVAL;
}
@@ -255,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
taken = gvt->gm.vgpu_allocated_low_gm_size;
avail = max - taken;
- request = MB_TO_BYTES(param->low_gm_sz);
+ request = conf->low_mm;
if (request > avail)
goto no_enough_resource;
@@ -266,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
taken = gvt->gm.vgpu_allocated_high_gm_size;
avail = max - taken;
- request = MB_TO_BYTES(param->high_gm_sz);
+ request = conf->high_mm;
if (request > avail)
goto no_enough_resource;
@@ -277,16 +277,16 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_fence_sz(gvt) - HOST_FENCE;
taken = gvt->fence.vgpu_allocated_fence_num;
avail = max - taken;
- request = param->fence_sz;
+ request = conf->fence;
if (request > avail)
goto no_enough_resource;
vgpu_fence_sz(vgpu) = request;
- gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
- gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
- gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+ gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm;
+ gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm;
+ gvt->fence.vgpu_allocated_fence_num += conf->fence;
return 0;
no_enough_resource:
@@ -340,11 +340,11 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
*
*/
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
+ const struct intel_vgpu_config *conf)
{
int ret;
- ret = alloc_resource(vgpu, param);
+ ret = alloc_resource(vgpu, conf);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 705689e64011..dbf8d7470b2c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -36,6 +36,7 @@
#include <uapi/linux/pci_regs.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include "i915_drv.h"
#include "intel_gvt.h"
@@ -172,6 +173,7 @@ struct intel_vgpu_submission {
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct intel_vgpu {
+ struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
@@ -211,7 +213,6 @@ struct intel_vgpu {
u32 scan_nonprivbb;
- struct vfio_device vfio_device;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
@@ -294,15 +295,25 @@ struct intel_gvt_firmware {
bool firmware_loaded;
};
-#define NR_MAX_INTEL_VGPU_TYPES 20
-struct intel_vgpu_type {
- char name[16];
- unsigned int avail_instance;
- unsigned int low_gm_size;
- unsigned int high_gm_size;
+struct intel_vgpu_config {
+ unsigned int low_mm;
+ unsigned int high_mm;
unsigned int fence;
+
+ /*
+ * A vGPU with a weight of 8 will get twice as much GPU as a vGPU with
+ * a weight of 4 on a contended host, different vGPU type has different
+ * weight set. Legal weights range from 1 to 16.
+ */
unsigned int weight;
- enum intel_vgpu_edid resolution;
+ enum intel_vgpu_edid edid;
+ const char *name;
+};
+
+struct intel_vgpu_type {
+ struct mdev_type type;
+ char name[16];
+ const struct intel_vgpu_config *conf;
};
struct intel_gvt {
@@ -326,6 +337,8 @@ struct intel_gvt {
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct mdev_parent parent;
+ struct mdev_type **mdev_types;
struct intel_vgpu_type *types;
unsigned int num_types;
struct intel_vgpu *idle_vgpu;
@@ -436,19 +449,8 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* ring context size i.e. the first 0x50 dwords*/
#define RING_CTX_SIZE 320
-struct intel_vgpu_creation_params {
- __u64 low_gm_sz; /* in MB */
- __u64 high_gm_sz; /* in MB */
- __u64 fence_sz;
- __u64 resolution;
- __s32 primary;
- __u64 vgpu_id;
-
- __u32 weight;
-};
-
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param);
+ const struct intel_vgpu_config *conf);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
@@ -494,8 +496,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_type *type);
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+ const struct intel_vgpu_config *conf);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e3cd58946477..7a45e5360caf 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -34,7 +34,6 @@
*/
#include <linux/init.h>
-#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
@@ -43,7 +42,6 @@
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>
@@ -115,117 +113,18 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node);
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
+static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- return 0;
+ struct intel_vgpu_type *type =
+ container_of(mtype, struct intel_vgpu_type, type);
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
"fence: %d\nresolution: %s\n"
"weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static ssize_t name_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- return 0;
-
- return sprintf(buf, "%s\n", type->name);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-static MDEV_TYPE_ATTR_RO(name);
-
-static struct attribute *gvt_type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- &mdev_type_attr_name.attr,
- NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (!group)
- goto unwind;
-
- group->name = type->name;
- group->attrs = gvt_type_attrs;
- gvt_vgpu_type_groups[i] = group;
- }
-
- return 0;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = gvt_vgpu_type_groups[j];
- kfree(group);
- }
-
- return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = gvt_vgpu_type_groups[i];
- gvt_vgpu_type_groups[i] = NULL;
- kfree(group);
- }
+ BYTES_TO_MB(type->conf->low_mm),
+ BYTES_TO_MB(type->conf->high_mm),
+ type->conf->fence, vgpu_edid_str(type->conf->edid),
+ type->conf->weight);
}
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
@@ -1546,7 +1445,28 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
+static int intel_vgpu_init_dev(struct vfio_device *vfio_dev)
+{
+ struct mdev_device *mdev = to_mdev_device(vfio_dev->dev);
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ struct intel_vgpu_type *type =
+ container_of(mdev->type, struct intel_vgpu_type, type);
+
+ vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
+ return intel_gvt_create_vgpu(vgpu, type->conf);
+}
+
+static void intel_vgpu_release_dev(struct vfio_device *vfio_dev)
+{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+
+ intel_gvt_destroy_vgpu(vgpu);
+ vfio_free_device(vfio_dev);
+}
+
static const struct vfio_device_ops intel_vgpu_dev_ops = {
+ .init = intel_vgpu_init_dev,
+ .release = intel_vgpu_release_dev,
.open_device = intel_vgpu_open_device,
.close_device = intel_vgpu_close_device,
.read = intel_vgpu_read,
@@ -1558,35 +1478,28 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
static int intel_vgpu_probe(struct mdev_device *mdev)
{
- struct device *pdev = mdev_parent_dev(mdev);
- struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt;
- struct intel_vgpu_type *type;
struct intel_vgpu *vgpu;
int ret;
- type = &gvt->types[mdev_get_type_group_id(mdev)];
- if (!type)
- return -EINVAL;
-
- vgpu = intel_gvt_create_vgpu(gvt, type);
+ vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
+ &intel_vgpu_dev_ops);
if (IS_ERR(vgpu)) {
gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
return PTR_ERR(vgpu);
}
- vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
- &intel_vgpu_dev_ops);
-
dev_set_drvdata(&mdev->dev, vgpu);
ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
- if (ret) {
- intel_gvt_destroy_vgpu(vgpu);
- return ret;
- }
+ if (ret)
+ goto out_put_vdev;
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
return 0;
+
+out_put_vdev:
+ vfio_put_device(&vgpu->vfio_device);
+ return ret;
}
static void intel_vgpu_remove(struct mdev_device *mdev)
@@ -1595,18 +1508,43 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
if (WARN_ON_ONCE(vgpu->attached))
return;
- intel_gvt_destroy_vgpu(vgpu);
+
+ vfio_unregister_group_dev(&vgpu->vfio_device);
+ vfio_put_device(&vgpu->vfio_device);
+}
+
+static unsigned int intel_vgpu_get_available(struct mdev_type *mtype)
+{
+ struct intel_vgpu_type *type =
+ container_of(mtype, struct intel_vgpu_type, type);
+ struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+
+ mutex_lock(&gvt->lock);
+ low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+ mutex_unlock(&gvt->lock);
+
+ return min3(low_gm_avail / type->conf->low_mm,
+ high_gm_avail / type->conf->high_mm,
+ fence_avail / type->conf->fence);
}
static struct mdev_driver intel_vgpu_mdev_driver = {
+ .device_api = VFIO_DEVICE_API_PCI_STRING,
.driver = {
.name = "intel_vgpu_mdev",
.owner = THIS_MODULE,
.dev_groups = intel_vgpu_groups,
},
- .probe = intel_vgpu_probe,
- .remove = intel_vgpu_remove,
- .supported_type_groups = gvt_vgpu_type_groups,
+ .probe = intel_vgpu_probe,
+ .remove = intel_vgpu_remove,
+ .get_available = intel_vgpu_get_available,
+ .show_description = intel_vgpu_show_description,
};
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
@@ -1904,8 +1842,7 @@ static void intel_gvt_clean_device(struct drm_i915_private *i915)
if (drm_WARN_ON(&i915->drm, !gvt))
return;
- mdev_unregister_device(i915->drm.dev);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
+ mdev_unregister_parent(&gvt->parent);
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_clean_vgpu_types(gvt);
@@ -2005,19 +1942,15 @@ static int intel_gvt_init_device(struct drm_i915_private *i915)
intel_gvt_debugfs_init(gvt);
- ret = intel_gvt_init_vgpu_type_groups(gvt);
+ ret = mdev_register_parent(&gvt->parent, i915->drm.dev,
+ &intel_vgpu_mdev_driver,
+ gvt->mdev_types, gvt->num_types);
if (ret)
goto out_destroy_idle_vgpu;
- ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver);
- if (ret)
- goto out_cleanup_vgpu_type_groups;
-
gvt_dbg_core("gvt device initialization is done\n");
return 0;
-out_cleanup_vgpu_type_groups:
- intel_gvt_cleanup_vgpu_type_groups(gvt);
out_destroy_idle_vgpu:
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_debugfs_clean(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 46da19b3225d..56c71474008a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -73,24 +73,21 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+/*
+ * vGPU type names are defined as GVTg_Vx_y, where x encodes the physical GPU
+ * generation (e.g. V4 for BDW server, V5 for SKL server).
+ *
+ * Depending on the physical SKU resources, we might see vGPU types like
+ * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types of vGPU can be
+ * created on the same physical GPU depending on the available resources.
+ * Each vGPU type has a different avail_instance count to indicate how
+ * many vGPU instances can be created for this type.
+ */
#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num) \
(VGPU_MAX_WEIGHT / (vgpu_num))
-static const struct {
- unsigned int low_mm;
- unsigned int high_mm;
- unsigned int fence;
-
- /* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
- * with a weight of 4 on a contended host, different vGPU type has
- * different weight set. Legal weights range from 1 to 16.
- */
- unsigned int weight;
- enum intel_vgpu_edid edid;
- const char *name;
-} vgpu_types[] = {
-/* Fixed vGPU type table */
+static const struct intel_vgpu_config intel_vgpu_configs[] = {
{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
@@ -106,102 +103,58 @@ static const struct {
*/
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
- unsigned int num_types;
- unsigned int i, low_avail, high_avail;
- unsigned int min_low;
-
- /* vGPU type name is defined as GVTg_Vx_y which contains
- * physical GPU generation type (e.g V4 as BDW server, V5 as
- * SKL server).
- *
- * Depend on physical SKU resource, might see vGPU types like
- * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
- * different types of vGPU on same physical GPU depending on
- * available resource. Each vGPU type will have "avail_instance"
- * to indicate how many vGPU instance can be created for this
- * type.
- *
- */
- low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
- high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = ARRAY_SIZE(vgpu_types);
+ unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+ unsigned int num_types = ARRAY_SIZE(intel_vgpu_configs);
+ unsigned int i;
gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
if (!gvt->types)
return -ENOMEM;
- min_low = MB_TO_BYTES(32);
- for (i = 0; i < num_types; ++i) {
- if (low_avail / vgpu_types[i].low_mm == 0)
- break;
-
- gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
- gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
- gvt->types[i].fence = vgpu_types[i].fence;
+ gvt->mdev_types = kcalloc(num_types, sizeof(*gvt->mdev_types),
+ GFP_KERNEL);
+ if (!gvt->mdev_types)
+ goto out_free_types;
- if (vgpu_types[i].weight < 1 ||
- vgpu_types[i].weight > VGPU_MAX_WEIGHT)
- return -EINVAL;
+ for (i = 0; i < num_types; ++i) {
+ const struct intel_vgpu_config *conf = &intel_vgpu_configs[i];
- gvt->types[i].weight = vgpu_types[i].weight;
- gvt->types[i].resolution = vgpu_types[i].edid;
- gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
- high_avail / vgpu_types[i].high_mm);
+ if (low_avail / conf->low_mm == 0)
+ break;
+ if (conf->weight < 1 || conf->weight > VGPU_MAX_WEIGHT)
+ goto out_free_mdev_types;
- if (GRAPHICS_VER(gvt->gt->i915) == 8)
- sprintf(gvt->types[i].name, "GVTg_V4_%s",
- vgpu_types[i].name);
- else if (GRAPHICS_VER(gvt->gt->i915) == 9)
- sprintf(gvt->types[i].name, "GVTg_V5_%s",
- vgpu_types[i].name);
+ sprintf(gvt->types[i].name, "GVTg_V%u_%s",
+ GRAPHICS_VER(gvt->gt->i915) == 8 ? 4 : 5, conf->name);
+ gvt->types[i].conf = conf;
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
- gvt->types[i].avail_instance,
- gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence,
- gvt->types[i].weight,
- vgpu_edid_str(gvt->types[i].resolution));
+ min(low_avail / conf->low_mm,
+ high_avail / conf->high_mm),
+ conf->low_mm, conf->high_mm, conf->fence,
+ conf->weight, vgpu_edid_str(conf->edid));
+
+ gvt->mdev_types[i] = &gvt->types[i].type;
+ gvt->mdev_types[i]->sysfs_name = gvt->types[i].name;
}
gvt->num_types = i;
return 0;
-}
-void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
-{
+out_free_mdev_types:
+ kfree(gvt->mdev_types);
+out_free_types:
kfree(gvt->types);
+ return -EINVAL;
}
-static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
- int i;
- unsigned int low_gm_avail, high_gm_avail, fence_avail;
- unsigned int low_gm_min, high_gm_min, fence_min;
-
- /* Need to depend on maxium hw resource size but keep on
- * static config for now.
- */
- low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
- gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
- gvt->gm.vgpu_allocated_high_gm_size;
- fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
- gvt->fence.vgpu_allocated_fence_num;
-
- for (i = 0; i < gvt->num_types; i++) {
- low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
- high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
- fence_min = fence_avail / gvt->types[i].fence;
- gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
- fence_min);
-
- gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name,
- gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence);
- }
+ kfree(gvt->mdev_types);
+ kfree(gvt->types);
}
/**
@@ -298,12 +251,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
-
- mutex_lock(&gvt->lock);
- intel_gvt_update_vgpu_types(gvt);
- mutex_unlock(&gvt->lock);
-
- vfree(vgpu);
}
#define IDLE_VGPU_IDR 0
@@ -363,42 +310,38 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
vfree(vgpu);
}
-static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_creation_params *param)
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+ const struct intel_vgpu_config *conf)
{
+ struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->gt->i915;
- struct intel_vgpu *vgpu;
int ret;
- gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
- param->low_gm_sz, param->high_gm_sz,
- param->fence_sz);
-
- vgpu = vzalloc(sizeof(*vgpu));
- if (!vgpu)
- return ERR_PTR(-ENOMEM);
+ gvt_dbg_core("low %u MB high %u MB fence %u\n",
+ BYTES_TO_MB(conf->low_mm), BYTES_TO_MB(conf->high_mm),
+ conf->fence);
+ mutex_lock(&gvt->lock);
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
- goto out_free_vgpu;
+ goto out_unlock;
vgpu->id = ret;
- vgpu->gvt = gvt;
- vgpu->sched_ctl.weight = param->weight;
+ vgpu->sched_ctl.weight = conf->weight;
mutex_init(&vgpu->vgpu_lock);
mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init_base(&vgpu->object_idr, 1);
- intel_vgpu_init_cfg_space(vgpu, param->primary);
+ intel_vgpu_init_cfg_space(vgpu, 1);
vgpu->d3_entered = false;
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
goto out_clean_idr;
- ret = intel_vgpu_alloc_resource(vgpu, param);
+ ret = intel_vgpu_alloc_resource(vgpu, conf);
if (ret)
goto out_clean_vgpu_mmio;
@@ -412,7 +355,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_gtt;
- ret = intel_vgpu_init_display(vgpu, param->resolution);
+ ret = intel_vgpu_init_display(vgpu, conf->edid);
if (ret)
goto out_clean_opregion;
@@ -437,7 +380,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- return vgpu;
+ intel_gvt_update_reg_whitelist(vgpu);
+ mutex_unlock(&gvt->lock);
+ return 0;
out_clean_sched_policy:
intel_vgpu_clean_sched_policy(vgpu);
@@ -455,48 +400,9 @@ out_clean_vgpu_mmio:
intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
-out_free_vgpu:
- vfree(vgpu);
- return ERR_PTR(ret);
-}
-
-/**
- * intel_gvt_create_vgpu - create a virtual GPU
- * @gvt: GVT device
- * @type: type of the vGPU to create
- *
- * This function is called when user wants to create a virtual GPU.
- *
- * Returns:
- * pointer to intel_vgpu, error pointer if failed.
- */
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_type *type)
-{
- struct intel_vgpu_creation_params param;
- struct intel_vgpu *vgpu;
-
- param.primary = 1;
- param.low_gm_sz = type->low_gm_size;
- param.high_gm_sz = type->high_gm_size;
- param.fence_sz = type->fence;
- param.weight = type->weight;
- param.resolution = type->resolution;
-
- /* XXX current param based on MB */
- param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
- param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
-
- mutex_lock(&gvt->lock);
- vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (!IS_ERR(vgpu)) {
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
- intel_gvt_update_reg_whitelist(vgpu);
- }
+out_unlock:
mutex_unlock(&gvt->lock);
-
- return vgpu;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 329ff75b80b9..7bd1861ddbdf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -137,12 +137,12 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
range = round_down(end - len, align) - round_up(start, align);
if (range) {
if (sizeof(unsigned long) == sizeof(u64)) {
- addr = get_random_long();
+ addr = get_random_u64();
} else {
- addr = get_random_int();
+ addr = get_random_u32();
if (range > U32_MAX) {
addr <<= 32;
- addr |= get_random_int();
+ addr |= get_random_u32();
}
}
div64_u64_rem(addr, range, &addr);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 6fc475a5db61..d1e4d528cb17 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -29,6 +29,18 @@
#include "i915_params.h"
#include "i915_drv.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
#define i915_param_named(name, T, perm, desc) \
module_param_named(name, i915_modparams.name, T, perm); \
MODULE_PARM_DESC(name, desc)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1a9bd829fc7e..0b287a59dc2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2157,10 +2157,18 @@
#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
0 : ((trans) - TRANSCODER_A + 1) * 8)
-#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+#define TGL_PSR_MASK REG_GENMASK(2, 0)
+#define TGL_PSR_ERROR REG_BIT(2)
+#define TGL_PSR_POST_EXIT REG_BIT(1)
+#define TGL_PSR_PRE_ENTRY REG_BIT(0)
+#define EDP_PSR_MASK(trans) (TGL_PSR_MASK << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (TGL_PSR_ERROR << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (TGL_PSR_POST_EXIT << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \
+ _EDP_PSR_TRANS_SHIFT(trans))
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index c4e932368b37..39da0fb0d6d2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -135,7 +135,7 @@ static int __run_selftests(const char *name,
int err = 0;
while (!i915_selftest.random_seed)
- i915_selftest.random_seed = get_random_int();
+ i915_selftest.random_seed = get_random_u32();
i915_selftest.timeout_jiffies =
i915_selftest.timeout_ms ?
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 16356611b5b9..20fe53815b20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -139,44 +139,24 @@ static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
}
}
-static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
- struct vm_fault *vmf, struct migrate_vma *args,
- dma_addr_t *dma_addr)
+static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
+ struct page *dpage, dma_addr_t *dma_addr)
{
struct device *dev = drm->dev->dev;
- struct page *dpage, *spage;
- struct nouveau_svmm *svmm;
-
- spage = migrate_pfn_to_page(args->src[0]);
- if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
- return 0;
- dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
- if (!dpage)
- return VM_FAULT_SIGBUS;
lock_page(dpage);
*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
- goto error_free_page;
+ return -EIO;
- svmm = spage->zone_device_data;
- mutex_lock(&svmm->mutex);
- nouveau_svmm_invalidate(svmm, args->start, args->end);
if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
- NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
- goto error_dma_unmap;
- mutex_unlock(&svmm->mutex);
+ NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
+ dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ return -EIO;
+ }
- args->dst[0] = migrate_pfn(page_to_pfn(dpage));
return 0;
-
-error_dma_unmap:
- mutex_unlock(&svmm->mutex);
- dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
-error_free_page:
- __free_page(dpage);
- return VM_FAULT_SIGBUS;
}
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
@@ -184,9 +164,11 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
struct nouveau_drm *drm = page_to_drm(vmf->page);
struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
+ struct nouveau_svmm *svmm;
+ struct page *spage, *dpage;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
- vm_fault_t ret;
+ vm_fault_t ret = 0;
struct migrate_vma args = {
.vma = vmf->vma,
.start = vmf->address,
@@ -194,6 +176,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
.src = &src,
.dst = &dst,
.pgmap_owner = drm->dev,
+ .fault_page = vmf->page,
.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
};
@@ -207,9 +190,25 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!args.cpages)
return 0;
- ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
- if (ret || dst == 0)
+ spage = migrate_pfn_to_page(src);
+ if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ goto done;
+
+ dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+ if (!dpage)
+ goto done;
+
+ dst = migrate_pfn(page_to_pfn(dpage));
+
+ svmm = spage->zone_device_data;
+ mutex_lock(&svmm->mutex);
+ nouveau_svmm_invalidate(svmm, args.start, args.end);
+ ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
+ mutex_unlock(&svmm->mutex);
+ if (ret) {
+ ret = VM_FAULT_SIGBUS;
goto done;
+ }
nouveau_fence_new(dmem->migrate.chan, false, &fence);
migrate_vma_pages(&args);
@@ -326,7 +325,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
return NULL;
}
- lock_page(page);
+ zone_device_page_init(page);
return page;
}
@@ -369,6 +368,52 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
mutex_unlock(&drm->dmem->mutex);
}
+/*
+ * Evict all pages mapping a chunk.
+ */
+static void
+nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+{
+ unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
+ unsigned long *src_pfns, *dst_pfns;
+ dma_addr_t *dma_addrs;
+ struct nouveau_fence *fence;
+
+ src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+ dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+ dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+
+ migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
+ npages);
+
+ for (i = 0; i < npages; i++) {
+ if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
+ struct page *dpage;
+
+ /*
+ * __GFP_NOFAIL because the GPU is going away and there
+ * is nothing sensible we can do if we can't copy the
+ * data back.
+ */
+ dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ nouveau_dmem_copy_one(chunk->drm,
+ migrate_pfn_to_page(src_pfns[i]), dpage,
+ &dma_addrs[i]);
+ }
+ }
+
+ nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ nouveau_dmem_fence_done(&fence);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kfree(src_pfns);
+ kfree(dst_pfns);
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ kfree(dma_addrs);
+}
+
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
@@ -380,8 +425,10 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+ nouveau_dmem_evict_chunk(chunk);
nouveau_bo_unpin(chunk->bo);
nouveau_bo_ref(NULL, &chunk->bo);
+ WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
release_mem_region(chunk->pagemap.range.start,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 561309d447e0..fd99ec0f4257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
@@ -70,6 +71,18 @@
#include "nouveau_svm.h"
#include "nouveau_dmem.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 89056a1aac7d..6bd0634e2d58 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -63,13 +63,13 @@ static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
{
struct panfrost_dump_object_header *hdr = iter->hdr;
- hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC);
- hdr->type = cpu_to_le32(type);
- hdr->file_offset = cpu_to_le32(iter->data - iter->start);
- hdr->file_size = cpu_to_le32(data_end - iter->data);
+ hdr->magic = PANFROSTDUMP_MAGIC;
+ hdr->type = type;
+ hdr->file_offset = iter->data - iter->start;
+ hdr->file_size = data_end - iter->data;
iter->hdr++;
- iter->data += le32_to_cpu(hdr->file_size);
+ iter->data += hdr->file_size;
}
static void
@@ -93,8 +93,8 @@ panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
reg = panfrost_dump_registers[i] + js_as_offset;
- dumpreg->reg = cpu_to_le32(reg);
- dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg));
+ dumpreg->reg = reg;
+ dumpreg->value = gpu_read(pfdev, reg);
}
panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
@@ -106,7 +106,7 @@ void panfrost_core_dump(struct panfrost_job *job)
struct panfrost_dump_iterator iter;
struct drm_gem_object *dbo;
unsigned int n_obj, n_bomap_pages;
- __le64 *bomap, *bomap_start;
+ u64 *bomap, *bomap_start;
size_t file_size;
u32 as_nr;
int slot;
@@ -177,11 +177,11 @@ void panfrost_core_dump(struct panfrost_job *job)
* For now, we write the job identifier in the register dump header,
* so that we can decode the entire dump later with pandecode
*/
- iter.hdr->reghdr.jc = cpu_to_le64(job->jc);
- iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR);
- iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR);
- iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id);
- iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count);
+ iter.hdr->reghdr.jc = job->jc;
+ iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR;
+ iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR;
+ iter.hdr->reghdr.gpu_id = pfdev->features.id;
+ iter.hdr->reghdr.nbos = job->bo_count;
panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
@@ -218,27 +218,27 @@ void panfrost_core_dump(struct panfrost_job *job)
WARN_ON(!mapping->active);
- iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start));
+ iter.hdr->bomap.data[0] = bomap - bomap_start;
for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
struct page *page = sg_page_iter_page(&page_iter);
if (!IS_ERR(page)) {
- *bomap++ = cpu_to_le64(page_to_phys(page));
+ *bomap++ = page_to_phys(page);
} else {
dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
- *bomap++ = ~cpu_to_le64(0);
+ *bomap++ = 0;
}
}
- iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT);
+ iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
drm_gem_shmem_vunmap(&bo->base, &map);
- iter.hdr->bomap.valid = cpu_to_le32(1);
+ iter.hdr->bomap.valid = 1;
dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
bo->base.base.size);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 6b25b2f4f5a3..6137537aaea4 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -385,7 +385,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
}
s_fence = to_drm_sched_fence(fence);
- if (s_fence && s_fence->sched == sched) {
+ if (s_fence && s_fence->sched == sched &&
+ !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
/*
* Fence is from the same scheduler, only need to wait for
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 7a2b2d6bc3fe..62f69589a72d 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -729,7 +729,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
static int drm_buddy_init_test(struct kunit *test)
{
while (!random_seed)
- random_seed = get_random_int();
+ random_seed = get_random_u32();
return 0;
}
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 8d86c250c2ec..2191e57f2297 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -438,7 +438,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
iosys_map_set_vaddr(&src, xrgb8888);
drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
- buf = le32buf_to_cpu(test, buf, TEST_BUF_SIZE);
+ buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
}
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 659d1af4dca7..c4b66eeae203 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -2212,7 +2212,7 @@ err_nodes:
static int drm_mm_init_test(struct kunit *test)
{
while (!random_seed)
- random_seed = get_random_int();
+ random_seed = get_random_u32();
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index ffbbb454c9e8..2027063fdc30 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -490,6 +490,7 @@ module_init(vc4_drm_register);
module_exit(vc4_drm_unregister);
MODULE_ALIAS("platform:vc4-drm");
+MODULE_SOFTDEP("pre: snd-soc-hdmi-codec");
MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 64f9feabf43e..596e311d6e58 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -3318,12 +3318,37 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
unsigned long __maybe_unused flags;
u32 __maybe_unused value;
+ unsigned long rate;
int ret;
+ /*
+ * The HSM clock is in the HDMI power domain, so we need to set
+ * its frequency while the power domain is active so that it
+ * keeps its rate.
+ */
+ ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
+ /*
+ * Whenever the Raspberry Pi boots without an HDMI monitor
+ * plugged in, the firmware won't have initialized the HSM clock
+ * rate and it will be reported as 0.
+ *
+ * If we try to access a register of the controller in such a
+ * case, it will lead to a silent CPU stall. Let's make sure we
+ * prevent such a case.
+ */
+ rate = clk_get_rate(vc4_hdmi->hsm_clock);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
@@ -3345,6 +3370,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
#endif
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ return ret;
}
static void vc4_hdmi_put_ddc_device(void *ptr)
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 6970797cdc56..1ccab8aa326c 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -59,6 +59,12 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
"(For people who want to keep Windows PC keyboard muscle memory. "
"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+static unsigned int swap_ctrl_cmd;
+module_param(swap_ctrl_cmd, uint, 0644);
+MODULE_PARM_DESC(swap_ctrl_cmd, "Swap the Control (\"Ctrl\") and Command (\"Flag\") keys. "
+ "(For people who are used to Mac shortcuts involving Command instead of Control. "
+ "[0] = No change. 1 = Swapped.)");
+
static unsigned int swap_fn_leftctrl;
module_param(swap_fn_leftctrl, uint, 0644);
MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
@@ -308,12 +314,21 @@ static const struct apple_key_translation swapped_option_cmd_keys[] = {
{ KEY_LEFTALT, KEY_LEFTMETA },
{ KEY_LEFTMETA, KEY_LEFTALT },
{ KEY_RIGHTALT, KEY_RIGHTMETA },
- { KEY_RIGHTMETA,KEY_RIGHTALT },
+ { KEY_RIGHTMETA, KEY_RIGHTALT },
+ { }
+};
+
+static const struct apple_key_translation swapped_ctrl_cmd_keys[] = {
+ { KEY_LEFTCTRL, KEY_LEFTMETA },
+ { KEY_LEFTMETA, KEY_LEFTCTRL },
+ { KEY_RIGHTCTRL, KEY_RIGHTMETA },
+ { KEY_RIGHTMETA, KEY_RIGHTCTRL },
{ }
};
static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
{ KEY_FN, KEY_LEFTCTRL },
+ { KEY_LEFTCTRL, KEY_FN },
{ }
};
@@ -375,24 +390,47 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
struct apple_sc *asc = hid_get_drvdata(hid);
const struct apple_key_translation *trans, *table;
bool do_translate;
- u16 code = 0;
+ u16 code = usage->code;
unsigned int real_fnmode;
- u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
-
- if (usage->code == fn_keycode) {
- asc->fn_on = !!value;
- input_event_with_scancode(input, usage->type, KEY_FN,
- usage->hid, value);
- return 1;
- }
-
if (fnmode == 3) {
real_fnmode = (asc->quirks & APPLE_IS_NON_APPLE) ? 2 : 1;
} else {
real_fnmode = fnmode;
}
+ if (swap_fn_leftctrl) {
+ trans = apple_find_translation(swapped_fn_leftctrl_keys, code);
+
+ if (trans)
+ code = trans->to;
+ }
+
+ if (iso_layout > 0 || (iso_layout < 0 && (asc->quirks & APPLE_ISO_TILDE_QUIRK) &&
+ hid->country == HID_COUNTRY_INTERNATIONAL_ISO)) {
+ trans = apple_find_translation(apple_iso_keyboard, code);
+
+ if (trans)
+ code = trans->to;
+ }
+
+ if (swap_opt_cmd) {
+ trans = apple_find_translation(swapped_option_cmd_keys, code);
+
+ if (trans)
+ code = trans->to;
+ }
+
+ if (swap_ctrl_cmd) {
+ trans = apple_find_translation(swapped_ctrl_cmd_keys, code);
+
+ if (trans)
+ code = trans->to;
+ }
+
+ if (code == KEY_FN)
+ asc->fn_on = !!value;
+
if (real_fnmode) {
if (hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI ||
hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO ||
@@ -430,15 +468,18 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
else
table = apple_fn_keys;
- trans = apple_find_translation (table, usage->code);
+ trans = apple_find_translation(table, code);
if (trans) {
- if (test_bit(trans->from, input->key))
+ bool from_is_set = test_bit(trans->from, input->key);
+ bool to_is_set = test_bit(trans->to, input->key);
+
+ if (from_is_set)
code = trans->from;
- else if (test_bit(trans->to, input->key))
+ else if (to_is_set)
code = trans->to;
- if (!code) {
+ if (!(from_is_set || to_is_set)) {
if (trans->flags & APPLE_FLAG_FKEY) {
switch (real_fnmode) {
case 1:
@@ -455,62 +496,31 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
do_translate = asc->fn_on;
}
- code = do_translate ? trans->to : trans->from;
+ if (do_translate)
+ code = trans->to;
}
-
- input_event_with_scancode(input, usage->type, code,
- usage->hid, value);
- return 1;
}
if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
- (test_bit(usage->code, asc->pressed_numlock) ||
+ (test_bit(code, asc->pressed_numlock) ||
test_bit(LED_NUML, input->led))) {
- trans = apple_find_translation(powerbook_numlock_keys,
- usage->code);
+ trans = apple_find_translation(powerbook_numlock_keys, code);
if (trans) {
if (value)
- set_bit(usage->code,
- asc->pressed_numlock);
+ set_bit(code, asc->pressed_numlock);
else
- clear_bit(usage->code,
- asc->pressed_numlock);
+ clear_bit(code, asc->pressed_numlock);
- input_event_with_scancode(input, usage->type,
- trans->to, usage->hid, value);
+ code = trans->to;
}
-
- return 1;
- }
- }
-
- if (iso_layout > 0 || (iso_layout < 0 && (asc->quirks & APPLE_ISO_TILDE_QUIRK) &&
- hid->country == HID_COUNTRY_INTERNATIONAL_ISO)) {
- trans = apple_find_translation(apple_iso_keyboard, usage->code);
- if (trans) {
- input_event_with_scancode(input, usage->type,
- trans->to, usage->hid, value);
- return 1;
}
}
- if (swap_opt_cmd) {
- trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
- if (trans) {
- input_event_with_scancode(input, usage->type,
- trans->to, usage->hid, value);
- return 1;
- }
- }
+ if (usage->code != code) {
+ input_event_with_scancode(input, usage->type, code, usage->hid, value);
- if (swap_fn_leftctrl) {
- trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
- if (trans) {
- input_event_with_scancode(input, usage->type,
- trans->to, usage->hid, value);
- return 1;
- }
+ return 1;
}
return 0;
@@ -640,9 +650,6 @@ static void apple_setup_input(struct input_dev *input)
apple_setup_key_translation(input, apple2021_fn_keys);
apple_setup_key_translation(input, macbookpro_no_esc_fn_keys);
apple_setup_key_translation(input, macbookpro_dedicated_esc_fn_keys);
-
- if (swap_fn_leftctrl)
- apple_setup_key_translation(input, swapped_fn_leftctrl_keys);
}
static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -1011,21 +1018,21 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K),
- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
+ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132),
- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
+ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680),
- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
+ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213),
- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
+ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 81e7e404a5fc..2ca6ab600bc9 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1014,7 +1014,8 @@ static const char *absolutes[ABS_CNT] = {
[ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
[ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
[ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth",
- [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
+ [ABS_VOLUME] = "Volume", [ABS_PROFILE] = "Profile",
+ [ABS_MISC] = "Misc",
[ABS_MT_TOUCH_MAJOR] = "MTMajor",
[ABS_MT_TOUCH_MINOR] = "MTMinor",
[ABS_MT_WIDTH_MAJOR] = "MTMajorW",
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index e014ef36d872..8069f795c864 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -1089,7 +1089,7 @@ static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
struct cs_char *csdata = vmf->vma->vm_private_data;
struct page *page;
- page = virt_to_page(csdata->mmap_base);
+ page = virt_to_page((void *)csdata->mmap_base);
get_page(page);
vmf->page = page;
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
index cd7ebf4c2e2f..97ba59e60663 100644
--- a/drivers/hsi/clients/nokia-modem.c
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/hsi/ssi_protocol.h>
static unsigned int pm = 1;
@@ -75,8 +74,7 @@ static int nokia_modem_gpio_probe(struct device *dev)
struct nokia_modem_device *modem = dev_get_drvdata(dev);
int gpio_count, gpio_name_count, i, err;
- gpio_count = of_gpio_count(np);
-
+ gpio_count = gpiod_count(dev, NULL);
if (gpio_count < 0) {
dev_err(dev, "missing gpios: %d\n", gpio_count);
return gpio_count;
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 21f11a5b965b..274ad8443f8c 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -796,7 +796,6 @@ static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
dev_err(&cl->device, "No memory for rx skb\n");
goto out1;
}
- skb->dev = ssi->netdev;
skb_put(skb, len * 4);
msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
if (unlikely(!msg)) {
@@ -931,6 +930,7 @@ static int ssip_pn_open(struct net_device *dev)
if (err < 0) {
dev_err(&cl->device, "Register HSI port event failed (%d)\n",
err);
+ hsi_release_port(cl);
return err;
}
dev_dbg(&cl->device, "Configuring SSI port\n");
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 44a3f5660c10..eb9820158318 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -524,6 +524,7 @@ static int ssi_probe(struct platform_device *pd)
if (!childpdev) {
err = -ENODEV;
dev_err(&pd->dev, "failed to create ssi controller port\n");
+ of_node_put(child);
goto out3;
}
}
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index a0cb5be246e1..b9495b720f1b 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -230,10 +230,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
if (msg->ttype == HSI_MSG_READ) {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_FROM_DEVICE);
- if (err < 0) {
+ if (!err) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
pm_runtime_put_autosuspend(omap_port->pdev);
- return err;
+ return -EIO;
}
csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
@@ -247,10 +247,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
} else {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_TO_DEVICE);
- if (err < 0) {
+ if (!err) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
pm_runtime_put_autosuspend(omap_port->pdev);
- return err;
+ return -EIO;
}
csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index eca7afd366d6..9dc27e5d367a 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -431,34 +431,29 @@ struct vmbus_channel *relid2channel(u32 relid)
void vmbus_on_event(unsigned long data)
{
struct vmbus_channel *channel = (void *) data;
- unsigned long time_limit = jiffies + 2;
+ void (*callback_fn)(void *context);
trace_vmbus_on_event(channel);
hv_debug_delay_test(channel, INTERRUPT_DELAY);
- do {
- void (*callback_fn)(void *);
- /* A channel once created is persistent even when
- * there is no driver handling the device. An
- * unloading driver sets the onchannel_callback to NULL.
- */
- callback_fn = READ_ONCE(channel->onchannel_callback);
- if (unlikely(callback_fn == NULL))
- return;
-
- (*callback_fn)(channel->channel_callback_context);
+ /* A channel once created is persistent even when
+ * there is no driver handling the device. An
+ * unloading driver sets the onchannel_callback to NULL.
+ */
+ callback_fn = READ_ONCE(channel->onchannel_callback);
+ if (unlikely(!callback_fn))
+ return;
- if (channel->callback_mode != HV_CALL_BATCHED)
- return;
+ (*callback_fn)(channel->channel_callback_context);
- if (likely(hv_end_read(&channel->inbound) == 0))
- return;
+ if (channel->callback_mode != HV_CALL_BATCHED)
+ return;
- hv_begin_read(&channel->inbound);
- } while (likely(time_before(jiffies, time_limit)));
+ if (likely(hv_end_read(&channel->inbound) == 0))
+ return;
- /* The time limit (2 jiffies) has been reached */
+ hv_begin_read(&channel->inbound);
tasklet_schedule(&channel->callback_event);
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 7b9f3fc3adf7..8b2e413bf19c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -46,8 +46,6 @@ struct vmbus_dynid {
static struct acpi_device *hv_acpi_dev;
-static struct completion probe_event;
-
static int hyperv_cpuhp_online;
static void *hv_panic_page;
@@ -1132,7 +1130,8 @@ void vmbus_on_msg_dpc(unsigned long data)
return;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);
+ ctx->msg.header = msg_copy.header;
+ memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);
/*
* The host can generate a rescind message while we
@@ -1573,7 +1572,7 @@ err_setup:
}
/**
- * __vmbus_child_driver_register() - Register a vmbus's driver
+ * __vmbus_driver_register() - Register a vmbus's driver
* @hv_driver: Pointer to driver structure you want to register
* @owner: owner module of the drv
* @mod_name: module name string
@@ -2052,7 +2051,7 @@ struct hv_device *vmbus_device_create(const guid_t *type,
child_device_obj->channel = channel;
guid_copy(&child_device_obj->dev_type, type);
guid_copy(&child_device_obj->dev_instance, instance);
- child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
+ child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;
return child_device_obj;
}
@@ -2468,7 +2467,6 @@ static int vmbus_acpi_add(struct acpi_device *device)
ret_val = 0;
acpi_walk_err:
- complete(&probe_event);
if (ret_val)
vmbus_acpi_remove(device);
return ret_val;
@@ -2647,6 +2645,7 @@ static struct acpi_driver vmbus_acpi_driver = {
.remove = vmbus_acpi_remove,
},
.drv.pm = &vmbus_bus_pm,
+ .drv.probe_type = PROBE_FORCE_SYNCHRONOUS,
};
static void hv_kexec_handler(void)
@@ -2719,7 +2718,7 @@ static struct syscore_ops hv_synic_syscore_ops = {
static int __init hv_acpi_init(void)
{
- int ret, t;
+ int ret;
if (!hv_is_hyperv_initialized())
return -ENODEV;
@@ -2727,8 +2726,6 @@ static int __init hv_acpi_init(void)
if (hv_root_partition)
return 0;
- init_completion(&probe_event);
-
/*
* Get ACPI resources first.
*/
@@ -2737,9 +2734,8 @@ static int __init hv_acpi_init(void)
if (ret)
return ret;
- t = wait_for_completion_timeout(&probe_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
+ if (!hv_acpi_dev) {
+ ret = -ENODEV;
goto cleanup;
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5695b266abcf..7ac3daaf59ce 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1758,6 +1758,7 @@ config SENSORS_SIS5595
config SENSORS_SY7636A
tristate "Silergy SY7636A"
+ depends on MFD_SY7636A
help
If you say yes here you get support for the thermistor readout of
the Silergy SY7636A PMIC.
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 45407b12db4b..dd690f700d49 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -10,6 +10,7 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/sysfs.h>
#include <asm/unaligned.h>
@@ -1216,8 +1217,16 @@ int occ_setup(struct occ *occ)
occ->groups[0] = &occ->group;
rc = occ_setup_sysfs(occ);
- if (rc)
+ if (rc) {
dev_err(occ->bus_dev, "failed to setup sysfs: %d\n", rc);
+ return rc;
+ }
+
+ if (!device_property_read_bool(occ->bus_dev, "ibm,no-poll-on-init")) {
+ rc = occ_active(occ, true);
+ if (rc)
+ occ_shutdown_sysfs(occ);
+ }
return rc;
}
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index c1e0a1d96cd4..96521363b696 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -7,6 +7,7 @@
#include <linux/fsi-occ.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/string.h>
@@ -14,6 +15,8 @@
#include "common.h"
+#define OCC_CHECKSUM_RETRIES 3
+
struct p9_sbe_occ {
struct occ occ;
bool sbe_error;
@@ -80,18 +83,23 @@ done:
static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len,
void *resp, size_t resp_len)
{
+ size_t original_resp_len = resp_len;
struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ);
- int rc;
+ int rc, i;
- rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len);
- if (rc < 0) {
+ for (i = 0; i < OCC_CHECKSUM_RETRIES; ++i) {
+ rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len);
+ if (rc >= 0)
+ break;
if (resp_len) {
if (p9_sbe_occ_save_ffdc(ctx, resp, resp_len))
sysfs_notify(&occ->bus_dev->kobj, NULL,
bin_attr_ffdc.attr.name);
+ return rc;
}
-
- return rc;
+ if (rc != -EBADE)
+ return rc;
+ resp_len = original_resp_len;
}
switch (((struct occ_response *)resp)->return_status) {
@@ -174,9 +182,17 @@ static int p9_sbe_occ_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id p9_sbe_occ_of_match[] = {
+ { .compatible = "ibm,p9-occ-hwmon" },
+ { .compatible = "ibm,p10-occ-hwmon" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, p9_sbe_occ_of_match);
+
static struct platform_driver p9_sbe_occ_driver = {
.driver = {
.name = "occ-hwmon",
+ .of_match_table = p9_sbe_occ_of_match,
},
.probe = p9_sbe_occ_probe,
.remove = p9_sbe_occ_remove,
diff --git a/drivers/hwtracing/Kconfig b/drivers/hwtracing/Kconfig
index 13085835a636..911ee977103c 100644
--- a/drivers/hwtracing/Kconfig
+++ b/drivers/hwtracing/Kconfig
@@ -5,4 +5,6 @@ source "drivers/hwtracing/stm/Kconfig"
source "drivers/hwtracing/intel_th/Kconfig"
+source "drivers/hwtracing/ptt/Kconfig"
+
endmenu
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 514a9b8086e3..45c1eb5dfcb7 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -193,10 +193,10 @@ config CORESIGHT_TRBE
depends on ARM64 && CORESIGHT_SOURCE_ETM4X
help
This driver provides support for percpu Trace Buffer Extension (TRBE).
- TRBE always needs to be used along with it's corresponding percpu ETE
+ TRBE always needs to be used along with its corresponding percpu ETE
component. ETE generates trace data which is then captured with TRBE.
Unlike traditional sink devices, TRBE is a CPU feature accessible via
- system registers. But it's explicit dependency with trace unit (ETE)
+ system registers. But its explicit dependency with trace unit (ETE)
requires it to be plugged in as a coresight sink device.
To compile this driver as a module, choose M here: the module will be
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index e0740c6dbd54..bc90a03f478f 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -365,26 +365,15 @@ static const struct etr_buf_operations etr_catu_buf_ops = {
.get_data = catu_get_data_etr_buf,
};
-coresight_simple_reg32(struct catu_drvdata, devid, CORESIGHT_DEVID);
-coresight_simple_reg32(struct catu_drvdata, control, CATU_CONTROL);
-coresight_simple_reg32(struct catu_drvdata, status, CATU_STATUS);
-coresight_simple_reg32(struct catu_drvdata, mode, CATU_MODE);
-coresight_simple_reg32(struct catu_drvdata, axictrl, CATU_AXICTRL);
-coresight_simple_reg32(struct catu_drvdata, irqen, CATU_IRQEN);
-coresight_simple_reg64(struct catu_drvdata, sladdr,
- CATU_SLADDRLO, CATU_SLADDRHI);
-coresight_simple_reg64(struct catu_drvdata, inaddr,
- CATU_INADDRLO, CATU_INADDRHI);
-
static struct attribute *catu_mgmt_attrs[] = {
- &dev_attr_devid.attr,
- &dev_attr_control.attr,
- &dev_attr_status.attr,
- &dev_attr_mode.attr,
- &dev_attr_axictrl.attr,
- &dev_attr_irqen.attr,
- &dev_attr_sladdr.attr,
- &dev_attr_inaddr.attr,
+ coresight_simple_reg32(devid, CORESIGHT_DEVID),
+ coresight_simple_reg32(control, CATU_CONTROL),
+ coresight_simple_reg32(status, CATU_STATUS),
+ coresight_simple_reg32(mode, CATU_MODE),
+ coresight_simple_reg32(axictrl, CATU_AXICTRL),
+ coresight_simple_reg32(irqen, CATU_IRQEN),
+ coresight_simple_reg64(sladdr, CATU_SLADDRLO, CATU_SLADDRHI),
+ coresight_simple_reg64(inaddr, CATU_INADDRLO, CATU_INADDRHI),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 6160c2d75a56..442e034bbfba 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -70,24 +70,24 @@ struct catu_drvdata {
static inline u32 \
catu_read_##name(struct catu_drvdata *drvdata) \
{ \
- return coresight_read_reg_pair(drvdata->base, offset, -1); \
+ return csdev_access_relaxed_read32(&drvdata->csdev->access, offset); \
} \
static inline void \
catu_write_##name(struct catu_drvdata *drvdata, u32 val) \
{ \
- coresight_write_reg_pair(drvdata->base, val, offset, -1); \
+ csdev_access_relaxed_write32(&drvdata->csdev->access, val, offset); \
}
#define CATU_REG_PAIR(name, lo_off, hi_off) \
static inline u64 \
catu_read_##name(struct catu_drvdata *drvdata) \
{ \
- return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+ return csdev_access_relaxed_read_pair(&drvdata->csdev->access, lo_off, hi_off); \
} \
static inline void \
catu_write_##name(struct catu_drvdata *drvdata, u64 val) \
{ \
- coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+ csdev_access_relaxed_write_pair(&drvdata->csdev->access, val, lo_off, hi_off); \
}
CATU_REG32(control, CATU_CONTROL);
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 1edfec1e9d18..d5dbc67bacb4 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -60,6 +60,34 @@ EXPORT_SYMBOL_GPL(coresight_barrier_pkt);
static const struct cti_assoc_op *cti_assoc_ops;
+ssize_t coresight_simple_show_pair(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
+ struct cs_pair_attribute *cs_attr = container_of(attr, struct cs_pair_attribute, attr);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = csdev_access_relaxed_read_pair(&csdev->access, cs_attr->lo_off, cs_attr->hi_off);
+ pm_runtime_put_sync(_dev->parent);
+ return sysfs_emit(buf, "0x%llx\n", val);
+}
+EXPORT_SYMBOL_GPL(coresight_simple_show_pair);
+
+ssize_t coresight_simple_show32(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
+ struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = csdev_access_relaxed_read32(&csdev->access, cs_attr->off);
+ pm_runtime_put_sync(_dev->parent);
+ return sysfs_emit(buf, "0x%llx\n", val);
+}
+EXPORT_SYMBOL_GPL(coresight_simple_show32);
+
void coresight_set_cti_ops(const struct cti_assoc_op *cti_op)
{
cti_assoc_ops = cti_op;
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index 7ff7e7780bbf..6d59c815ecf5 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -163,48 +163,82 @@ static struct attribute *coresight_cti_attrs[] = {
/* register based attributes */
-/* macro to access RO registers with power check only (no enable check). */
-#define coresight_cti_reg(name, offset) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
- u32 val = 0; \
- pm_runtime_get_sync(dev->parent); \
- spin_lock(&drvdata->spinlock); \
- if (drvdata->config.hw_powered) \
- val = readl_relaxed(drvdata->base + offset); \
- spin_unlock(&drvdata->spinlock); \
- pm_runtime_put_sync(dev->parent); \
- return sprintf(buf, "0x%x\n", val); \
-} \
-static DEVICE_ATTR_RO(name)
+/* Read registers with power check only (no enable check). */
+static ssize_t coresight_cti_reg_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cs_off_attribute *cti_attr = container_of(attr, struct cs_off_attribute, attr);
+ u32 val = 0;
-/* coresight management registers */
-coresight_cti_reg(devaff0, CTIDEVAFF0);
-coresight_cti_reg(devaff1, CTIDEVAFF1);
-coresight_cti_reg(authstatus, CORESIGHT_AUTHSTATUS);
-coresight_cti_reg(devarch, CORESIGHT_DEVARCH);
-coresight_cti_reg(devid, CORESIGHT_DEVID);
-coresight_cti_reg(devtype, CORESIGHT_DEVTYPE);
-coresight_cti_reg(pidr0, CORESIGHT_PERIPHIDR0);
-coresight_cti_reg(pidr1, CORESIGHT_PERIPHIDR1);
-coresight_cti_reg(pidr2, CORESIGHT_PERIPHIDR2);
-coresight_cti_reg(pidr3, CORESIGHT_PERIPHIDR3);
-coresight_cti_reg(pidr4, CORESIGHT_PERIPHIDR4);
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+ if (drvdata->config.hw_powered)
+ val = readl_relaxed(drvdata->base + cti_attr->off);
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put_sync(dev->parent);
+ return sysfs_emit(buf, "0x%x\n", val);
+}
+/* Write registers with power check only (no enable check). */
+static __maybe_unused ssize_t coresight_cti_reg_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cs_off_attribute *cti_attr = container_of(attr, struct cs_off_attribute, attr);
+ unsigned long val = 0;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+ if (drvdata->config.hw_powered)
+ cti_write_single_reg(drvdata, cti_attr->off, val);
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put_sync(dev->parent);
+ return size;
+}
+
+#define coresight_cti_reg(name, offset) \
+ (&((struct cs_off_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_cti_reg_show, NULL), \
+ offset \
+ } \
+ })[0].attr.attr)
+
+#define coresight_cti_reg_rw(name, offset) \
+ (&((struct cs_off_attribute[]) { \
+ { \
+ __ATTR(name, 0644, coresight_cti_reg_show, \
+ coresight_cti_reg_store), \
+ offset \
+ } \
+ })[0].attr.attr)
+
+#define coresight_cti_reg_wo(name, offset) \
+ (&((struct cs_off_attribute[]) { \
+ { \
+ __ATTR(name, 0200, NULL, coresight_cti_reg_store), \
+ offset \
+ } \
+ })[0].attr.attr)
+
+/* coresight management registers */
static struct attribute *coresight_cti_mgmt_attrs[] = {
- &dev_attr_devaff0.attr,
- &dev_attr_devaff1.attr,
- &dev_attr_authstatus.attr,
- &dev_attr_devarch.attr,
- &dev_attr_devid.attr,
- &dev_attr_devtype.attr,
- &dev_attr_pidr0.attr,
- &dev_attr_pidr1.attr,
- &dev_attr_pidr2.attr,
- &dev_attr_pidr3.attr,
- &dev_attr_pidr4.attr,
+ coresight_cti_reg(devaff0, CTIDEVAFF0),
+ coresight_cti_reg(devaff1, CTIDEVAFF1),
+ coresight_cti_reg(authstatus, CORESIGHT_AUTHSTATUS),
+ coresight_cti_reg(devarch, CORESIGHT_DEVARCH),
+ coresight_cti_reg(devid, CORESIGHT_DEVID),
+ coresight_cti_reg(devtype, CORESIGHT_DEVTYPE),
+ coresight_cti_reg(pidr0, CORESIGHT_PERIPHIDR0),
+ coresight_cti_reg(pidr1, CORESIGHT_PERIPHIDR1),
+ coresight_cti_reg(pidr2, CORESIGHT_PERIPHIDR2),
+ coresight_cti_reg(pidr3, CORESIGHT_PERIPHIDR3),
+ coresight_cti_reg(pidr4, CORESIGHT_PERIPHIDR4),
NULL,
};
@@ -454,86 +488,11 @@ static ssize_t apppulse_store(struct device *dev,
}
static DEVICE_ATTR_WO(apppulse);
-coresight_cti_reg(triginstatus, CTITRIGINSTATUS);
-coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS);
-coresight_cti_reg(chinstatus, CTICHINSTATUS);
-coresight_cti_reg(choutstatus, CTICHOUTSTATUS);
-
/*
* Define CONFIG_CORESIGHT_CTI_INTEGRATION_REGS to enable the access to the
* integration control registers. Normally only used to investigate connection
* data.
*/
-#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
-
-/* macro to access RW registers with power check only (no enable check). */
-#define coresight_cti_reg_rw(name, offset) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
- u32 val = 0; \
- pm_runtime_get_sync(dev->parent); \
- spin_lock(&drvdata->spinlock); \
- if (drvdata->config.hw_powered) \
- val = readl_relaxed(drvdata->base + offset); \
- spin_unlock(&drvdata->spinlock); \
- pm_runtime_put_sync(dev->parent); \
- return sprintf(buf, "0x%x\n", val); \
-} \
- \
-static ssize_t name##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t size) \
-{ \
- struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
- unsigned long val = 0; \
- if (kstrtoul(buf, 0, &val)) \
- return -EINVAL; \
- \
- pm_runtime_get_sync(dev->parent); \
- spin_lock(&drvdata->spinlock); \
- if (drvdata->config.hw_powered) \
- cti_write_single_reg(drvdata, offset, val); \
- spin_unlock(&drvdata->spinlock); \
- pm_runtime_put_sync(dev->parent); \
- return size; \
-} \
-static DEVICE_ATTR_RW(name)
-
-/* macro to access WO registers with power check only (no enable check). */
-#define coresight_cti_reg_wo(name, offset) \
-static ssize_t name##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t size) \
-{ \
- struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
- unsigned long val = 0; \
- if (kstrtoul(buf, 0, &val)) \
- return -EINVAL; \
- \
- pm_runtime_get_sync(dev->parent); \
- spin_lock(&drvdata->spinlock); \
- if (drvdata->config.hw_powered) \
- cti_write_single_reg(drvdata, offset, val); \
- spin_unlock(&drvdata->spinlock); \
- pm_runtime_put_sync(dev->parent); \
- return size; \
-} \
-static DEVICE_ATTR_WO(name)
-
-coresight_cti_reg_rw(itchout, ITCHOUT);
-coresight_cti_reg_rw(ittrigout, ITTRIGOUT);
-coresight_cti_reg_rw(itctrl, CORESIGHT_ITCTRL);
-coresight_cti_reg_wo(itchinack, ITCHINACK);
-coresight_cti_reg_wo(ittriginack, ITTRIGINACK);
-coresight_cti_reg(ittrigin, ITTRIGIN);
-coresight_cti_reg(itchin, ITCHIN);
-coresight_cti_reg(itchoutack, ITCHOUTACK);
-coresight_cti_reg(ittrigoutack, ITTRIGOUTACK);
-
-#endif /* CORESIGHT_CTI_INTEGRATION_REGS */
-
static struct attribute *coresight_cti_regs_attrs[] = {
&dev_attr_inout_sel.attr,
&dev_attr_inen.attr,
@@ -544,20 +503,20 @@ static struct attribute *coresight_cti_regs_attrs[] = {
&dev_attr_appset.attr,
&dev_attr_appclear.attr,
&dev_attr_apppulse.attr,
- &dev_attr_triginstatus.attr,
- &dev_attr_trigoutstatus.attr,
- &dev_attr_chinstatus.attr,
- &dev_attr_choutstatus.attr,
+ coresight_cti_reg(triginstatus, CTITRIGINSTATUS),
+ coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS),
+ coresight_cti_reg(chinstatus, CTICHINSTATUS),
+ coresight_cti_reg(choutstatus, CTICHOUTSTATUS),
#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
- &dev_attr_itctrl.attr,
- &dev_attr_ittrigin.attr,
- &dev_attr_itchin.attr,
- &dev_attr_ittrigout.attr,
- &dev_attr_itchout.attr,
- &dev_attr_itchoutack.attr,
- &dev_attr_ittrigoutack.attr,
- &dev_attr_ittriginack.attr,
- &dev_attr_itchinack.attr,
+ coresight_cti_reg_rw(itctrl, CORESIGHT_ITCTRL),
+ coresight_cti_reg(ittrigin, ITTRIGIN),
+ coresight_cti_reg(itchin, ITCHIN),
+ coresight_cti_reg_rw(ittrigout, ITTRIGOUT),
+ coresight_cti_reg_rw(itchout, ITCHOUT),
+ coresight_cti_reg(itchoutack, ITCHOUTACK),
+ coresight_cti_reg(ittrigoutack, ITTRIGOUTACK),
+ coresight_cti_reg_wo(ittriginack, ITTRIGINACK),
+ coresight_cti_reg_wo(itchinack, ITCHINACK),
#endif
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index efa39820acec..8aa6e4f83e42 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -655,27 +655,15 @@ static const struct file_operations etb_fops = {
.llseek = no_llseek,
};
-#define coresight_etb10_reg(name, offset) \
- coresight_simple_reg32(struct etb_drvdata, name, offset)
-
-coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
-coresight_etb10_reg(sts, ETB_STATUS_REG);
-coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
-coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
-coresight_etb10_reg(trg, ETB_TRG);
-coresight_etb10_reg(ctl, ETB_CTL_REG);
-coresight_etb10_reg(ffsr, ETB_FFSR);
-coresight_etb10_reg(ffcr, ETB_FFCR);
-
static struct attribute *coresight_etb_mgmt_attrs[] = {
- &dev_attr_rdp.attr,
- &dev_attr_sts.attr,
- &dev_attr_rrp.attr,
- &dev_attr_rwp.attr,
- &dev_attr_trg.attr,
- &dev_attr_ctl.attr,
- &dev_attr_ffsr.attr,
- &dev_attr_ffcr.attr,
+ coresight_simple_reg32(rdp, ETB_RAM_DEPTH_REG),
+ coresight_simple_reg32(sts, ETB_STATUS_REG),
+ coresight_simple_reg32(rrp, ETB_RAM_READ_POINTER),
+ coresight_simple_reg32(rwp, ETB_RAM_WRITE_POINTER),
+ coresight_simple_reg32(trg, ETB_TRG),
+ coresight_simple_reg32(ctl, ETB_CTL_REG),
+ coresight_simple_reg32(ffsr, ETB_FFSR),
+ coresight_simple_reg32(ffcr, ETB_FFCR),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 68fcbf4ce7a8..fd81eca3ec18 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -1252,31 +1252,17 @@ static struct attribute *coresight_etm_attrs[] = {
NULL,
};
-#define coresight_etm3x_reg(name, offset) \
- coresight_simple_reg32(struct etm_drvdata, name, offset)
-
-coresight_etm3x_reg(etmccr, ETMCCR);
-coresight_etm3x_reg(etmccer, ETMCCER);
-coresight_etm3x_reg(etmscr, ETMSCR);
-coresight_etm3x_reg(etmidr, ETMIDR);
-coresight_etm3x_reg(etmcr, ETMCR);
-coresight_etm3x_reg(etmtraceidr, ETMTRACEIDR);
-coresight_etm3x_reg(etmteevr, ETMTEEVR);
-coresight_etm3x_reg(etmtssvr, ETMTSSCR);
-coresight_etm3x_reg(etmtecr1, ETMTECR1);
-coresight_etm3x_reg(etmtecr2, ETMTECR2);
-
static struct attribute *coresight_etm_mgmt_attrs[] = {
- &dev_attr_etmccr.attr,
- &dev_attr_etmccer.attr,
- &dev_attr_etmscr.attr,
- &dev_attr_etmidr.attr,
- &dev_attr_etmcr.attr,
- &dev_attr_etmtraceidr.attr,
- &dev_attr_etmteevr.attr,
- &dev_attr_etmtssvr.attr,
- &dev_attr_etmtecr1.attr,
- &dev_attr_etmtecr2.attr,
+ coresight_simple_reg32(etmccr, ETMCCR),
+ coresight_simple_reg32(etmccer, ETMCCER),
+ coresight_simple_reg32(etmscr, ETMSCR),
+ coresight_simple_reg32(etmidr, ETMIDR),
+ coresight_simple_reg32(etmcr, ETMCR),
+ coresight_simple_reg32(etmtraceidr, ETMTRACEIDR),
+ coresight_simple_reg32(etmteevr, ETMTEEVR),
+ coresight_simple_reg32(etmtssvr, ETMTSSCR),
+ coresight_simple_reg32(etmtecr1, ETMTECR1),
+ coresight_simple_reg32(etmtecr2, ETMTECR2),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 6ea8181816fc..9cac848cffaf 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2306,6 +2306,34 @@ static ssize_t cpu_show(struct device *dev,
}
static DEVICE_ATTR_RO(cpu);
+static ssize_t ts_source_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (!drvdata->trfcr) {
+ val = -1;
+ goto out;
+ }
+
+ switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
+ case TRFCR_ELx_TS_VIRTUAL:
+ case TRFCR_ELx_TS_GUEST_PHYSICAL:
+ case TRFCR_ELx_TS_PHYSICAL:
+ val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
+ break;
+ default:
+ val = -1;
+ break;
+ }
+
+out:
+ return sysfs_emit(buf, "%d\n", val);
+}
+static DEVICE_ATTR_RO(ts_source);
+
static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_nr_pe_cmp.attr,
&dev_attr_nr_addr_cmp.attr,
@@ -2360,6 +2388,7 @@ static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_vmid_val.attr,
&dev_attr_vmid_masks.attr,
&dev_attr_cpu.attr,
+ &dev_attr_ts_source.attr,
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index ff1dd2092ac5..595ce5862056 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -39,32 +39,37 @@
#define ETM_MODE_EXCL_KERN BIT(30)
#define ETM_MODE_EXCL_USER BIT(31)
+struct cs_pair_attribute {
+ struct device_attribute attr;
+ u32 lo_off;
+ u32 hi_off;
+};
+
+struct cs_off_attribute {
+ struct device_attribute attr;
+ u32 off;
+};
-typedef u32 (*coresight_read_fn)(const struct device *, u32 offset);
-#define __coresight_simple_func(type, func, name, lo_off, hi_off) \
-static ssize_t name##_show(struct device *_dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- type *drvdata = dev_get_drvdata(_dev->parent); \
- coresight_read_fn fn = func; \
- u64 val; \
- pm_runtime_get_sync(_dev->parent); \
- if (fn) \
- val = (u64)fn(_dev->parent, lo_off); \
- else \
- val = coresight_read_reg_pair(drvdata->base, \
- lo_off, hi_off); \
- pm_runtime_put_sync(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n", val); \
-} \
-static DEVICE_ATTR_RO(name)
-
-#define coresight_simple_func(type, func, name, offset) \
- __coresight_simple_func(type, func, name, offset, -1)
-#define coresight_simple_reg32(type, name, offset) \
- __coresight_simple_func(type, NULL, name, offset, -1)
-#define coresight_simple_reg64(type, name, lo_off, hi_off) \
- __coresight_simple_func(type, NULL, name, lo_off, hi_off)
+extern ssize_t coresight_simple_show32(struct device *_dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t coresight_simple_show_pair(struct device *_dev,
+ struct device_attribute *attr, char *buf);
+
+#define coresight_simple_reg32(name, offset) \
+ (&((struct cs_off_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_simple_show32, NULL), \
+ offset \
+ } \
+ })[0].attr.attr)
+
+#define coresight_simple_reg64(name, lo_off, hi_off) \
+ (&((struct cs_pair_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_simple_show_pair, NULL), \
+ lo_off, hi_off \
+ } \
+ })[0].attr.attr)
extern const u32 coresight_barrier_pkt[4];
#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(coresight_barrier_pkt))
@@ -127,25 +132,6 @@ static inline void CS_UNLOCK(void __iomem *addr)
} while (0);
}
-static inline u64
-coresight_read_reg_pair(void __iomem *addr, s32 lo_offset, s32 hi_offset)
-{
- u64 val;
-
- val = readl_relaxed(addr + lo_offset);
- val |= (hi_offset < 0) ? 0 :
- (u64)readl_relaxed(addr + hi_offset) << 32;
- return val;
-}
-
-static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
- s32 lo_offset, s32 hi_offset)
-{
- writel_relaxed((u32)val, addr + lo_offset);
- if (hi_offset >= 0)
- writel_relaxed((u32)(val >> 32), addr + hi_offset);
-}
-
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
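
The coresight_simple_reg32()/reg64() macros above rely on a C99 compound literal with static storage duration: every slot in a driver's attribute table points into a one-element anonymous array, so the register offset travels with the attribute itself and one shared show() routine can recover it from the attribute pointer. A stand-alone sketch of that pattern, with illustrative names (off_attr, REG, show) that are not part of the patch:

    #include <stddef.h>
    #include <stdio.h>

    struct base_attr { const char *name; };          /* stands in for device_attribute */
    struct off_attr  { struct base_attr attr; unsigned int off; };

    /* Compound literal with static storage duration; its address is a constant. */
    #define REG(_name, _off) \
            (&((struct off_attr[]) { { { #_name }, (_off) } })[0].attr)

    static struct base_attr *table[] = {
            REG(devid,  0xfc8),
            REG(status, 0x00c),
            NULL,
    };

    /* The shared "show" recovers the payload, container_of()-style. */
    static void show(struct base_attr *attr)
    {
            struct off_attr *oa = (struct off_attr *)
                    ((char *)attr - offsetof(struct off_attr, attr));

            printf("%s -> 0x%x\n", oa->attr.name, oa->off);
    }

    int main(void)
    {
            for (struct base_attr **p = table; *p; p++)
                    show(*p);
            return 0;
    }
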
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b86acbc74cf0..4dd50546d7e4 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -196,15 +196,9 @@ static const struct coresight_ops replicator_cs_ops = {
.link_ops = &replicator_link_ops,
};
-#define coresight_replicator_reg(name, offset) \
- coresight_simple_reg32(struct replicator_drvdata, name, offset)
-
-coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
-coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
-
static struct attribute *replicator_mgmt_attrs[] = {
- &dev_attr_idfilter0.attr,
- &dev_attr_idfilter1.attr,
+ coresight_simple_reg32(idfilter0, REPLICATOR_IDFILTER0),
+ coresight_simple_reg32(idfilter1, REPLICATOR_IDFILTER1),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index bb14a3a8a921..463f449cfb79 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -634,22 +634,6 @@ static ssize_t traceid_store(struct device *dev,
}
static DEVICE_ATTR_RW(traceid);
-#define coresight_stm_reg(name, offset) \
- coresight_simple_reg32(struct stm_drvdata, name, offset)
-
-coresight_stm_reg(tcsr, STMTCSR);
-coresight_stm_reg(tsfreqr, STMTSFREQR);
-coresight_stm_reg(syncr, STMSYNCR);
-coresight_stm_reg(sper, STMSPER);
-coresight_stm_reg(spter, STMSPTER);
-coresight_stm_reg(privmaskr, STMPRIVMASKR);
-coresight_stm_reg(spscr, STMSPSCR);
-coresight_stm_reg(spmscr, STMSPMSCR);
-coresight_stm_reg(spfeat1r, STMSPFEAT1R);
-coresight_stm_reg(spfeat2r, STMSPFEAT2R);
-coresight_stm_reg(spfeat3r, STMSPFEAT3R);
-coresight_stm_reg(devid, CORESIGHT_DEVID);
-
static struct attribute *coresight_stm_attrs[] = {
&dev_attr_hwevent_enable.attr,
&dev_attr_hwevent_select.attr,
@@ -660,18 +644,18 @@ static struct attribute *coresight_stm_attrs[] = {
};
static struct attribute *coresight_stm_mgmt_attrs[] = {
- &dev_attr_tcsr.attr,
- &dev_attr_tsfreqr.attr,
- &dev_attr_syncr.attr,
- &dev_attr_sper.attr,
- &dev_attr_spter.attr,
- &dev_attr_privmaskr.attr,
- &dev_attr_spscr.attr,
- &dev_attr_spmscr.attr,
- &dev_attr_spfeat1r.attr,
- &dev_attr_spfeat2r.attr,
- &dev_attr_spfeat3r.attr,
- &dev_attr_devid.attr,
+ coresight_simple_reg32(tcsr, STMTCSR),
+ coresight_simple_reg32(tsfreqr, STMTSFREQR),
+ coresight_simple_reg32(syncr, STMSYNCR),
+ coresight_simple_reg32(sper, STMSPER),
+ coresight_simple_reg32(spter, STMSPTER),
+ coresight_simple_reg32(privmaskr, STMPRIVMASKR),
+ coresight_simple_reg32(spscr, STMSPSCR),
+ coresight_simple_reg32(spmscr, STMSPMSCR),
+ coresight_simple_reg32(spfeat1r, STMSPFEAT1R),
+ coresight_simple_reg32(spfeat2r, STMSPFEAT2R),
+ coresight_simple_reg32(spfeat3r, STMSPFEAT3R),
+ coresight_simple_reg32(devid, CORESIGHT_DEVID),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index d0276af82494..07abf28ad725 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -251,41 +251,21 @@ static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
return memwidth;
}
-#define coresight_tmc_reg(name, offset) \
- coresight_simple_reg32(struct tmc_drvdata, name, offset)
-#define coresight_tmc_reg64(name, lo_off, hi_off) \
- coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)
-
-coresight_tmc_reg(rsz, TMC_RSZ);
-coresight_tmc_reg(sts, TMC_STS);
-coresight_tmc_reg(trg, TMC_TRG);
-coresight_tmc_reg(ctl, TMC_CTL);
-coresight_tmc_reg(ffsr, TMC_FFSR);
-coresight_tmc_reg(ffcr, TMC_FFCR);
-coresight_tmc_reg(mode, TMC_MODE);
-coresight_tmc_reg(pscr, TMC_PSCR);
-coresight_tmc_reg(axictl, TMC_AXICTL);
-coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
-coresight_tmc_reg(devid, CORESIGHT_DEVID);
-coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
-coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
-coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
-
static struct attribute *coresight_tmc_mgmt_attrs[] = {
- &dev_attr_rsz.attr,
- &dev_attr_sts.attr,
- &dev_attr_rrp.attr,
- &dev_attr_rwp.attr,
- &dev_attr_trg.attr,
- &dev_attr_ctl.attr,
- &dev_attr_ffsr.attr,
- &dev_attr_ffcr.attr,
- &dev_attr_mode.attr,
- &dev_attr_pscr.attr,
- &dev_attr_devid.attr,
- &dev_attr_dba.attr,
- &dev_attr_axictl.attr,
- &dev_attr_authstatus.attr,
+ coresight_simple_reg32(rsz, TMC_RSZ),
+ coresight_simple_reg32(sts, TMC_STS),
+ coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
+ coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
+ coresight_simple_reg32(trg, TMC_TRG),
+ coresight_simple_reg32(ctl, TMC_CTL),
+ coresight_simple_reg32(ffsr, TMC_FFSR),
+ coresight_simple_reg32(ffcr, TMC_FFCR),
+ coresight_simple_reg32(mode, TMC_MODE),
+ coresight_simple_reg32(pscr, TMC_PSCR),
+ coresight_simple_reg32(devid, CORESIGHT_DEVID),
+ coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
+ coresight_simple_reg32(axictl, TMC_AXICTL),
+ coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 6bec20a392b3..66959557cf39 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -282,12 +282,12 @@ ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
static inline u64 \
tmc_read_##name(struct tmc_drvdata *drvdata) \
{ \
- return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+ return csdev_access_relaxed_read_pair(&drvdata->csdev->access, lo_off, hi_off); \
} \
static inline void \
tmc_write_##name(struct tmc_drvdata *drvdata, u64 val) \
{ \
- coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+ csdev_access_relaxed_write_pair(&drvdata->csdev->access, val, lo_off, hi_off); \
}
TMC_REG_PAIR(rrp, TMC_RRP, TMC_RRPHI)
diff --git a/drivers/hwtracing/ptt/Kconfig b/drivers/hwtracing/ptt/Kconfig
new file mode 100644
index 000000000000..6d46a09ffeb9
--- /dev/null
+++ b/drivers/hwtracing/ptt/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HISI_PTT
+ tristate "HiSilicon PCIe Tune and Trace Device"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on PCI && HAS_DMA && HAS_IOMEM && PERF_EVENTS
+ help
+	  The HiSilicon PCIe Tune and Trace device exists as a PCIe RCiEP
+	  device, and it provides support for PCIe traffic tuning and for
+	  tracing TLP headers to memory.
+
+ This driver can also be built as a module. If so, the module
+ will be called hisi_ptt.
diff --git a/drivers/hwtracing/ptt/Makefile b/drivers/hwtracing/ptt/Makefile
new file mode 100644
index 000000000000..908c09a98161
--- /dev/null
+++ b/drivers/hwtracing/ptt/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_HISI_PTT) += hisi_ptt.o
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
new file mode 100644
index 000000000000..5d5526aa60c4
--- /dev/null
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon PCIe tune and trace device
+ *
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/vmalloc.h>
+
+#include "hisi_ptt.h"
+
+/* Dynamic CPU hotplug state used by PTT */
+static enum cpuhp_state hisi_ptt_pmu_online;
+
+static bool hisi_ptt_wait_tuning_finish(struct hisi_ptt *hisi_ptt)
+{
+ u32 val;
+
+ return !readl_poll_timeout(hisi_ptt->iobase + HISI_PTT_TUNING_INT_STAT,
+ val, !(val & HISI_PTT_TUNING_INT_STAT_MASK),
+ HISI_PTT_WAIT_POLL_INTERVAL_US,
+ HISI_PTT_WAIT_TUNE_TIMEOUT_US);
+}
+
+static ssize_t hisi_ptt_tune_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
+ struct dev_ext_attribute *ext_attr;
+ struct hisi_ptt_tune_desc *desc;
+ u32 reg;
+ u16 val;
+
+ ext_attr = container_of(attr, struct dev_ext_attribute, attr);
+ desc = ext_attr->var;
+
+ mutex_lock(&hisi_ptt->tune_lock);
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
+ reg &= ~(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB);
+ reg |= FIELD_PREP(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB,
+ desc->event_code);
+ writel(reg, hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
+
+	/* Write all 1s to indicate this is a read operation */
+ writel(~0U, hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
+
+ if (!hisi_ptt_wait_tuning_finish(hisi_ptt)) {
+ mutex_unlock(&hisi_ptt->tune_lock);
+ return -ETIMEDOUT;
+ }
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
+ reg &= HISI_PTT_TUNING_DATA_VAL_MASK;
+ val = FIELD_GET(HISI_PTT_TUNING_DATA_VAL_MASK, reg);
+
+ mutex_unlock(&hisi_ptt->tune_lock);
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t hisi_ptt_tune_attr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
+ struct dev_ext_attribute *ext_attr;
+ struct hisi_ptt_tune_desc *desc;
+ u32 reg;
+ u16 val;
+
+ ext_attr = container_of(attr, struct dev_ext_attribute, attr);
+ desc = ext_attr->var;
+
+ if (kstrtou16(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&hisi_ptt->tune_lock);
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
+ reg &= ~(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB);
+ reg |= FIELD_PREP(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB,
+ desc->event_code);
+ writel(reg, hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
+ writel(FIELD_PREP(HISI_PTT_TUNING_DATA_VAL_MASK, val),
+ hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
+
+ if (!hisi_ptt_wait_tuning_finish(hisi_ptt)) {
+ mutex_unlock(&hisi_ptt->tune_lock);
+ return -ETIMEDOUT;
+ }
+
+ mutex_unlock(&hisi_ptt->tune_lock);
+ return count;
+}
+
+#define HISI_PTT_TUNE_ATTR(_name, _val, _show, _store) \
+ static struct hisi_ptt_tune_desc _name##_desc = { \
+ .name = #_name, \
+ .event_code = (_val), \
+ }; \
+ static struct dev_ext_attribute hisi_ptt_##_name##_attr = { \
+ .attr = __ATTR(_name, 0600, _show, _store), \
+ .var = &_name##_desc, \
+ }
+
+#define HISI_PTT_TUNE_ATTR_COMMON(_name, _val) \
+ HISI_PTT_TUNE_ATTR(_name, _val, \
+ hisi_ptt_tune_attr_show, \
+ hisi_ptt_tune_attr_store)
+
+/*
+ * The value of a tuning event is composed of two parts: the main event code
+ * in BIT[0,15] and the subevent code in BIT[16,23]. For example, qos_tx_cpl
+ * is a subevent of 'Tx path QoS control' which tunes the weight of Tx
+ * completion TLPs. See the hisi_ptt.rst documentation for more information.
+ */
+#define HISI_PTT_TUNE_QOS_TX_CPL (0x4 | (3 << 16))
+#define HISI_PTT_TUNE_QOS_TX_NP (0x4 | (4 << 16))
+#define HISI_PTT_TUNE_QOS_TX_P (0x4 | (5 << 16))
+#define HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL (0x5 | (6 << 16))
+#define HISI_PTT_TUNE_TX_ALLOC_BUF_LEVEL (0x5 | (7 << 16))
+
+HISI_PTT_TUNE_ATTR_COMMON(qos_tx_cpl, HISI_PTT_TUNE_QOS_TX_CPL);
+HISI_PTT_TUNE_ATTR_COMMON(qos_tx_np, HISI_PTT_TUNE_QOS_TX_NP);
+HISI_PTT_TUNE_ATTR_COMMON(qos_tx_p, HISI_PTT_TUNE_QOS_TX_P);
+HISI_PTT_TUNE_ATTR_COMMON(rx_alloc_buf_level, HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL);
+HISI_PTT_TUNE_ATTR_COMMON(tx_alloc_buf_level, HISI_PTT_TUNE_TX_ALLOC_BUF_LEVEL);
+
+static struct attribute *hisi_ptt_tune_attrs[] = {
+ &hisi_ptt_qos_tx_cpl_attr.attr.attr,
+ &hisi_ptt_qos_tx_np_attr.attr.attr,
+ &hisi_ptt_qos_tx_p_attr.attr.attr,
+ &hisi_ptt_rx_alloc_buf_level_attr.attr.attr,
+ &hisi_ptt_tx_alloc_buf_level_attr.attr.attr,
+ NULL,
+};
+
+static struct attribute_group hisi_ptt_tune_group = {
+ .name = "tune",
+ .attrs = hisi_ptt_tune_attrs,
+};
+
+static u16 hisi_ptt_get_filter_val(u16 devid, bool is_port)
+{
+ if (is_port)
+ return BIT(HISI_PCIE_CORE_PORT_ID(devid & 0xff));
+
+ return devid;
+}
+
+static bool hisi_ptt_wait_trace_hw_idle(struct hisi_ptt *hisi_ptt)
+{
+ u32 val;
+
+ return !readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_STS,
+ val, val & HISI_PTT_TRACE_IDLE,
+ HISI_PTT_WAIT_POLL_INTERVAL_US,
+ HISI_PTT_WAIT_TRACE_TIMEOUT_US);
+}
+
+static void hisi_ptt_wait_dma_reset_done(struct hisi_ptt *hisi_ptt)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS,
+ val, !val, HISI_PTT_RESET_POLL_INTERVAL_US,
+ HISI_PTT_RESET_TIMEOUT_US);
+}
+
+static void hisi_ptt_trace_end(struct hisi_ptt *hisi_ptt)
+{
+ writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+ hisi_ptt->trace_ctrl.started = false;
+}
+
+static int hisi_ptt_trace_start(struct hisi_ptt *hisi_ptt)
+{
+ struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
+ u32 val;
+ int i;
+
+	/* Check that the device is idle before starting the trace */
+ if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt)) {
+ pci_err(hisi_ptt->pdev, "Failed to start trace, the device is still busy\n");
+ return -EBUSY;
+ }
+
+ ctrl->started = true;
+
+	/* Reset the DMA before starting the trace */
+ val = readl(hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+ val |= HISI_PTT_TRACE_CTRL_RST;
+ writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+
+ hisi_ptt_wait_dma_reset_done(hisi_ptt);
+
+ val = readl(hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+ val &= ~HISI_PTT_TRACE_CTRL_RST;
+ writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+
+ /* Reset the index of current buffer */
+ hisi_ptt->trace_ctrl.buf_index = 0;
+
+ /* Zero the trace buffers */
+ for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++)
+ memset(ctrl->trace_buf[i].addr, 0, HISI_PTT_TRACE_BUF_SIZE);
+
+ /* Clear the interrupt status */
+ writel(HISI_PTT_TRACE_INT_STAT_MASK, hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
+ writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK);
+
+ /* Set the trace control register */
+ val = FIELD_PREP(HISI_PTT_TRACE_CTRL_TYPE_SEL, ctrl->type);
+ val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_RXTX_SEL, ctrl->direction);
+ val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_DATA_FORMAT, ctrl->format);
+ val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_TARGET_SEL, hisi_ptt->trace_ctrl.filter);
+ if (!hisi_ptt->trace_ctrl.is_port)
+ val |= HISI_PTT_TRACE_CTRL_FILTER_MODE;
+
+ /* Start the Trace */
+ val |= HISI_PTT_TRACE_CTRL_EN;
+ writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+
+ return 0;
+}
+
+static int hisi_ptt_update_aux(struct hisi_ptt *hisi_ptt, int index, bool stop)
+{
+ struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
+ struct perf_output_handle *handle = &ctrl->handle;
+ struct perf_event *event = handle->event;
+ struct hisi_ptt_pmu_buf *buf;
+ size_t size;
+ void *addr;
+
+ buf = perf_get_aux(handle);
+ if (!buf || !handle->size)
+ return -EINVAL;
+
+ addr = ctrl->trace_buf[ctrl->buf_index].addr;
+
+	/*
+	 * If we're going to stop, read the size of the already traced data
+	 * from HISI_PTT_TRACE_WR_STS. Otherwise we're coming from the
+	 * interrupt, and the data size is always HISI_PTT_TRACE_BUF_SIZE.
+	 */
+ if (stop) {
+ u32 reg;
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS);
+ size = FIELD_GET(HISI_PTT_TRACE_WR_STS_WRITE, reg);
+ } else {
+ size = HISI_PTT_TRACE_BUF_SIZE;
+ }
+
+ memcpy(buf->base + buf->pos, addr, size);
+ buf->pos += size;
+
+	/*
+	 * Just commit the traced data if we're going to stop. Otherwise, if
+	 * the remaining AUX buffer space cannot hold the data of the next
+	 * trace buffer, apply for a new one.
+	 */
+ if (stop) {
+ perf_aux_output_end(handle, buf->pos);
+ } else if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
+ perf_aux_output_end(handle, buf->pos);
+
+ buf = perf_aux_output_begin(handle, event);
+ if (!buf)
+ return -EINVAL;
+
+ buf->pos = handle->head % buf->length;
+ if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
+ perf_aux_output_end(handle, 0);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t hisi_ptt_isr(int irq, void *context)
+{
+ struct hisi_ptt *hisi_ptt = context;
+ u32 status, buf_idx;
+
+ status = readl(hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
+ if (!(status & HISI_PTT_TRACE_INT_STAT_MASK))
+ return IRQ_NONE;
+
+ buf_idx = ffs(status) - 1;
+
+ /* Clear the interrupt status of buffer @buf_idx */
+ writel(status, hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
+
+	/*
+	 * Update the AUX buffer and cache the current buffer index, as we
+	 * need to know it in order to save the data when the trace is ended
+	 * outside of the interrupt handler. End the trace if the update
+	 * fails.
+	 */
+ if (hisi_ptt_update_aux(hisi_ptt, buf_idx, false))
+ hisi_ptt_trace_end(hisi_ptt);
+ else
+ hisi_ptt->trace_ctrl.buf_index = (buf_idx + 1) % HISI_PTT_TRACE_BUF_CNT;
+
+ return IRQ_HANDLED;
+}
+
+static void hisi_ptt_irq_free_vectors(void *pdev)
+{
+ pci_free_irq_vectors(pdev);
+}
+
+static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
+{
+ struct pci_dev *pdev = hisi_ptt->pdev;
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ pci_err(pdev, "failed to allocate irq vector, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_irq_free_vectors, pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ),
+ NULL, hisi_ptt_isr, 0,
+ DRV_NAME, hisi_ptt);
+ if (ret) {
+ pci_err(pdev, "failed to request irq %d, ret = %d\n",
+ pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
+{
+ struct hisi_ptt_filter_desc *filter;
+ struct hisi_ptt *hisi_ptt = data;
+
+	/*
+	 * We won't fail the probe if filter allocation fails here. The
+	 * filters will only be partially initialized and users will know
+	 * which filter failed through the log. Other functions of the PTT
+	 * device are still available.
+	 */
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter) {
+ pci_err(hisi_ptt->pdev, "failed to add filter %s\n", pci_name(pdev));
+ return -ENOMEM;
+ }
+
+ filter->devid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ filter->is_port = true;
+ list_add_tail(&filter->list, &hisi_ptt->port_filters);
+
+ /* Update the available port mask */
+ hisi_ptt->port_mask |= hisi_ptt_get_filter_val(filter->devid, true);
+ } else {
+ list_add_tail(&filter->list, &hisi_ptt->req_filters);
+ }
+
+ return 0;
+}
+
+static void hisi_ptt_release_filters(void *data)
+{
+ struct hisi_ptt_filter_desc *filter, *tmp;
+ struct hisi_ptt *hisi_ptt = data;
+
+ list_for_each_entry_safe(filter, tmp, &hisi_ptt->req_filters, list) {
+ list_del(&filter->list);
+ kfree(filter);
+ }
+
+ list_for_each_entry_safe(filter, tmp, &hisi_ptt->port_filters, list) {
+ list_del(&filter->list);
+ kfree(filter);
+ }
+}
+
+static int hisi_ptt_config_trace_buf(struct hisi_ptt *hisi_ptt)
+{
+ struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
+ struct device *dev = &hisi_ptt->pdev->dev;
+ int i;
+
+ ctrl->trace_buf = devm_kcalloc(dev, HISI_PTT_TRACE_BUF_CNT,
+ sizeof(*ctrl->trace_buf), GFP_KERNEL);
+ if (!ctrl->trace_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; ++i) {
+ ctrl->trace_buf[i].addr = dmam_alloc_coherent(dev, HISI_PTT_TRACE_BUF_SIZE,
+ &ctrl->trace_buf[i].dma,
+ GFP_KERNEL);
+ if (!ctrl->trace_buf[i].addr)
+ return -ENOMEM;
+ }
+
+ /* Configure the trace DMA buffer */
+ for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++) {
+ writel(lower_32_bits(ctrl->trace_buf[i].dma),
+ hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_LO_0 +
+ i * HISI_PTT_TRACE_ADDR_STRIDE);
+ writel(upper_32_bits(ctrl->trace_buf[i].dma),
+ hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_HI_0 +
+ i * HISI_PTT_TRACE_ADDR_STRIDE);
+ }
+ writel(HISI_PTT_TRACE_BUF_SIZE, hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_SIZE);
+
+ return 0;
+}
+
+static int hisi_ptt_init_ctrls(struct hisi_ptt *hisi_ptt)
+{
+ struct pci_dev *pdev = hisi_ptt->pdev;
+ struct pci_bus *bus;
+ int ret;
+ u32 reg;
+
+ INIT_LIST_HEAD(&hisi_ptt->port_filters);
+ INIT_LIST_HEAD(&hisi_ptt->req_filters);
+
+ ret = hisi_ptt_config_trace_buf(hisi_ptt);
+ if (ret)
+ return ret;
+
+	/*
+	 * The device range register provides information about the root
+	 * ports which the RCiEP can control and trace. The RCiEP and the
+	 * root ports it supports are on the same PCIe core, with the same
+	 * domain number but possibly different bus numbers. The register
+	 * tells us which root ports we can support: Bit[31:16] indicates
+	 * the upper BDF number of the root ports, while Bit[15:0] indicates
+	 * the lower one.
+	 */
+ reg = readl(hisi_ptt->iobase + HISI_PTT_DEVICE_RANGE);
+ hisi_ptt->upper_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_UPPER, reg);
+ hisi_ptt->lower_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_LOWER, reg);
+
+ bus = pci_find_bus(pci_domain_nr(pdev->bus), PCI_BUS_NUM(hisi_ptt->upper_bdf));
+ if (bus)
+ pci_walk_bus(bus, hisi_ptt_init_filters, hisi_ptt);
+
+ ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_release_filters, hisi_ptt);
+ if (ret)
+ return ret;
+
+ hisi_ptt->trace_ctrl.on_cpu = -1;
+ return 0;
+}
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
+ const cpumask_t *cpumask = cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask);
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *hisi_ptt_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_ptt_cpumask_attr_group = {
+ .attrs = hisi_ptt_cpumask_attrs,
+};
+
+/*
+ * Bit 19 indicates the filter type: 1 for a Root Port filter and 0 for a
+ * Requester filter. Bit[15:0] indicates the filter value: for a Root Port
+ * filter it's a bit mask of the desired ports, and for a Requester filter
+ * it's the Requester ID of the desired PCIe function. Bit[18:16] is
+ * reserved for extension.
+ *
+ * See the hisi_ptt.rst documentation for detailed information.
+ */
+PMU_FORMAT_ATTR(filter, "config:0-19");
+PMU_FORMAT_ATTR(direction, "config:20-23");
+PMU_FORMAT_ATTR(type, "config:24-31");
+PMU_FORMAT_ATTR(format, "config:32-35");
+
+static struct attribute *hisi_ptt_pmu_format_attrs[] = {
+ &format_attr_filter.attr,
+ &format_attr_direction.attr,
+ &format_attr_type.attr,
+ &format_attr_format.attr,
+ NULL
+};
+
+static struct attribute_group hisi_ptt_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_ptt_pmu_format_attrs,
+};
+
+static const struct attribute_group *hisi_ptt_pmu_groups[] = {
+ &hisi_ptt_cpumask_attr_group,
+ &hisi_ptt_pmu_format_group,
+ &hisi_ptt_tune_group,
+ NULL
+};
+
+static int hisi_ptt_trace_valid_direction(u32 val)
+{
+	/*
+	 * The direction values have different effects according to the data
+	 * format (specified in the parentheses). TLP set A/B means different
+	 * sets of TLP types. See the hisi_ptt.rst documentation for more
+	 * details.
+	 */
+ static const u32 hisi_ptt_trace_available_direction[] = {
+ 0, /* inbound(4DW) or reserved(8DW) */
+ 1, /* outbound(4DW) */
+ 2, /* {in, out}bound(4DW) or inbound(8DW), TLP set A */
+ 3, /* {in, out}bound(4DW) or inbound(8DW), TLP set B */
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_direction); i++) {
+ if (val == hisi_ptt_trace_available_direction[i])
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hisi_ptt_trace_valid_type(u32 val)
+{
+ /* Different types can be set simultaneously */
+ static const u32 hisi_ptt_trace_available_type[] = {
+ 1, /* posted_request */
+ 2, /* non-posted_request */
+ 4, /* completion */
+ };
+ int i;
+
+ if (!val)
+ return -EINVAL;
+
+	/*
+	 * Walk the available list and clear the valid bits of the config.
+	 * If any bit remains set after the walk then the config is invalid.
+	 */
+ for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_type); i++)
+ val &= ~hisi_ptt_trace_available_type[i];
+
+ if (val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hisi_ptt_trace_valid_format(u32 val)
+{
+	static const u32 hisi_ptt_trace_available_format[] = {
+		0, /* 4DW */
+		1, /* 8DW */
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_format); i++) {
+		if (val == hisi_ptt_trace_available_format[i])
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hisi_ptt_trace_valid_filter(struct hisi_ptt *hisi_ptt, u64 config)
+{
+ unsigned long val, port_mask = hisi_ptt->port_mask;
+ struct hisi_ptt_filter_desc *filter;
+
+ hisi_ptt->trace_ctrl.is_port = FIELD_GET(HISI_PTT_PMU_FILTER_IS_PORT, config);
+ val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, config);
+
+	/*
+	 * Port filters are defined as a bit mask. For port filters, check
+	 * that the bits in @val are within the range of hisi_ptt->port_mask
+	 * and that the value is not empty; otherwise the user has specified
+	 * some unsupported root ports.
+	 *
+	 * For Requester ID filters, walk the available filter list to see
+	 * whether we have a match.
+	 */
+ if (!hisi_ptt->trace_ctrl.is_port) {
+ list_for_each_entry(filter, &hisi_ptt->req_filters, list) {
+ if (val == hisi_ptt_get_filter_val(filter->devid, filter->is_port))
+ return 0;
+ }
+ } else if (bitmap_subset(&val, &port_mask, BITS_PER_LONG)) {
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void hisi_ptt_pmu_init_configs(struct hisi_ptt *hisi_ptt, struct perf_event *event)
+{
+ struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
+ u32 val;
+
+ val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, event->attr.config);
+ hisi_ptt->trace_ctrl.filter = val;
+
+ val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config);
+ ctrl->direction = val;
+
+ val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config);
+ ctrl->type = val;
+
+ val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config);
+ ctrl->format = val;
+}
+
+static int hisi_ptt_pmu_event_init(struct perf_event *event)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
+ int ret;
+ u32 val;
+
+ if (event->cpu < 0) {
+ dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
+ return -ENOENT;
+
+ ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config);
+ ret = hisi_ptt_trace_valid_direction(val);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config);
+ ret = hisi_ptt_trace_valid_type(val);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config);
+ return hisi_ptt_trace_valid_format(val);
+}
+
+static void *hisi_ptt_pmu_setup_aux(struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
+{
+ struct hisi_ptt_pmu_buf *buf;
+ struct page **pagelist;
+ int i;
+
+ if (overwrite) {
+ dev_warn(event->pmu->dev, "Overwrite mode is not supported\n");
+ return NULL;
+ }
+
+	/*
+	 * If the AUX buffer is smaller than the total trace buffer size,
+	 * we cannot start the trace.
+	 */
+ if (nr_pages < HISI_PTT_TRACE_TOTAL_BUF_SIZE / PAGE_SIZE)
+ return NULL;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ pagelist = kcalloc(nr_pages, sizeof(*pagelist), GFP_KERNEL);
+ if (!pagelist)
+ goto err;
+
+ for (i = 0; i < nr_pages; i++)
+ pagelist[i] = virt_to_page(pages[i]);
+
+ buf->base = vmap(pagelist, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!buf->base) {
+ kfree(pagelist);
+ goto err;
+ }
+
+ buf->nr_pages = nr_pages;
+ buf->length = nr_pages * PAGE_SIZE;
+ buf->pos = 0;
+
+ kfree(pagelist);
+ return buf;
+err:
+ kfree(buf);
+ return NULL;
+}
+
+static void hisi_ptt_pmu_free_aux(void *aux)
+{
+ struct hisi_ptt_pmu_buf *buf = aux;
+
+ vunmap(buf->base);
+ kfree(buf);
+}
+
+static void hisi_ptt_pmu_start(struct perf_event *event, int flags)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
+ struct perf_output_handle *handle = &hisi_ptt->trace_ctrl.handle;
+ struct hw_perf_event *hwc = &event->hw;
+ struct device *dev = event->pmu->dev;
+ struct hisi_ptt_pmu_buf *buf;
+ int cpu = event->cpu;
+ int ret;
+
+ hwc->state = 0;
+
+	/* Serialize the perf process if the user specified several CPUs */
+ spin_lock(&hisi_ptt->pmu_lock);
+ if (hisi_ptt->trace_ctrl.started) {
+ dev_dbg(dev, "trace has already started\n");
+ goto stop;
+ }
+
+	/*
+	 * Handle the interrupt on the same CPU which starts the trace to
+	 * avoid a context mismatch. Otherwise we'll trigger the WARN from
+	 * the perf core in event_function_local(). If the CPU passed in is
+	 * offline we'll fail here; just log it, since there is nothing more
+	 * we can do.
+	 */
+ ret = irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ),
+ cpumask_of(cpu));
+ if (ret)
+ dev_warn(dev, "failed to set the affinity of trace interrupt\n");
+
+ hisi_ptt->trace_ctrl.on_cpu = cpu;
+
+ buf = perf_aux_output_begin(handle, event);
+ if (!buf) {
+ dev_dbg(dev, "aux output begin failed\n");
+ goto stop;
+ }
+
+ buf->pos = handle->head % buf->length;
+
+ hisi_ptt_pmu_init_configs(hisi_ptt, event);
+
+ ret = hisi_ptt_trace_start(hisi_ptt);
+ if (ret) {
+ dev_dbg(dev, "trace start failed, ret = %d\n", ret);
+ perf_aux_output_end(handle, 0);
+ goto stop;
+ }
+
+ spin_unlock(&hisi_ptt->pmu_lock);
+ return;
+stop:
+ event->hw.state |= PERF_HES_STOPPED;
+ spin_unlock(&hisi_ptt->pmu_lock);
+}
+
+static void hisi_ptt_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ spin_lock(&hisi_ptt->pmu_lock);
+ if (hisi_ptt->trace_ctrl.started) {
+ hisi_ptt_trace_end(hisi_ptt);
+
+ if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt))
+ dev_warn(event->pmu->dev, "Device is still busy\n");
+
+ hisi_ptt_update_aux(hisi_ptt, hisi_ptt->trace_ctrl.buf_index, true);
+ }
+ spin_unlock(&hisi_ptt->pmu_lock);
+
+ hwc->state |= PERF_HES_STOPPED;
+ perf_event_update_userpage(event);
+ hwc->state |= PERF_HES_UPTODATE;
+}
+
+static int hisi_ptt_pmu_add(struct perf_event *event, int flags)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int cpu = event->cpu;
+
+ /* Only allow the cpus on the device's node to add the event */
+ if (!cpumask_test_cpu(cpu, cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev))))
+ return 0;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START) {
+ hisi_ptt_pmu_start(event, PERF_EF_RELOAD);
+ if (hwc->state & PERF_HES_STOPPED)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
+{
+ hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
+}
+
+static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node);
+}
+
+static void hisi_ptt_unregister_pmu(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
+{
+ u16 core_id, sicl_id;
+ char *pmu_name;
+ u32 reg;
+ int ret;
+
+ ret = cpuhp_state_add_instance_nocalls(hisi_ptt_pmu_online,
+ &hisi_ptt->hotplug_node);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&hisi_ptt->pdev->dev,
+ hisi_ptt_remove_cpuhp_instance,
+ &hisi_ptt->hotplug_node);
+ if (ret)
+ return ret;
+
+ mutex_init(&hisi_ptt->tune_lock);
+ spin_lock_init(&hisi_ptt->pmu_lock);
+
+ hisi_ptt->hisi_ptt_pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
+ .task_ctx_nr = perf_sw_context,
+ .attr_groups = hisi_ptt_pmu_groups,
+ .event_init = hisi_ptt_pmu_event_init,
+ .setup_aux = hisi_ptt_pmu_setup_aux,
+ .free_aux = hisi_ptt_pmu_free_aux,
+ .start = hisi_ptt_pmu_start,
+ .stop = hisi_ptt_pmu_stop,
+ .add = hisi_ptt_pmu_add,
+ .del = hisi_ptt_pmu_del,
+ };
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION);
+ core_id = FIELD_GET(HISI_PTT_CORE_ID, reg);
+ sicl_id = FIELD_GET(HISI_PTT_SICL_ID, reg);
+
+ pmu_name = devm_kasprintf(&hisi_ptt->pdev->dev, GFP_KERNEL, "hisi_ptt%u_%u",
+ sicl_id, core_id);
+ if (!pmu_name)
+ return -ENOMEM;
+
+ ret = perf_pmu_register(&hisi_ptt->hisi_ptt_pmu, pmu_name, -1);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(&hisi_ptt->pdev->dev,
+ hisi_ptt_unregister_pmu,
+ &hisi_ptt->hisi_ptt_pmu);
+}
+
+/*
+ * The DMA of the PTT trace can only use direct mappings due to a hardware
+ * restriction. Check that there is either no IOMMU or that the policy of
+ * the IOMMU domain is passthrough; otherwise the trace cannot work.
+ *
+ * The PTT device is supposed to sit behind an ARM SMMUv3, which should
+ * have put the device in passthrough mode via a quirk.
+ */
+static int hisi_ptt_check_iommu_mapping(struct pci_dev *pdev)
+{
+ struct iommu_domain *iommu_domain;
+
+ iommu_domain = iommu_get_domain_for_dev(&pdev->dev);
+ if (!iommu_domain || iommu_domain->type == IOMMU_DOMAIN_IDENTITY)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int hisi_ptt_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct hisi_ptt *hisi_ptt;
+ int ret;
+
+ ret = hisi_ptt_check_iommu_mapping(pdev);
+ if (ret) {
+ pci_err(pdev, "requires direct DMA mappings\n");
+ return ret;
+ }
+
+ hisi_ptt = devm_kzalloc(&pdev->dev, sizeof(*hisi_ptt), GFP_KERNEL);
+ if (!hisi_ptt)
+ return -ENOMEM;
+
+ hisi_ptt->pdev = pdev;
+ pci_set_drvdata(pdev, hisi_ptt);
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ pci_err(pdev, "failed to enable device, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME);
+ if (ret) {
+ pci_err(pdev, "failed to remap io memory, ret = %d\n", ret);
+ return ret;
+ }
+
+ hisi_ptt->iobase = pcim_iomap_table(pdev)[2];
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ pci_err(pdev, "failed to set 64 bit dma mask, ret = %d\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ ret = hisi_ptt_register_irq(hisi_ptt);
+ if (ret)
+ return ret;
+
+ ret = hisi_ptt_init_ctrls(hisi_ptt);
+ if (ret) {
+ pci_err(pdev, "failed to init controls, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hisi_ptt_register_pmu(hisi_ptt);
+ if (ret) {
+ pci_err(pdev, "failed to register PMU device, ret = %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id hisi_ptt_id_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12e) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, hisi_ptt_id_tbl);
+
+static struct pci_driver hisi_ptt_driver = {
+ .name = DRV_NAME,
+ .id_table = hisi_ptt_id_tbl,
+ .probe = hisi_ptt_probe,
+};
+
+static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_ptt *hisi_ptt;
+ struct device *dev;
+ int target, src;
+
+ hisi_ptt = hlist_entry_safe(node, struct hisi_ptt, hotplug_node);
+ src = hisi_ptt->trace_ctrl.on_cpu;
+ dev = hisi_ptt->hisi_ptt_pmu.dev;
+
+ if (!hisi_ptt->trace_ctrl.started || src != cpu)
+ return 0;
+
+ target = cpumask_any_but(cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev)), cpu);
+ if (target >= nr_cpu_ids) {
+ dev_err(dev, "no available cpu for perf context migration\n");
+ return 0;
+ }
+
+ perf_pmu_migrate_context(&hisi_ptt->hisi_ptt_pmu, src, target);
+
+	/*
+	 * Also make sure the interrupt is bound to the migrated CPU as well.
+	 * Warn the user on failure here.
+	 */
+ if (irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ),
+ cpumask_of(target)))
+ dev_warn(dev, "failed to set the affinity of trace interrupt\n");
+
+ hisi_ptt->trace_ctrl.on_cpu = target;
+ return 0;
+}
+
+static int __init hisi_ptt_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRV_NAME, NULL,
+ hisi_ptt_cpu_teardown);
+ if (ret < 0)
+ return ret;
+ hisi_ptt_pmu_online = ret;
+
+ ret = pci_register_driver(&hisi_ptt_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_ptt_pmu_online);
+
+ return ret;
+}
+module_init(hisi_ptt_init);
+
+static void __exit hisi_ptt_exit(void)
+{
+ pci_unregister_driver(&hisi_ptt_driver);
+ cpuhp_remove_multi_state(hisi_ptt_pmu_online);
+}
+module_exit(hisi_ptt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_DESCRIPTION("Driver for HiSilicon PCIe tune and trace device");
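
For reference, the whole trace session above is configured through perf_event_attr::config, decoded with the HISI_PTT_PMU_* field masks defined in hisi_ptt.h below. A hedged sketch of how such a config word could be composed for a Root Port filter; the field values are made-up examples, the EX_* names merely mirror the driver's masks, and the authoritative encoding lives in the hisi_ptt.rst documentation and the valid_* checks above:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    /* Local copies for illustration; the driver's masks live in hisi_ptt.h. */
    #define EX_FILTER_IS_PORT       BIT(19)                 /* HISI_PTT_PMU_FILTER_IS_PORT */
    #define EX_FILTER_VAL_MASK      GENMASK(15, 0)          /* HISI_PTT_PMU_FILTER_VAL_MASK */
    #define EX_DIRECTION_MASK       GENMASK(23, 20)         /* HISI_PTT_PMU_DIRECTION_MASK */
    #define EX_TYPE_MASK            GENMASK(31, 24)         /* HISI_PTT_PMU_TYPE_MASK */
    #define EX_FORMAT_MASK          GENMASK_ULL(35, 32)     /* HISI_PTT_PMU_FORMAT_MASK */

    static u64 example_trace_config(void)
    {
            u64 config = 0;

            config |= EX_FILTER_IS_PORT;                    /* filter is a Root Port mask */
            config |= FIELD_PREP(EX_FILTER_VAL_MASK, 0x1);  /* one desired port */
            config |= FIELD_PREP(EX_DIRECTION_MASK, 1);     /* outbound (4DW) */
            config |= FIELD_PREP(EX_TYPE_MASK, 1);          /* posted requests */
            config |= FIELD_PREP(EX_FORMAT_MASK, 0);        /* 4DW data format */

            return config;          /* would land in perf_event_attr::config */
    }
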
diff --git a/drivers/hwtracing/ptt/hisi_ptt.h b/drivers/hwtracing/ptt/hisi_ptt.h
new file mode 100644
index 000000000000..5beb1648c93a
--- /dev/null
+++ b/drivers/hwtracing/ptt/hisi_ptt.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver for HiSilicon PCIe tune and trace device
+ *
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ */
+
+#ifndef _HISI_PTT_H
+#define _HISI_PTT_H
+
+#include <linux/bits.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define DRV_NAME "hisi_ptt"
+
+/*
+ * The definition of the device registers and register fields.
+ */
+#define HISI_PTT_TUNING_CTRL 0x0000
+#define HISI_PTT_TUNING_CTRL_CODE GENMASK(15, 0)
+#define HISI_PTT_TUNING_CTRL_SUB GENMASK(23, 16)
+#define HISI_PTT_TUNING_DATA 0x0004
+#define HISI_PTT_TUNING_DATA_VAL_MASK GENMASK(15, 0)
+#define HISI_PTT_TRACE_ADDR_SIZE 0x0800
+#define HISI_PTT_TRACE_ADDR_BASE_LO_0 0x0810
+#define HISI_PTT_TRACE_ADDR_BASE_HI_0 0x0814
+#define HISI_PTT_TRACE_ADDR_STRIDE 0x8
+#define HISI_PTT_TRACE_CTRL 0x0850
+#define HISI_PTT_TRACE_CTRL_EN BIT(0)
+#define HISI_PTT_TRACE_CTRL_RST BIT(1)
+#define HISI_PTT_TRACE_CTRL_RXTX_SEL GENMASK(3, 2)
+#define HISI_PTT_TRACE_CTRL_TYPE_SEL GENMASK(7, 4)
+#define HISI_PTT_TRACE_CTRL_DATA_FORMAT BIT(14)
+#define HISI_PTT_TRACE_CTRL_FILTER_MODE BIT(15)
+#define HISI_PTT_TRACE_CTRL_TARGET_SEL GENMASK(31, 16)
+#define HISI_PTT_TRACE_INT_STAT 0x0890
+#define HISI_PTT_TRACE_INT_STAT_MASK GENMASK(3, 0)
+#define HISI_PTT_TRACE_INT_MASK 0x0894
+#define HISI_PTT_TUNING_INT_STAT 0x0898
+#define HISI_PTT_TUNING_INT_STAT_MASK BIT(0)
+#define HISI_PTT_TRACE_WR_STS 0x08a0
+#define HISI_PTT_TRACE_WR_STS_WRITE GENMASK(27, 0)
+#define HISI_PTT_TRACE_WR_STS_BUFFER GENMASK(29, 28)
+#define HISI_PTT_TRACE_STS 0x08b0
+#define HISI_PTT_TRACE_IDLE BIT(0)
+#define HISI_PTT_DEVICE_RANGE 0x0fe0
+#define HISI_PTT_DEVICE_RANGE_UPPER GENMASK(31, 16)
+#define HISI_PTT_DEVICE_RANGE_LOWER GENMASK(15, 0)
+#define HISI_PTT_LOCATION 0x0fe8
+#define HISI_PTT_CORE_ID GENMASK(15, 0)
+#define HISI_PTT_SICL_ID GENMASK(31, 16)
+
+/* Parameters of PTT trace DMA part. */
+#define HISI_PTT_TRACE_DMA_IRQ 0
+#define HISI_PTT_TRACE_BUF_CNT 4
+#define HISI_PTT_TRACE_BUF_SIZE SZ_4M
+#define HISI_PTT_TRACE_TOTAL_BUF_SIZE (HISI_PTT_TRACE_BUF_SIZE * \
+ HISI_PTT_TRACE_BUF_CNT)
+/* Wait time for hardware DMA to reset */
+#define HISI_PTT_RESET_TIMEOUT_US 10UL
+#define HISI_PTT_RESET_POLL_INTERVAL_US 1UL
+/* Poll timeout and interval for waiting hardware work to finish */
+#define HISI_PTT_WAIT_TUNE_TIMEOUT_US 1000000UL
+#define HISI_PTT_WAIT_TRACE_TIMEOUT_US 100UL
+#define HISI_PTT_WAIT_POLL_INTERVAL_US 10UL
+
+#define HISI_PCIE_CORE_PORT_ID(devfn) ((PCI_SLOT(devfn) & 0x7) << 1)
+
+/* Definition of the PMU configs */
+#define HISI_PTT_PMU_FILTER_IS_PORT BIT(19)
+#define HISI_PTT_PMU_FILTER_VAL_MASK GENMASK(15, 0)
+#define HISI_PTT_PMU_DIRECTION_MASK GENMASK(23, 20)
+#define HISI_PTT_PMU_TYPE_MASK GENMASK(31, 24)
+#define HISI_PTT_PMU_FORMAT_MASK GENMASK(35, 32)
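+
+/*
+ * Illustrative decoding only (the config value below is assumed, not taken
+ * from the hardware spec): with the masks above, a perf config of 0x01100003
+ * carries filter value 0x0003, direction 0x1, type 0x01 and format 0x0,
+ * with the IS_PORT flag cleared.
+ */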
+
+/**
+ * struct hisi_ptt_tune_desc - Describe tune event for PTT tune
+ * @hisi_ptt: PTT device this tune event belongs to
+ * @name: name of this event
+ * @event_code: code of the event
+ */
+struct hisi_ptt_tune_desc {
+ struct hisi_ptt *hisi_ptt;
+ const char *name;
+ u32 event_code;
+};
+
+/**
+ * struct hisi_ptt_dma_buffer - Describe a single trace buffer of PTT trace.
+ * The details of the data format are described
+ * in the documentation of the PTT device.
+ * @dma: DMA address of this buffer visible to the device
+ * @addr: virtual address of this buffer visible to the CPU
+ */
+struct hisi_ptt_dma_buffer {
+ dma_addr_t dma;
+ void *addr;
+};
+
+/**
+ * struct hisi_ptt_trace_ctrl - Control and status of PTT trace
+ * @trace_buf: array of the trace buffers for holding the trace data.
+ * The length is HISI_PTT_TRACE_BUF_CNT.
+ * @handle: perf output handle of the current trace session
+ * @buf_index: index of the trace buffer currently in use
+ * @on_cpu: CPU currently used for tracing
+ * @started: current trace status, true for started
+ * @is_port: whether we're tracing a root port or not
+ * @direction: direction of the TLP headers to trace
+ * @filter: filter value for tracing the TLP headers
+ * @format: format of the TLP headers to trace
+ * @type: type of the TLP headers to trace
+ */
+struct hisi_ptt_trace_ctrl {
+ struct hisi_ptt_dma_buffer *trace_buf;
+ struct perf_output_handle handle;
+ u32 buf_index;
+ int on_cpu;
+ bool started;
+ bool is_port;
+ u32 direction:2;
+ u32 filter:16;
+ u32 format:1;
+ u32 type:4;
+};
+
+/**
+ * struct hisi_ptt_filter_desc - Descriptor of the PTT trace filter
+ * @list: entry of this descriptor in the filter list
+ * @is_port: the PCI device of the filter is a Root Port or not
+ * @devid: the PCI device's devid of the filter
+ */
+struct hisi_ptt_filter_desc {
+ struct list_head list;
+ bool is_port;
+ u16 devid;
+};
+
+/**
+ * struct hisi_ptt_pmu_buf - Descriptor of the AUX buffer of PTT trace
+ * @length: size of the AUX buffer
+ * @nr_pages: number of pages of the AUX buffer
+ * @base: start address of AUX buffer
+ * @pos: position in the AUX buffer to commit traced data
+ */
+struct hisi_ptt_pmu_buf {
+ size_t length;
+ int nr_pages;
+ void *base;
+ long pos;
+};
+
+/**
+ * struct hisi_ptt - Per PTT device data
+ * @trace_ctrl: the control information of PTT trace
+ * @hotplug_node: node for registering the CPU hotplug event
+ * @hisi_ptt_pmu: the PMU device for trace
+ * @iobase: base IO address of the device
+ * @pdev: pci_dev of this PTT device
+ * @tune_lock: lock to serialize the tune process
+ * @pmu_lock: lock to serialize the perf process
+ * @upper_bdf: the upper BDF range of the PCI devices managed by this PTT device
+ * @lower_bdf: the lower BDF range of the PCI devices managed by this PTT device
+ * @port_filters: the filter list of root ports
+ * @req_filters: the filter list of requester ID
+ * @port_mask: port mask of the managed root ports
+ */
+struct hisi_ptt {
+ struct hisi_ptt_trace_ctrl trace_ctrl;
+ struct hlist_node hotplug_node;
+ struct pmu hisi_ptt_pmu;
+ void __iomem *iobase;
+ struct pci_dev *pdev;
+ struct mutex tune_lock;
+ spinlock_t pmu_lock;
+ u32 upper_bdf;
+ u32 lower_bdf;
+
+ /*
+ * The trace TLP headers can be filtered either by a certain
+ * root port or by the requester ID. Organize the filters
+ * in @port_filters and @req_filters here. The mask of all
+ * the valid ports is also cached for sanity checking the
+ * user input.
+ */
+ struct list_head port_filters;
+ struct list_head req_filters;
+ u16 port_mask;
+};
+
+#define to_hisi_ptt(pmu) container_of(pmu, struct hisi_ptt, hisi_ptt_pmu)
+
+#endif /* _HISI_PTT_H */
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 185dedfebbac..c64c381b69b7 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -244,6 +244,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
u32 command, irq_handled = 0;
struct i2c_client *slave = bus->slave;
u8 value;
+ int ret;
if (!slave)
return 0;
@@ -311,7 +312,13 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
break;
case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
- i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ /*
+ * The slave has already ACKed this address phase, but as the backend driver
+ * returned an errno, the bus driver should NACK the next incoming byte.
+ */
+ if (ret < 0)
+ writel(ASPEED_I2CD_M_S_RX_CMD_LAST, bus->base + ASPEED_I2C_CMD_REG);
break;
case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 70b80e710990..4d3a3b464ecd 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -126,8 +126,9 @@
* status codes
*/
#define STATUS_IDLE 0x0
-#define STATUS_WRITE_IN_PROGRESS 0x1
-#define STATUS_READ_IN_PROGRESS 0x2
+#define STATUS_ACTIVE 0x1
+#define STATUS_WRITE_IN_PROGRESS 0x2
+#define STATUS_READ_IN_PROGRESS 0x4
/*
* operation modes
@@ -334,12 +335,14 @@ void i2c_dw_disable_int(struct dw_i2c_dev *dev);
static inline void __i2c_dw_enable(struct dw_i2c_dev *dev)
{
+ dev->status |= STATUS_ACTIVE;
regmap_write(dev->map, DW_IC_ENABLE, 1);
}
static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev)
{
regmap_write(dev->map, DW_IC_ENABLE, 0);
+ dev->status &= ~STATUS_ACTIVE;
}
void __i2c_dw_disable(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 44a94b225ed8..dc3c5a15a95b 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -716,6 +716,19 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev)
u32 stat;
stat = i2c_dw_read_clear_intrbits(dev);
+
+ if (!(dev->status & STATUS_ACTIVE)) {
+ /*
+ * Unexpected interrupt from the driver's point of view. The state
+ * variables are either unset or stale, so acknowledge and
+ * disable interrupts to suppress further interrupts in case the
+ * interrupt really came from this HW (e.g. firmware has left
+ * the HW active).
+ */
+ regmap_write(dev->map, DW_IC_INTR_MASK, 0);
+ return 0;
+ }
+
if (stat & DW_IC_INTR_TX_ABRT) {
dev->cmd_err |= DW_IC_ERR_TX_ABRT;
dev->status = STATUS_IDLE;
diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
index f5342201eb6b..09af75921147 100644
--- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
@@ -708,7 +708,7 @@ static void pci1xxxx_i2c_init(struct pci1xxxx_i2c *i2c)
void __iomem *p2 = i2c->i2c_base + SMBUS_STATUS_REG_OFF;
void __iomem *p1 = i2c->i2c_base + SMB_GPR_REG;
u8 regval;
- u8 ret;
+ int ret;
ret = set_sys_lock(i2c);
if (ret == -EPERM) {
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index ea48e6a9cfca..87739fb4388b 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -807,6 +807,7 @@ static const struct cci_data cci_v2_data = {
};
static const struct of_device_id cci_dt_match[] = {
+ { .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index da6568a20177..4dd777cc0c89 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -137,6 +137,11 @@ static const struct acpi_device_id i2c_acpi_ignored_device_ids[] = {
{}
};
+struct i2c_acpi_irq_context {
+ int irq;
+ bool wake_capable;
+};
+
static int i2c_acpi_do_lookup(struct acpi_device *adev,
struct i2c_acpi_lookup *lookup)
{
@@ -168,13 +173,19 @@ static int i2c_acpi_do_lookup(struct acpi_device *adev,
return 0;
}
-static int i2c_acpi_add_resource(struct acpi_resource *ares, void *data)
+static int i2c_acpi_add_irq_resource(struct acpi_resource *ares, void *data)
{
- int *irq = data;
+ struct i2c_acpi_irq_context *irq_ctx = data;
struct resource r;
- if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
- *irq = i2c_dev_irq_from_resources(&r, 1);
+ if (irq_ctx->irq > 0)
+ return 1;
+
+ if (!acpi_dev_resource_interrupt(ares, 0, &r))
+ return 1;
+
+ irq_ctx->irq = i2c_dev_irq_from_resources(&r, 1);
+ irq_ctx->wake_capable = r.flags & IORESOURCE_IRQ_WAKECAPABLE;
return 1; /* No need to add resource to the list */
}
@@ -182,31 +193,40 @@ static int i2c_acpi_add_resource(struct acpi_resource *ares, void *data)
/**
* i2c_acpi_get_irq - get device IRQ number from ACPI
* @client: Pointer to the I2C client device
+ * @wake_capable: Set to true if the IRQ is wake capable
*
* Find the IRQ number used by a specific client device.
*
* Return: The IRQ number or an error code.
*/
-int i2c_acpi_get_irq(struct i2c_client *client)
+int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable)
{
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
struct list_head resource_list;
- int irq = -ENOENT;
+ struct i2c_acpi_irq_context irq_ctx = {
+ .irq = -ENOENT,
+ };
int ret;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- i2c_acpi_add_resource, &irq);
+ i2c_acpi_add_irq_resource, &irq_ctx);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&resource_list);
- if (irq == -ENOENT)
- irq = acpi_dev_gpio_irq_get(adev, 0);
+ if (irq_ctx.irq == -ENOENT)
+ irq_ctx.irq = acpi_dev_gpio_irq_wake_get(adev, 0, &irq_ctx.wake_capable);
+
+ if (irq_ctx.irq < 0)
+ return irq_ctx.irq;
+
+ if (wake_capable)
+ *wake_capable = irq_ctx.wake_capable;
- return irq;
+ return irq_ctx.irq;
}
static int i2c_acpi_get_info(struct acpi_device *adev,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 8c7e3494ca5f..b4edf10e8fd0 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -487,7 +487,11 @@ static int i2c_device_probe(struct device *dev)
if (irq == -EINVAL || irq == -ENODATA)
irq = of_irq_get(dev->of_node, 0);
} else if (ACPI_COMPANION(dev)) {
- irq = i2c_acpi_get_irq(client);
+ bool wake_capable;
+
+ irq = i2c_acpi_get_irq(client, &wake_capable);
+ if (irq > 0 && wake_capable)
+ client->flags |= I2C_CLIENT_WAKE;
}
if (irq == -EPROBE_DEFER) {
status = irq;
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 87e2c914f1c5..1247e6e6e975 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -61,11 +61,11 @@ static inline int __i2c_check_suspended(struct i2c_adapter *adap)
#ifdef CONFIG_ACPI
void i2c_acpi_register_devices(struct i2c_adapter *adap);
-int i2c_acpi_get_irq(struct i2c_client *client);
+int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
-static inline int i2c_acpi_get_irq(struct i2c_client *client)
+static inline int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable)
{
return 0;
}
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 7850287dfe7a..351c81a929a6 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1379,6 +1379,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
i3c_bus_set_addr_slot_status(&master->bus,
dev->info.dyn_addr,
I3C_ADDR_SLOT_I3C_DEV);
+ if (old_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, old_dyn_addr,
+ I3C_ADDR_SLOT_FREE);
}
if (master->ops->reattach_i3c_dev) {
@@ -1908,10 +1911,6 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
i3c_master_free_i3c_dev(olddev);
}
- ret = i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
- if (ret)
- goto err_detach_dev;
-
/*
* Depending on our previous state, the expected dynamic address might
* differ:
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 35798712f811..ffac66db7ac9 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -541,6 +541,19 @@ config MMA9553
To compile this driver as a module, choose M here: the module
will be called mma9553.
+config MSA311
+ tristate "MEMSensing Digital 3-Axis Accelerometer Driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the MEMSensing MSA311
+ accelerometer driver.
+
+ To compile this driver as a module, choose M here: the module will be
+ called msa311.
+
config MXC4005
tristate "Memsic MXC4005XC 3-Axis Accelerometer Driver"
depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 4d8792668838..5e45b5fa5ab5 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -58,6 +58,8 @@ obj-$(CONFIG_MMA9551_CORE) += mma9551_core.o
obj-$(CONFIG_MMA9551) += mma9551.o
obj-$(CONFIG_MMA9553) += mma9553.o
+obj-$(CONFIG_MSA311) += msa311.o
+
obj-$(CONFIG_MXC4005) += mxc4005.o
obj-$(CONFIG_MXC6255) += mxc6255.o
diff --git a/drivers/iio/accel/adxl313.h b/drivers/iio/accel/adxl313.h
index 4415f2fc07e1..72f624af4686 100644
--- a/drivers/iio/accel/adxl313.h
+++ b/drivers/iio/accel/adxl313.h
@@ -8,6 +8,8 @@
#ifndef _ADXL313_H_
#define _ADXL313_H_
+#include <linux/iio/iio.h>
+
/* ADXL313 register definitions */
#define ADXL313_REG_DEVID0 0x00
#define ADXL313_REG_DEVID1 0x01
@@ -26,6 +28,7 @@
#define ADXL313_REG_FIFO_STATUS 0x39
#define ADXL313_DEVID0 0xAD
+#define ADXL313_DEVID0_ADXL312_314 0xE5
#define ADXL313_DEVID1 0x1D
#define ADXL313_PARTID 0xCB
#define ADXL313_SOFT_RESET 0x52
@@ -37,18 +40,46 @@
#define ADXL313_MEASUREMENT_MODE BIT(3)
#define ADXL313_RANGE_MSK GENMASK(1, 0)
-#define ADXL313_RANGE_4G 3
+#define ADXL313_RANGE_MAX 3
#define ADXL313_FULL_RES BIT(3)
#define ADXL313_SPI_3WIRE BIT(6)
#define ADXL313_I2C_DISABLE BIT(6)
+extern const struct regmap_access_table adxl312_readable_regs_table;
extern const struct regmap_access_table adxl313_readable_regs_table;
+extern const struct regmap_access_table adxl314_readable_regs_table;
+extern const struct regmap_access_table adxl312_writable_regs_table;
extern const struct regmap_access_table adxl313_writable_regs_table;
+extern const struct regmap_access_table adxl314_writable_regs_table;
+
+enum adxl313_device_type {
+ ADXL312,
+ ADXL313,
+ ADXL314,
+};
+
+struct adxl313_data {
+ struct regmap *regmap;
+ const struct adxl313_chip_info *chip_info;
+ struct mutex lock; /* lock to protect transf_buf */
+ __le16 transf_buf __aligned(IIO_DMA_MINALIGN);
+};
+
+struct adxl313_chip_info {
+ const char *name;
+ enum adxl313_device_type type;
+ int scale_factor;
+ bool variable_range;
+ bool soft_reset;
+ int (*check_id)(struct device *dev, struct adxl313_data *data);
+};
+
+extern const struct adxl313_chip_info adxl31x_chip_info[];
int adxl313_core_probe(struct device *dev,
struct regmap *regmap,
- const char *name,
+ const struct adxl313_chip_info *chip_info,
int (*setup)(struct device *, struct regmap *));
#endif /* _ADXL313_H_ */
diff --git a/drivers/iio/accel/adxl313_core.c b/drivers/iio/accel/adxl313_core.c
index afeef779e1d0..4de0a41bd679 100644
--- a/drivers/iio/accel/adxl313_core.c
+++ b/drivers/iio/accel/adxl313_core.c
@@ -8,12 +8,18 @@
*/
#include <linux/bitfield.h>
-#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include "adxl313.h"
+static const struct regmap_range adxl312_readable_reg_range[] = {
+ regmap_reg_range(ADXL313_REG_DEVID0, ADXL313_REG_DEVID0),
+ regmap_reg_range(ADXL313_REG_OFS_AXIS(0), ADXL313_REG_OFS_AXIS(2)),
+ regmap_reg_range(ADXL313_REG_THRESH_ACT, ADXL313_REG_ACT_INACT_CTL),
+ regmap_reg_range(ADXL313_REG_BW_RATE, ADXL313_REG_FIFO_STATUS),
+};
+
static const struct regmap_range adxl313_readable_reg_range[] = {
regmap_reg_range(ADXL313_REG_DEVID0, ADXL313_REG_XID),
regmap_reg_range(ADXL313_REG_SOFT_RESET, ADXL313_REG_SOFT_RESET),
@@ -22,12 +28,109 @@ static const struct regmap_range adxl313_readable_reg_range[] = {
regmap_reg_range(ADXL313_REG_BW_RATE, ADXL313_REG_FIFO_STATUS),
};
+const struct regmap_access_table adxl312_readable_regs_table = {
+ .yes_ranges = adxl312_readable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl312_readable_reg_range),
+};
+EXPORT_SYMBOL_NS_GPL(adxl312_readable_regs_table, IIO_ADXL313);
+
const struct regmap_access_table adxl313_readable_regs_table = {
.yes_ranges = adxl313_readable_reg_range,
.n_yes_ranges = ARRAY_SIZE(adxl313_readable_reg_range),
};
EXPORT_SYMBOL_NS_GPL(adxl313_readable_regs_table, IIO_ADXL313);
+const struct regmap_access_table adxl314_readable_regs_table = {
+ .yes_ranges = adxl312_readable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl312_readable_reg_range),
+};
+EXPORT_SYMBOL_NS_GPL(adxl314_readable_regs_table, IIO_ADXL313);
+
+static int adxl312_check_id(struct device *dev,
+ struct adxl313_data *data)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, ADXL313_REG_DEVID0, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_DEVID0_ADXL312_314)
+ dev_warn(dev, "Invalid manufacturer ID: %#02x\n", regval);
+
+ return 0;
+}
+
+static int adxl313_check_id(struct device *dev,
+ struct adxl313_data *data)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, ADXL313_REG_DEVID0, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_DEVID0)
+ dev_warn(dev, "Invalid manufacturer ID: 0x%02x\n", regval);
+
+ /* Check DEVID1 and PARTID */
+ if (regval == ADXL313_DEVID0) {
+ ret = regmap_read(data->regmap, ADXL313_REG_DEVID1, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_DEVID1)
+ dev_warn(dev, "Invalid mems ID: 0x%02x\n", regval);
+
+ ret = regmap_read(data->regmap, ADXL313_REG_PARTID, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_PARTID)
+ dev_warn(dev, "Invalid device ID: 0x%02x\n", regval);
+ }
+
+ return 0;
+}
+
+const struct adxl313_chip_info adxl31x_chip_info[] = {
+ [ADXL312] = {
+ .name = "adxl312",
+ .type = ADXL312,
+ .scale_factor = 28425072,
+ .variable_range = true,
+ .soft_reset = false,
+ .check_id = &adxl312_check_id,
+ },
+ [ADXL313] = {
+ .name = "adxl313",
+ .type = ADXL313,
+ .scale_factor = 9576806,
+ .variable_range = true,
+ .soft_reset = true,
+ .check_id = &adxl313_check_id,
+ },
+ [ADXL314] = {
+ .name = "adxl314",
+ .type = ADXL314,
+ .scale_factor = 478858719,
+ .variable_range = false,
+ .soft_reset = false,
+ .check_id = &adxl312_check_id,
+ },
+};
+EXPORT_SYMBOL_NS_GPL(adxl31x_chip_info, IIO_ADXL313);
+
+static const struct regmap_range adxl312_writable_reg_range[] = {
+ regmap_reg_range(ADXL313_REG_OFS_AXIS(0), ADXL313_REG_OFS_AXIS(2)),
+ regmap_reg_range(ADXL313_REG_THRESH_ACT, ADXL313_REG_ACT_INACT_CTL),
+ regmap_reg_range(ADXL313_REG_BW_RATE, ADXL313_REG_INT_MAP),
+ regmap_reg_range(ADXL313_REG_DATA_FORMAT, ADXL313_REG_DATA_FORMAT),
+ regmap_reg_range(ADXL313_REG_FIFO_CTL, ADXL313_REG_FIFO_CTL),
+};
+
static const struct regmap_range adxl313_writable_reg_range[] = {
regmap_reg_range(ADXL313_REG_SOFT_RESET, ADXL313_REG_SOFT_RESET),
regmap_reg_range(ADXL313_REG_OFS_AXIS(0), ADXL313_REG_OFS_AXIS(2)),
@@ -37,17 +140,23 @@ static const struct regmap_range adxl313_writable_reg_range[] = {
regmap_reg_range(ADXL313_REG_FIFO_CTL, ADXL313_REG_FIFO_CTL),
};
+const struct regmap_access_table adxl312_writable_regs_table = {
+ .yes_ranges = adxl312_writable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl312_writable_reg_range),
+};
+EXPORT_SYMBOL_NS_GPL(adxl312_writable_regs_table, IIO_ADXL313);
+
const struct regmap_access_table adxl313_writable_regs_table = {
.yes_ranges = adxl313_writable_reg_range,
.n_yes_ranges = ARRAY_SIZE(adxl313_writable_reg_range),
};
EXPORT_SYMBOL_NS_GPL(adxl313_writable_regs_table, IIO_ADXL313);
-struct adxl313_data {
- struct regmap *regmap;
- struct mutex lock; /* lock to protect transf_buf */
- __le16 transf_buf __aligned(IIO_DMA_MINALIGN);
+const struct regmap_access_table adxl314_writable_regs_table = {
+ .yes_ranges = adxl312_writable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl312_writable_reg_range),
};
+EXPORT_SYMBOL_NS_GPL(adxl314_writable_regs_table, IIO_ADXL313);
static const int adxl313_odr_freqs[][2] = {
[0] = { 6, 250000 },
@@ -156,12 +265,10 @@ static int adxl313_read_raw(struct iio_dev *indio_dev,
*val = sign_extend32(ret, chan->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- /*
- * Scale for any g range is given in datasheet as
- * 1024 LSB/g = 0.0009765625 * 9.80665 = 0.009576806640625 m/s^2
- */
*val = 0;
- *val2 = 9576806;
+
+ *val2 = data->chip_info->scale_factor;
+
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_CALIBBIAS:
ret = regmap_read(data->regmap,
@@ -170,7 +277,7 @@ static int adxl313_read_raw(struct iio_dev *indio_dev,
return ret;
/*
- * 8-bit resolution at +/- 0.5g, that is 4x accel data scale
+ * 8-bit resolution at minimum range, that is 4x accel data scale
* factor at full resolution
*/
*val = sign_extend32(regval, 7) * 4;
@@ -198,7 +305,7 @@ static int adxl313_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
/*
- * 8-bit resolution at +/- 0.5g, that is 4x accel data scale
+ * 8-bit resolution at minimum range, that is 4x accel data scale
* factor at full resolution
*/
if (clamp_val(val, -128 * 4, 127 * 4) != val)
@@ -223,14 +330,18 @@ static const struct iio_info adxl313_info = {
static int adxl313_setup(struct device *dev, struct adxl313_data *data,
int (*setup)(struct device *, struct regmap *))
{
- unsigned int regval;
int ret;
- /* Ensures the device is in a consistent state after start up */
- ret = regmap_write(data->regmap, ADXL313_REG_SOFT_RESET,
- ADXL313_SOFT_RESET);
- if (ret)
- return ret;
+ /*
+ * If a soft reset is available, ensure the device is in a
+ * consistent state after start-up
+ */
+ if (data->chip_info->soft_reset) {
+ ret = regmap_write(data->regmap, ADXL313_REG_SOFT_RESET,
+ ADXL313_SOFT_RESET);
+ if (ret)
+ return ret;
+ }
if (setup) {
ret = setup(dev, data->regmap);
@@ -238,46 +349,25 @@ static int adxl313_setup(struct device *dev, struct adxl313_data *data,
return ret;
}
- ret = regmap_read(data->regmap, ADXL313_REG_DEVID0, &regval);
+ ret = data->chip_info->check_id(dev, data);
if (ret)
return ret;
- if (regval != ADXL313_DEVID0) {
- dev_err(dev, "Invalid manufacturer ID: 0x%02x\n", regval);
- return -ENODEV;
- }
-
- ret = regmap_read(data->regmap, ADXL313_REG_DEVID1, &regval);
- if (ret)
- return ret;
-
- if (regval != ADXL313_DEVID1) {
- dev_err(dev, "Invalid mems ID: 0x%02x\n", regval);
- return -ENODEV;
- }
-
- ret = regmap_read(data->regmap, ADXL313_REG_PARTID, &regval);
- if (ret)
- return ret;
+ /* Sets the range to maximum, full resolution, if applicable */
+ if (data->chip_info->variable_range) {
+ ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
+ ADXL313_RANGE_MSK,
+ FIELD_PREP(ADXL313_RANGE_MSK, ADXL313_RANGE_MAX));
+ if (ret)
+ return ret;
- if (regval != ADXL313_PARTID) {
- dev_err(dev, "Invalid device ID: 0x%02x\n", regval);
- return -ENODEV;
+ /* Enables full resolution */
+ ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
+ ADXL313_FULL_RES, ADXL313_FULL_RES);
+ if (ret)
+ return ret;
}
- /* Sets the range to +/- 4g */
- ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
- ADXL313_RANGE_MSK,
- FIELD_PREP(ADXL313_RANGE_MSK, ADXL313_RANGE_4G));
- if (ret)
- return ret;
-
- /* Enables full resolution */
- ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
- ADXL313_FULL_RES, ADXL313_FULL_RES);
- if (ret)
- return ret;
-
/* Enables measurement mode */
return regmap_update_bits(data->regmap, ADXL313_REG_POWER_CTL,
ADXL313_POWER_CTL_MSK,
@@ -288,7 +378,7 @@ static int adxl313_setup(struct device *dev, struct adxl313_data *data,
* adxl313_core_probe() - probe and setup for adxl313 accelerometer
* @dev: Driver model representation of the device
* @regmap: Register map of the device
- * @name: Device name buffer reference
+ * @chip_info: Structure containing device specific data
* @setup: Setup routine to be executed right before the standard device
* setup, can also be set to NULL if not required
*
@@ -296,7 +386,7 @@ static int adxl313_setup(struct device *dev, struct adxl313_data *data,
*/
int adxl313_core_probe(struct device *dev,
struct regmap *regmap,
- const char *name,
+ const struct adxl313_chip_info *chip_info,
int (*setup)(struct device *, struct regmap *))
{
struct adxl313_data *data;
@@ -309,9 +399,11 @@ int adxl313_core_probe(struct device *dev,
data = iio_priv(indio_dev);
data->regmap = regmap;
+ data->chip_info = chip_info;
+
mutex_init(&data->lock);
- indio_dev->name = name;
+ indio_dev->name = chip_info->name;
indio_dev->info = &adxl313_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = adxl313_channels;
diff --git a/drivers/iio/accel/adxl313_i2c.c b/drivers/iio/accel/adxl313_i2c.c
index c329765dbf60..99cc7fc29488 100644
--- a/drivers/iio/accel/adxl313_i2c.c
+++ b/drivers/iio/accel/adxl313_i2c.c
@@ -14,42 +14,72 @@
#include "adxl313.h"
-static const struct regmap_config adxl313_i2c_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .rd_table = &adxl313_readable_regs_table,
- .wr_table = &adxl313_writable_regs_table,
- .max_register = 0x39,
+static const struct regmap_config adxl31x_i2c_regmap_config[] = {
+ [ADXL312] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl312_readable_regs_table,
+ .wr_table = &adxl312_writable_regs_table,
+ .max_register = 0x39,
+ },
+ [ADXL313] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl313_readable_regs_table,
+ .wr_table = &adxl313_writable_regs_table,
+ .max_register = 0x39,
+ },
+ [ADXL314] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl314_readable_regs_table,
+ .wr_table = &adxl314_writable_regs_table,
+ .max_register = 0x39,
+ },
};
-static int adxl313_i2c_probe(struct i2c_client *client)
-{
- struct regmap *regmap;
-
- regmap = devm_regmap_init_i2c(client, &adxl313_i2c_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Error initializing i2c regmap: %ld\n",
- PTR_ERR(regmap));
- return PTR_ERR(regmap);
- }
-
- return adxl313_core_probe(&client->dev, regmap, client->name, NULL);
-}
-
static const struct i2c_device_id adxl313_i2c_id[] = {
- { "adxl313" },
+ { .name = "adxl312", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+ { .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+ { .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
{ }
};
MODULE_DEVICE_TABLE(i2c, adxl313_i2c_id);
static const struct of_device_id adxl313_of_match[] = {
- { .compatible = "adi,adxl313" },
+ { .compatible = "adi,adxl312", .data = &adxl31x_chip_info[ADXL312] },
+ { .compatible = "adi,adxl313", .data = &adxl31x_chip_info[ADXL313] },
+ { .compatible = "adi,adxl314", .data = &adxl31x_chip_info[ADXL314] },
{ }
};
MODULE_DEVICE_TABLE(of, adxl313_of_match);
+static int adxl313_i2c_probe(struct i2c_client *client)
+{
+ const struct adxl313_chip_info *chip_data;
+ struct regmap *regmap;
+
+ /*
+ * Retrieve device-specific data as a pointer to an
+ * adxl313_chip_info structure
+ */
+ chip_data = device_get_match_data(&client->dev);
+ if (!chip_data)
+ chip_data = (const struct adxl313_chip_info *)i2c_match_id(adxl313_i2c_id, client)->driver_data;
+
+ regmap = devm_regmap_init_i2c(client,
+ &adxl31x_i2c_regmap_config[chip_data->type]);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Error initializing i2c regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return adxl313_core_probe(&client->dev, regmap, chip_data, NULL);
+}
+
static struct i2c_driver adxl313_i2c_driver = {
.driver = {
.name = "adxl313_i2c",
diff --git a/drivers/iio/accel/adxl313_spi.c b/drivers/iio/accel/adxl313_spi.c
index a3c6d553462d..b7cc15678a2b 100644
--- a/drivers/iio/accel/adxl313_spi.c
+++ b/drivers/iio/accel/adxl313_spi.c
@@ -11,17 +11,38 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <linux/property.h>
#include "adxl313.h"
-static const struct regmap_config adxl313_spi_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .rd_table = &adxl313_readable_regs_table,
- .wr_table = &adxl313_writable_regs_table,
- .max_register = 0x39,
- /* Setting bits 7 and 6 enables multiple-byte read */
- .read_flag_mask = BIT(7) | BIT(6),
+static const struct regmap_config adxl31x_spi_regmap_config[] = {
+ [ADXL312] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl312_readable_regs_table,
+ .wr_table = &adxl312_writable_regs_table,
+ .max_register = 0x39,
+ /* Setting bits 7 and 6 enables multiple-byte read */
+ .read_flag_mask = BIT(7) | BIT(6),
+ },
+ [ADXL313] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl313_readable_regs_table,
+ .wr_table = &adxl313_writable_regs_table,
+ .max_register = 0x39,
+ /* Setting bits 7 and 6 enables multiple-byte read */
+ .read_flag_mask = BIT(7) | BIT(6),
+ },
+ [ADXL314] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl314_readable_regs_table,
+ .wr_table = &adxl314_writable_regs_table,
+ .max_register = 0x39,
+ /* Setting bits 7 and 6 enables multiple-byte read */
+ .read_flag_mask = BIT(7) | BIT(6),
+ },
};
static int adxl313_spi_setup(struct device *dev, struct regmap *regmap)
@@ -42,7 +63,7 @@ static int adxl313_spi_setup(struct device *dev, struct regmap *regmap)
static int adxl313_spi_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct adxl313_chip_info *chip_data;
struct regmap *regmap;
int ret;
@@ -51,26 +72,40 @@ static int adxl313_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- regmap = devm_regmap_init_spi(spi, &adxl313_spi_regmap_config);
+ /*
+ * Retrieve device-specific data as a pointer to an
+ * adxl313_chip_info structure
+ */
+ chip_data = device_get_match_data(&spi->dev);
+ if (!chip_data)
+ chip_data = (const struct adxl313_chip_info *)spi_get_device_id(spi)->driver_data;
+
+ regmap = devm_regmap_init_spi(spi,
+ &adxl31x_spi_regmap_config[chip_data->type]);
+
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Error initializing spi regmap: %ld\n",
PTR_ERR(regmap));
return PTR_ERR(regmap);
}
- return adxl313_core_probe(&spi->dev, regmap, id->name,
- &adxl313_spi_setup);
+ return adxl313_core_probe(&spi->dev, regmap,
+ chip_data, &adxl313_spi_setup);
}
static const struct spi_device_id adxl313_spi_id[] = {
- { "adxl313" },
+ { .name = "adxl312", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+ { .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL313] },
+ { .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL314] },
{ }
};
MODULE_DEVICE_TABLE(spi, adxl313_spi_id);
static const struct of_device_id adxl313_of_match[] = {
- { .compatible = "adi,adxl313" },
+ { .compatible = "adi,adxl312", .data = &adxl31x_chip_info[ADXL312] },
+ { .compatible = "adi,adxl313", .data = &adxl31x_chip_info[ADXL313] },
+ { .compatible = "adi,adxl314", .data = &adxl31x_chip_info[ADXL314] },
{ }
};
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 370bfec1275a..1919e0089c11 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -32,7 +33,6 @@
#define ADXL345_BW_RATE GENMASK(3, 0)
#define ADXL345_BASE_RATE_NANO_HZ 97656250LL
-#define NHZ_PER_HZ 1000000000LL
#define ADXL345_POWER_CTL_MEASURE BIT(3)
#define ADXL345_POWER_CTL_STANDBY 0x00
@@ -139,7 +139,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
samp_freq_nhz = ADXL345_BASE_RATE_NANO_HZ <<
(regval & ADXL345_BW_RATE);
- *val = div_s64_rem(samp_freq_nhz, NHZ_PER_HZ, val2);
+ *val = div_s64_rem(samp_freq_nhz, NANOHZ_PER_HZ, val2);
return IIO_VAL_INT_PLUS_NANO;
}
@@ -164,7 +164,8 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
ADXL345_REG_OFS_AXIS(chan->address),
val / 4);
case IIO_CHAN_INFO_SAMP_FREQ:
- n = div_s64(val * NHZ_PER_HZ + val2, ADXL345_BASE_RATE_NANO_HZ);
+ n = div_s64(val * NANOHZ_PER_HZ + val2,
+ ADXL345_BASE_RATE_NANO_HZ);
return regmap_update_bits(data->regmap, ADXL345_REG_BW_RATE,
ADXL345_BW_RATE,
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
index e8f802a82300..36edbaff4f7f 100644
--- a/drivers/iio/accel/bma400.h
+++ b/drivers/iio/accel/bma400.h
@@ -40,6 +40,7 @@
#define BMA400_INT_STAT1_REG 0x0f
#define BMA400_INT_STAT2_REG 0x10
#define BMA400_INT12_MAP_REG 0x23
+#define BMA400_INT_ENG_OVRUN_MSK BIT(4)
/* Temperature register */
#define BMA400_TEMP_DATA_REG 0x11
@@ -105,6 +106,19 @@
#define BMA400_INT_GEN2_MSK BIT(3)
#define BMA400_GEN_HYST_MSK GENMASK(1, 0)
+/* TAP config registers */
+#define BMA400_TAP_CONFIG 0x57
+#define BMA400_TAP_CONFIG1 0x58
+#define BMA400_S_TAP_MSK BIT(2)
+#define BMA400_D_TAP_MSK BIT(3)
+#define BMA400_INT_S_TAP_MSK BIT(10)
+#define BMA400_INT_D_TAP_MSK BIT(11)
+#define BMA400_TAP_SEN_MSK GENMASK(2, 0)
+#define BMA400_TAP_TICSTH_MSK GENMASK(1, 0)
+#define BMA400_TAP_QUIET_MSK GENMASK(3, 2)
+#define BMA400_TAP_QUIETDT_MSK GENMASK(5, 4)
+#define BMA400_TAP_TIM_LIST_LEN 4
+
/*
* BMA400_SCALE_MIN macro value represents m/s^2 for 1 LSB before
* converting to micro values for +-2g range.
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index c31bdd9b168e..ad8fce3e08cd 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -26,6 +26,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -47,6 +48,27 @@ static int bma400_sample_freqs[14];
static const int bma400_osr_range[] = { 0, 1, 3 };
+static int tap_reset_timeout[BMA400_TAP_TIM_LIST_LEN] = {
+ 300000,
+ 400000,
+ 500000,
+ 600000
+};
+
+static int tap_max2min_time[BMA400_TAP_TIM_LIST_LEN] = {
+ 30000,
+ 45000,
+ 60000,
+ 90000
+};
+
+static int double_tap2_min_delay[BMA400_TAP_TIM_LIST_LEN] = {
+ 20000,
+ 40000,
+ 60000,
+ 80000
+};
+
/* See the ACC_CONFIG0 section of the datasheet */
enum bma400_power_mode {
POWER_MODE_SLEEP = 0x00,
@@ -88,6 +110,7 @@ struct bma400_data {
bool step_event_en;
bool activity_event_en;
unsigned int generic_event_en;
+ unsigned int tap_event_en_bitmask;
/* Correct time stamp alignment */
struct {
__le16 buff[3];
@@ -216,6 +239,115 @@ static const struct iio_event_spec bma400_accel_event[] = {
BIT(IIO_EV_INFO_HYSTERESIS) |
BIT(IIO_EV_INFO_ENABLE),
},
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_SINGLETAP,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT),
+ },
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_DOUBLETAP,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT) |
+ BIT(IIO_EV_INFO_TAP2_MIN_DELAY),
+ },
+};
+
+static int usec_to_tapreg_raw(int usec, const int *time_list)
+{
+ int index;
+
+ for (index = 0; index < BMA400_TAP_TIM_LIST_LEN; index++) {
+ if (usec == time_list[index])
+ return index;
+ }
+ return -EINVAL;
+}
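+
+/*
+ * Illustrative only (the arguments are assumed): usec_to_tapreg_raw(45000,
+ * tap_max2min_time) returns 1, the index of 45000 us in that table, while an
+ * unlisted value such as 50000 us returns -EINVAL.
+ */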
+
+static ssize_t in_accel_gesture_tap_maxtomin_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret, reg_val, raw, vals[2];
+
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1, &reg_val);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMA400_TAP_TICSTH_MSK, reg_val);
+ vals[0] = 0;
+ vals[1] = tap_max2min_time[raw];
+
+ return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
+}
+
+static ssize_t in_accel_gesture_tap_maxtomin_time_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret, val_int, val_fract, raw;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val_int, &val_fract);
+ if (ret)
+ return ret;
+
+ raw = usec_to_tapreg_raw(val_fract, tap_max2min_time);
+ if (raw < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, BMA400_TAP_CONFIG1,
+ BMA400_TAP_TICSTH_MSK,
+ FIELD_PREP(BMA400_TAP_TICSTH_MSK, raw));
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_maxtomin_time, 0);
+
+/*
+ * Tap interrupts work with a 200 Hz input data rate, and the time-based tap
+ * controls are expressed in data samples, so the calculation below is used
+ * to convert the configuration values into seconds.
+ * e.g.:
+ * 60 data samples * 0.005 s = 0.3 seconds.
+ * 80 data samples * 0.005 s = 0.4 seconds.
+ */
+
+/* quiet configuration values in seconds */
+static IIO_CONST_ATTR(in_accel_gesture_tap_reset_timeout_available,
+ "0.3 0.4 0.5 0.6");
+
+/* tics_th configuration values in seconds */
+static IIO_CONST_ATTR(in_accel_gesture_tap_maxtomin_time_available,
+ "0.03 0.045 0.06 0.09");
+
+/* quiet_dt configuration values in seconds */
+static IIO_CONST_ATTR(in_accel_gesture_doubletap_tap2_min_delay_available,
+ "0.02 0.04 0.06 0.08");
+
+/* List of sensitivity values available to configure tap interrupts */
+static IIO_CONST_ATTR(in_accel_gesture_tap_value_available, "0 1 2 3 4 5 6 7");
+
+static struct attribute *bma400_event_attributes[] = {
+ &iio_const_attr_in_accel_gesture_tap_value_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_tap_reset_timeout_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_tap_maxtomin_time_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_doubletap_tap2_min_delay_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_gesture_tap_maxtomin_time.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group bma400_event_attribute_group = {
+ .attrs = bma400_event_attributes,
};
#define BMA400_ACC_CHANNEL(_index, _axis) { \
@@ -1012,6 +1144,12 @@ static int bma400_read_event_config(struct iio_dev *indio_dev,
case IIO_EV_DIR_FALLING:
return FIELD_GET(BMA400_INT_GEN2_MSK,
data->generic_event_en);
+ case IIO_EV_DIR_SINGLETAP:
+ return FIELD_GET(BMA400_S_TAP_MSK,
+ data->tap_event_en_bitmask);
+ case IIO_EV_DIR_DOUBLETAP:
+ return FIELD_GET(BMA400_D_TAP_MSK,
+ data->tap_event_en_bitmask);
default:
return -EINVAL;
}
@@ -1046,7 +1184,8 @@ static int bma400_activity_event_en(struct bma400_data *data,
enum iio_event_direction dir,
int state)
{
- int ret, reg, msk, value, field_value;
+ int ret, reg, msk, value;
+ int field_value = 0;
switch (dir) {
case IIO_EV_DIR_RISING:
@@ -1101,6 +1240,80 @@ static int bma400_activity_event_en(struct bma400_data *data,
return 0;
}
+static int bma400_tap_event_en(struct bma400_data *data,
+ enum iio_event_direction dir, int state)
+{
+ unsigned int mask, field_value;
+ int ret;
+
+ /*
+ * Tap interrupts can be configured only in normal mode.
+ * See table in section 4.3 "Power modes - performance modes" of
+ * datasheet v1.2.
+ */
+ if (data->power_mode != POWER_MODE_NORMAL)
+ return -EINVAL;
+
+ /*
+	 * Tap interrupts operate at a data rate of 200 Hz.
+ * See section 4.7 "Tap sensing interrupt" in datasheet v1.2.
+ */
+ if (data->sample_freq.hz != 200 && state) {
+ dev_err(data->dev, "Invalid data rate for tap interrupts.\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT12_MAP_REG,
+ BMA400_S_TAP_MSK,
+ FIELD_PREP(BMA400_S_TAP_MSK, state));
+ if (ret)
+ return ret;
+
+ switch (dir) {
+ case IIO_EV_DIR_SINGLETAP:
+ mask = BMA400_S_TAP_MSK;
+ set_mask_bits(&field_value, BMA400_S_TAP_MSK,
+ FIELD_PREP(BMA400_S_TAP_MSK, state));
+ break;
+ case IIO_EV_DIR_DOUBLETAP:
+ mask = BMA400_D_TAP_MSK;
+ set_mask_bits(&field_value, BMA400_D_TAP_MSK,
+ FIELD_PREP(BMA400_D_TAP_MSK, state));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG1_REG, mask,
+ field_value);
+ if (ret)
+ return ret;
+
+ set_mask_bits(&data->tap_event_en_bitmask, mask, field_value);
+
+ return 0;
+}
+
+static int bma400_disable_adv_interrupt(struct bma400_data *data)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, BMA400_INT_CONFIG0_REG, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMA400_INT_CONFIG1_REG, 0);
+ if (ret)
+ return ret;
+
+ data->tap_event_en_bitmask = 0;
+ data->generic_event_en = 0;
+ data->step_event_en = false;
+ data->activity_event_en = false;
+
+ return 0;
+}
+
static int bma400_write_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -1111,10 +1324,20 @@ static int bma400_write_event_config(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ACCEL:
- mutex_lock(&data->mutex);
- ret = bma400_activity_event_en(data, dir, state);
- mutex_unlock(&data->mutex);
- return ret;
+ switch (type) {
+ case IIO_EV_TYPE_MAG:
+ mutex_lock(&data->mutex);
+ ret = bma400_activity_event_en(data, dir, state);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_EV_TYPE_GESTURE:
+ mutex_lock(&data->mutex);
+ ret = bma400_tap_event_en(data, dir, state);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
case IIO_STEPS:
mutex_lock(&data->mutex);
ret = bma400_steps_event_enable(data, state);
@@ -1157,10 +1380,13 @@ static int bma400_read_event_value(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct bma400_data *data = iio_priv(indio_dev);
- int ret, reg;
+ int ret, reg, reg_val, raw;
- switch (chan->type) {
- case IIO_ACCEL:
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (type) {
+ case IIO_EV_TYPE_MAG:
reg = get_gen_config_reg(dir);
if (reg < 0)
return -EINVAL;
@@ -1196,6 +1422,39 @@ static int bma400_read_event_value(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG,
+ &reg_val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(BMA400_TAP_SEN_MSK, reg_val);
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1,
+ &reg_val);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMA400_TAP_QUIET_MSK, reg_val);
+ *val = 0;
+ *val2 = tap_reset_timeout[raw];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1,
+ &reg_val);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMA400_TAP_QUIETDT_MSK, reg_val);
+ *val = 0;
+ *val2 = double_tap2_min_delay[raw];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -1209,10 +1468,13 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
int val, int val2)
{
struct bma400_data *data = iio_priv(indio_dev);
- int reg, ret;
+ int reg, ret, raw;
- switch (chan->type) {
- case IIO_ACCEL:
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (type) {
+ case IIO_EV_TYPE_MAG:
reg = get_gen_config_reg(dir);
if (reg < 0)
return -EINVAL;
@@ -1248,6 +1510,40 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ BMA400_TAP_CONFIG,
+ BMA400_TAP_SEN_MSK,
+ FIELD_PREP(BMA400_TAP_SEN_MSK,
+ val));
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ raw = usec_to_tapreg_raw(val2, tap_reset_timeout);
+ if (raw < 0)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ BMA400_TAP_CONFIG1,
+ BMA400_TAP_QUIET_MSK,
+ FIELD_PREP(BMA400_TAP_QUIET_MSK,
+ raw));
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ raw = usec_to_tapreg_raw(val2, double_tap2_min_delay);
+ if (raw < 0)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ BMA400_TAP_CONFIG1,
+ BMA400_TAP_QUIETDT_MSK,
+ FIELD_PREP(BMA400_TAP_QUIETDT_MSK,
+ raw));
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -1287,6 +1583,7 @@ static const struct iio_info bma400_info = {
.write_event_config = bma400_write_event_config,
.write_event_value = bma400_write_event_value,
.read_event_value = bma400_read_event_value,
+ .event_attrs = &bma400_event_attribute_group,
};
static const struct iio_trigger_ops bma400_trigger_ops = {
@@ -1350,6 +1647,32 @@ static irqreturn_t bma400_interrupt(int irq, void *private)
if (ret || !data->status)
goto unlock_err;
+ /*
+	 * Disable all advanced interrupts if an interrupt engine overrun occurs.
+ * See section 4.7 "Interrupt engine overrun" in datasheet v1.2.
+ */
+ if (FIELD_GET(BMA400_INT_ENG_OVRUN_MSK, le16_to_cpu(data->status))) {
+ bma400_disable_adv_interrupt(data);
+ dev_err(data->dev, "Interrupt engine overrun\n");
+ goto unlock_err;
+ }
+
+ if (FIELD_GET(BMA400_INT_S_TAP_MSK, le16_to_cpu(data->status)))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE,
+ IIO_EV_DIR_SINGLETAP),
+ timestamp);
+
+ if (FIELD_GET(BMA400_INT_D_TAP_MSK, le16_to_cpu(data->status)))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE,
+ IIO_EV_DIR_DOUBLETAP),
+ timestamp);
+
if (FIELD_GET(BMA400_INT_GEN1_MSK, le16_to_cpu(data->status)))
ev_dir = IIO_EV_DIR_RISING;
@@ -1467,5 +1790,6 @@ int bma400_probe(struct device *dev, struct regmap *regmap, int irq,
EXPORT_SYMBOL_NS(bma400_probe, IIO_BMA400);
MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
+MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor core");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index bca4cf98bf4d..84edcc78d796 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -606,7 +606,7 @@ void bmi088_accel_core_remove(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(bmi088_accel_core_remove, IIO_BMI088);
-static int __maybe_unused bmi088_accel_runtime_suspend(struct device *dev)
+static int bmi088_accel_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmi088_accel_data *data = iio_priv(indio_dev);
@@ -614,7 +614,7 @@ static int __maybe_unused bmi088_accel_runtime_suspend(struct device *dev)
return bmi088_accel_power_down(data);
}
-static int __maybe_unused bmi088_accel_runtime_resume(struct device *dev)
+static int bmi088_accel_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmi088_accel_data *data = iio_priv(indio_dev);
@@ -622,13 +622,10 @@ static int __maybe_unused bmi088_accel_runtime_resume(struct device *dev)
return bmi088_accel_power_up(data);
}
-const struct dev_pm_ops bmi088_accel_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(bmi088_accel_runtime_suspend,
- bmi088_accel_runtime_resume, NULL)
-};
-EXPORT_SYMBOL_NS_GPL(bmi088_accel_pm_ops, IIO_BMI088);
+EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(bmi088_accel_pm_ops,
+ bmi088_accel_runtime_suspend,
+ bmi088_accel_runtime_resume, NULL,
+ IIO_BMI088);
MODULE_AUTHOR("Niek van Agt <niek.van.agt@topicproducts.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index 9e2ed3bd5661..ee540edd8412 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(spi, bmi088_accel_id);
static struct spi_driver bmi088_accel_driver = {
.driver = {
.name = "bmi088_accel_spi",
- .pm = &bmi088_accel_pm_ops,
+ .pm = pm_ptr(&bmi088_accel_pm_ops),
.of_match_table = bmi088_of_match,
},
.probe = bmi088_accel_probe,
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 94f7b6ac5c87..adc66b3615c0 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1064,7 +1064,7 @@ static int kxcjk1013_write_event_config(struct iio_dev *indio_dev,
/*
* We will expect the enable and disable to do operation in
- * in reverse order. This will happen here anyway as our
+ * reverse order. This will happen here anyway as our
* resume operation uses sync mode runtime pm calls, the
* suspend operation will be delayed by autosuspend delay
* So the disable operation will still happen in reverse of
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
new file mode 100644
index 000000000000..2fded3759171
--- /dev/null
+++ b/drivers/iio/accel/msa311.c
@@ -0,0 +1,1321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MEMSensing digital 3-Axis accelerometer
+ *
+ * MSA311 is a tri-axial, low-g accelerometer with an I2C digital output for
+ * sensitive consumer applications. It has dynamically user-selectable
+ * full-scale ranges of +-2g/+-4g/+-8g/+-16g and allows acceleration
+ * measurements with output data rates from 1Hz to 1000Hz.
+ *
+ * MSA311 is available in an ultra small (2mm x 2mm, height 0.95mm) LGA package
+ * and is guaranteed to operate over -40C to +85C.
+ *
+ * This driver supports the following MSA311 features:
+ * - IIO interface
+ * - Different power modes: NORMAL, SUSPEND
+ * - ODR (Output Data Rate) selection
+ * - Scale selection
+ * - IIO triggered buffer
+ * - NEW_DATA interrupt + trigger
+ *
+ * The following features are yet to be implemented:
+ * - Motion Events: ACTIVE, TAP, ORIENT, FREEFALL
+ * - Low Power mode
+ *
+ * Copyright (c) 2022, SberDevices. All Rights Reserved.
+ *
+ * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/string_helpers.h>
+#include <linux/units.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define MSA311_SOFT_RESET_REG 0x00
+#define MSA311_PARTID_REG 0x01
+#define MSA311_ACC_X_REG 0x02
+#define MSA311_ACC_Y_REG 0x04
+#define MSA311_ACC_Z_REG 0x06
+#define MSA311_MOTION_INT_REG 0x09
+#define MSA311_DATA_INT_REG 0x0A
+#define MSA311_TAP_ACTIVE_STS_REG 0x0B
+#define MSA311_ORIENT_STS_REG 0x0C
+#define MSA311_RANGE_REG 0x0F
+#define MSA311_ODR_REG 0x10
+#define MSA311_PWR_MODE_REG 0x11
+#define MSA311_SWAP_POLARITY_REG 0x12
+#define MSA311_INT_SET_0_REG 0x16
+#define MSA311_INT_SET_1_REG 0x17
+#define MSA311_INT_MAP_0_REG 0x19
+#define MSA311_INT_MAP_1_REG 0x1A
+#define MSA311_INT_CONFIG_REG 0x20
+#define MSA311_INT_LATCH_REG 0x21
+#define MSA311_FREEFALL_DUR_REG 0x22
+#define MSA311_FREEFALL_TH_REG 0x23
+#define MSA311_FREEFALL_HY_REG 0x24
+#define MSA311_ACTIVE_DUR_REG 0x27
+#define MSA311_ACTIVE_TH_REG 0x28
+#define MSA311_TAP_DUR_REG 0x2A
+#define MSA311_TAP_TH_REG 0x2B
+#define MSA311_ORIENT_HY_REG 0x2C
+#define MSA311_Z_BLOCK_REG 0x2D
+#define MSA311_OFFSET_X_REG 0x38
+#define MSA311_OFFSET_Y_REG 0x39
+#define MSA311_OFFSET_Z_REG 0x3A
+
+enum msa311_fields {
+ /* Soft_Reset */
+ F_SOFT_RESET_I2C, F_SOFT_RESET_SPI,
+ /* Motion_Interrupt */
+ F_ORIENT_INT, F_S_TAP_INT, F_D_TAP_INT, F_ACTIVE_INT, F_FREEFALL_INT,
+ /* Data_Interrupt */
+ F_NEW_DATA_INT,
+ /* Tap_Active_Status */
+ F_TAP_SIGN, F_TAP_FIRST_X, F_TAP_FIRST_Y, F_TAP_FIRST_Z, F_ACTV_SIGN,
+ F_ACTV_FIRST_X, F_ACTV_FIRST_Y, F_ACTV_FIRST_Z,
+ /* Orientation_Status */
+ F_ORIENT_Z, F_ORIENT_X_Y,
+ /* Range */
+ F_FS,
+ /* ODR */
+ F_X_AXIS_DIS, F_Y_AXIS_DIS, F_Z_AXIS_DIS, F_ODR,
+ /* Power Mode/Bandwidth */
+ F_PWR_MODE, F_LOW_POWER_BW,
+ /* Swap_Polarity */
+ F_X_POLARITY, F_Y_POLARITY, F_Z_POLARITY, F_X_Y_SWAP,
+ /* Int_Set_0 */
+ F_ORIENT_INT_EN, F_S_TAP_INT_EN, F_D_TAP_INT_EN, F_ACTIVE_INT_EN_Z,
+ F_ACTIVE_INT_EN_Y, F_ACTIVE_INT_EN_X,
+ /* Int_Set_1 */
+ F_NEW_DATA_INT_EN, F_FREEFALL_INT_EN,
+ /* Int_Map_0 */
+ F_INT1_ORIENT, F_INT1_S_TAP, F_INT1_D_TAP, F_INT1_ACTIVE,
+ F_INT1_FREEFALL,
+ /* Int_Map_1 */
+ F_INT1_NEW_DATA,
+ /* Int_Config */
+ F_INT1_OD, F_INT1_LVL,
+ /* Int_Latch */
+ F_RESET_INT, F_LATCH_INT,
+ /* Freefall_Hy */
+ F_FREEFALL_MODE, F_FREEFALL_HY,
+ /* Active_Dur */
+ F_ACTIVE_DUR,
+ /* Tap_Dur */
+ F_TAP_QUIET, F_TAP_SHOCK, F_TAP_DUR,
+ /* Tap_Th */
+ F_TAP_TH,
+ /* Orient_Hy */
+ F_ORIENT_HYST, F_ORIENT_BLOCKING, F_ORIENT_MODE,
+ /* Z_Block */
+ F_Z_BLOCKING,
+ /* End of register map */
+ F_MAX_FIELDS,
+};
+
+static const struct reg_field msa311_reg_fields[] = {
+ /* Soft_Reset */
+ [F_SOFT_RESET_I2C] = REG_FIELD(MSA311_SOFT_RESET_REG, 2, 2),
+ [F_SOFT_RESET_SPI] = REG_FIELD(MSA311_SOFT_RESET_REG, 5, 5),
+ /* Motion_Interrupt */
+ [F_ORIENT_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 6, 6),
+ [F_S_TAP_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 5, 5),
+ [F_D_TAP_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 4, 4),
+ [F_ACTIVE_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 2, 2),
+ [F_FREEFALL_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 0, 0),
+ /* Data_Interrupt */
+ [F_NEW_DATA_INT] = REG_FIELD(MSA311_DATA_INT_REG, 0, 0),
+ /* Tap_Active_Status */
+ [F_TAP_SIGN] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 7, 7),
+ [F_TAP_FIRST_X] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 6, 6),
+ [F_TAP_FIRST_Y] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 5, 5),
+ [F_TAP_FIRST_Z] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 4, 4),
+ [F_ACTV_SIGN] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 3, 3),
+ [F_ACTV_FIRST_X] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 2, 2),
+ [F_ACTV_FIRST_Y] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 1, 1),
+ [F_ACTV_FIRST_Z] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 0, 0),
+ /* Orientation_Status */
+ [F_ORIENT_Z] = REG_FIELD(MSA311_ORIENT_STS_REG, 6, 6),
+ [F_ORIENT_X_Y] = REG_FIELD(MSA311_ORIENT_STS_REG, 4, 5),
+ /* Range */
+ [F_FS] = REG_FIELD(MSA311_RANGE_REG, 0, 1),
+ /* ODR */
+ [F_X_AXIS_DIS] = REG_FIELD(MSA311_ODR_REG, 7, 7),
+ [F_Y_AXIS_DIS] = REG_FIELD(MSA311_ODR_REG, 6, 6),
+ [F_Z_AXIS_DIS] = REG_FIELD(MSA311_ODR_REG, 5, 5),
+ [F_ODR] = REG_FIELD(MSA311_ODR_REG, 0, 3),
+ /* Power Mode/Bandwidth */
+ [F_PWR_MODE] = REG_FIELD(MSA311_PWR_MODE_REG, 6, 7),
+ [F_LOW_POWER_BW] = REG_FIELD(MSA311_PWR_MODE_REG, 1, 4),
+ /* Swap_Polarity */
+ [F_X_POLARITY] = REG_FIELD(MSA311_SWAP_POLARITY_REG, 3, 3),
+ [F_Y_POLARITY] = REG_FIELD(MSA311_SWAP_POLARITY_REG, 2, 2),
+ [F_Z_POLARITY] = REG_FIELD(MSA311_SWAP_POLARITY_REG, 1, 1),
+ [F_X_Y_SWAP] = REG_FIELD(MSA311_SWAP_POLARITY_REG, 0, 0),
+ /* Int_Set_0 */
+ [F_ORIENT_INT_EN] = REG_FIELD(MSA311_INT_SET_0_REG, 6, 6),
+ [F_S_TAP_INT_EN] = REG_FIELD(MSA311_INT_SET_0_REG, 5, 5),
+ [F_D_TAP_INT_EN] = REG_FIELD(MSA311_INT_SET_0_REG, 4, 4),
+ [F_ACTIVE_INT_EN_Z] = REG_FIELD(MSA311_INT_SET_0_REG, 2, 2),
+ [F_ACTIVE_INT_EN_Y] = REG_FIELD(MSA311_INT_SET_0_REG, 1, 1),
+ [F_ACTIVE_INT_EN_X] = REG_FIELD(MSA311_INT_SET_0_REG, 0, 0),
+ /* Int_Set_1 */
+ [F_NEW_DATA_INT_EN] = REG_FIELD(MSA311_INT_SET_1_REG, 4, 4),
+ [F_FREEFALL_INT_EN] = REG_FIELD(MSA311_INT_SET_1_REG, 3, 3),
+ /* Int_Map_0 */
+ [F_INT1_ORIENT] = REG_FIELD(MSA311_INT_MAP_0_REG, 6, 6),
+ [F_INT1_S_TAP] = REG_FIELD(MSA311_INT_MAP_0_REG, 5, 5),
+ [F_INT1_D_TAP] = REG_FIELD(MSA311_INT_MAP_0_REG, 4, 4),
+ [F_INT1_ACTIVE] = REG_FIELD(MSA311_INT_MAP_0_REG, 2, 2),
+ [F_INT1_FREEFALL] = REG_FIELD(MSA311_INT_MAP_0_REG, 0, 0),
+ /* Int_Map_1 */
+ [F_INT1_NEW_DATA] = REG_FIELD(MSA311_INT_MAP_1_REG, 0, 0),
+ /* Int_Config */
+ [F_INT1_OD] = REG_FIELD(MSA311_INT_CONFIG_REG, 1, 1),
+ [F_INT1_LVL] = REG_FIELD(MSA311_INT_CONFIG_REG, 0, 0),
+ /* Int_Latch */
+ [F_RESET_INT] = REG_FIELD(MSA311_INT_LATCH_REG, 7, 7),
+ [F_LATCH_INT] = REG_FIELD(MSA311_INT_LATCH_REG, 0, 3),
+ /* Freefall_Hy */
+ [F_FREEFALL_MODE] = REG_FIELD(MSA311_FREEFALL_HY_REG, 2, 2),
+ [F_FREEFALL_HY] = REG_FIELD(MSA311_FREEFALL_HY_REG, 0, 1),
+ /* Active_Dur */
+ [F_ACTIVE_DUR] = REG_FIELD(MSA311_ACTIVE_DUR_REG, 0, 1),
+ /* Tap_Dur */
+ [F_TAP_QUIET] = REG_FIELD(MSA311_TAP_DUR_REG, 7, 7),
+ [F_TAP_SHOCK] = REG_FIELD(MSA311_TAP_DUR_REG, 6, 6),
+ [F_TAP_DUR] = REG_FIELD(MSA311_TAP_DUR_REG, 0, 2),
+ /* Tap_Th */
+ [F_TAP_TH] = REG_FIELD(MSA311_TAP_TH_REG, 0, 4),
+ /* Orient_Hy */
+ [F_ORIENT_HYST] = REG_FIELD(MSA311_ORIENT_HY_REG, 4, 6),
+ [F_ORIENT_BLOCKING] = REG_FIELD(MSA311_ORIENT_HY_REG, 2, 3),
+ [F_ORIENT_MODE] = REG_FIELD(MSA311_ORIENT_HY_REG, 0, 1),
+ /* Z_Block */
+ [F_Z_BLOCKING] = REG_FIELD(MSA311_Z_BLOCK_REG, 0, 3),
+};
+
+#define MSA311_WHO_AM_I 0x13
+
+/*
+ * Possible Full Scale ranges
+ *
+ * Axis data is a 12-bit signed value, so the full -FS..+FS range
+ * spans 2^12 counts:
+ *
+ * fs0 = (2 + 2) * 9.81 / (2^12) = 0.009580
+ * fs1 = (4 + 4) * 9.81 / (2^12) = 0.019160
+ * fs2 = (8 + 8) * 9.81 / (2^12) = 0.038320
+ * fs3 = (16 + 16) * 9.81 / (2^12) = 0.076641
+ */
+enum {
+ MSA311_FS_2G,
+ MSA311_FS_4G,
+ MSA311_FS_8G,
+ MSA311_FS_16G,
+};
+
+struct iio_decimal_fract {
+ int integral;
+ int microfract;
+};
+
+static const struct iio_decimal_fract msa311_fs_table[] = {
+ {0, 9580}, {0, 19160}, {0, 38320}, {0, 76641},
+};
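For illustration, converting a raw count with the first table entry looks roughly like the sketch below (not part of the driver; the helper name is hypothetical):

	/* +/-2g range: one LSB is 0.009580 m/s^2 */
	static inline long long msa311_counts_to_ums2(int raw)
	{
		/* micro-m/s^2: e.g. 2047 -> ~19.61 m/s^2, i.e. about 2g */
		return (long long)raw * 9580;
	}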
+
+/* Possible Output Data Rate values */
+enum {
+ MSA311_ODR_1_HZ,
+ MSA311_ODR_1_95_HZ,
+ MSA311_ODR_3_9_HZ,
+ MSA311_ODR_7_81_HZ,
+ MSA311_ODR_15_63_HZ,
+ MSA311_ODR_31_25_HZ,
+ MSA311_ODR_62_5_HZ,
+ MSA311_ODR_125_HZ,
+ MSA311_ODR_250_HZ,
+ MSA311_ODR_500_HZ,
+ MSA311_ODR_1000_HZ,
+};
+
+static const struct iio_decimal_fract msa311_odr_table[] = {
+ {1, 0}, {1, 950000}, {3, 900000}, {7, 810000}, {15, 630000},
+ {31, 250000}, {62, 500000}, {125, 0}, {250, 0}, {500, 0}, {1000, 0},
+};
+
+/* All supported power modes */
+#define MSA311_PWR_MODE_NORMAL 0b00
+#define MSA311_PWR_MODE_LOW 0b01
+#define MSA311_PWR_MODE_UNKNOWN 0b10
+#define MSA311_PWR_MODE_SUSPEND 0b11
+static const char * const msa311_pwr_modes[] = {
+ [MSA311_PWR_MODE_NORMAL] = "normal",
+ [MSA311_PWR_MODE_LOW] = "low",
+ [MSA311_PWR_MODE_UNKNOWN] = "unknown",
+ [MSA311_PWR_MODE_SUSPEND] = "suspend",
+};
+
+/* Autosuspend delay */
+#define MSA311_PWR_SLEEP_DELAY_MS 2000
+
+/* Possible INT1 types and levels */
+enum {
+ MSA311_INT1_OD_PUSH_PULL,
+ MSA311_INT1_OD_OPEN_DRAIN,
+};
+
+enum {
+ MSA311_INT1_LVL_LOW,
+ MSA311_INT1_LVL_HIGH,
+};
+
+/* Latch INT modes */
+#define MSA311_LATCH_INT_NOT_LATCHED 0b0000
+#define MSA311_LATCH_INT_250MS 0b0001
+#define MSA311_LATCH_INT_500MS 0b0010
+#define MSA311_LATCH_INT_1S 0b0011
+#define MSA311_LATCH_INT_2S 0b0100
+#define MSA311_LATCH_INT_4S 0b0101
+#define MSA311_LATCH_INT_8S 0b0110
+#define MSA311_LATCH_INT_1MS 0b1010
+#define MSA311_LATCH_INT_2MS 0b1011
+#define MSA311_LATCH_INT_25MS 0b1100
+#define MSA311_LATCH_INT_50MS 0b1101
+#define MSA311_LATCH_INT_100MS 0b1110
+#define MSA311_LATCH_INT_LATCHED 0b0111
+
+static const struct regmap_range msa311_readonly_registers[] = {
+ regmap_reg_range(MSA311_PARTID_REG, MSA311_ORIENT_STS_REG),
+};
+
+static const struct regmap_access_table msa311_writeable_table = {
+ .no_ranges = msa311_readonly_registers,
+ .n_no_ranges = ARRAY_SIZE(msa311_readonly_registers),
+};
+
+static const struct regmap_range msa311_writeonly_registers[] = {
+ regmap_reg_range(MSA311_SOFT_RESET_REG, MSA311_SOFT_RESET_REG),
+};
+
+static const struct regmap_access_table msa311_readable_table = {
+ .no_ranges = msa311_writeonly_registers,
+ .n_no_ranges = ARRAY_SIZE(msa311_writeonly_registers),
+};
+
+static const struct regmap_range msa311_volatile_registers[] = {
+ regmap_reg_range(MSA311_ACC_X_REG, MSA311_ORIENT_STS_REG),
+};
+
+static const struct regmap_access_table msa311_volatile_table = {
+ .yes_ranges = msa311_volatile_registers,
+ .n_yes_ranges = ARRAY_SIZE(msa311_volatile_registers),
+};
+
+static const struct regmap_config msa311_regmap_config = {
+ .name = "msa311",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MSA311_OFFSET_Z_REG,
+ .wr_table = &msa311_writeable_table,
+ .rd_table = &msa311_readable_table,
+ .volatile_table = &msa311_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+#define MSA311_GENMASK(field) ({ \
+ typeof(&(msa311_reg_fields)[0]) _field; \
+ _field = &msa311_reg_fields[(field)]; \
+ GENMASK(_field->msb, _field->lsb); \
+})
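As a concrete example of the macro above: MSA311_GENMASK(F_SOFT_RESET_I2C) evaluates to GENMASK(2, 2), i.e. BIT(2) of the soft reset register, and msa311_soft_reset() further below ORs it with MSA311_GENMASK(F_SOFT_RESET_SPI) so both reset bits are written in a single register access.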
+
+/**
+ * struct msa311_priv - MSA311 internal private state
+ * @regs: Register map used to abstract slave register accesses over
+ * the underlying I2C bus
+ * @fields: Abstract objects for access to each register's fields
+ * @dev: Device handle associated with the appropriate bus client
+ * @lock: Protects msa311 device state between setup and data access routines
+ * (power transitions, samp_freq/scale tuning, retrieving axes data, etc.)
+ * @chip_name: Chip name in the format "msa311-%02x" % partid
+ * @new_data_trig: Optional NEW_DATA interrupt-driven trigger used
+ * to notify external consumers that a new sample is ready
+ * @vdd: Optional external voltage regulator for the device power supply
+ */
+struct msa311_priv {
+ struct regmap *regs;
+ struct regmap_field *fields[F_MAX_FIELDS];
+
+ struct device *dev;
+ struct mutex lock;
+ char *chip_name;
+
+ struct iio_trigger *new_data_trig;
+ struct regulator *vdd;
+};
+
+enum msa311_si {
+ MSA311_SI_X,
+ MSA311_SI_Y,
+ MSA311_SI_Z,
+ MSA311_SI_TIMESTAMP,
+};
+
+#define MSA311_ACCEL_CHANNEL(axis) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = MSA311_SI_##axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_LE, \
+ }, \
+ .datasheet_name = "ACC_"#axis, \
+}
+
+static const struct iio_chan_spec msa311_channels[] = {
+ MSA311_ACCEL_CHANNEL(X),
+ MSA311_ACCEL_CHANNEL(Y),
+ MSA311_ACCEL_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(MSA311_SI_TIMESTAMP),
+};
+
+/**
+ * msa311_get_odr() - Read Output Data Rate (ODR) value from MSA311 accel
+ * @msa311: MSA311 internal private state
+ * @odr: output ODR value
+ *
+ * This function should be called under msa311->lock.
+ *
+ * Return: 0 on success, -ERRNO in other failures
+ */
+static int msa311_get_odr(struct msa311_priv *msa311, unsigned int *odr)
+{
+ int err;
+
+ err = regmap_field_read(msa311->fields[F_ODR], odr);
+ if (err)
+ return err;
+
+ /*
+ * Per the datasheet, register values 0b1010-0b1111 all select the
+ * 1000Hz ODR, so normalize them and always return MSA311_ODR_1000_HZ.
+ */
+ if (*odr > MSA311_ODR_1000_HZ)
+ *odr = MSA311_ODR_1000_HZ;
+
+ return 0;
+}
+
+/**
+ * msa311_set_odr() - Setup Output Data Rate (ODR) value for MSA311 accel
+ * @msa311: MSA311 internal private state
+ * @odr: requested ODR value
+ *
+ * This function should be called under msa311->lock. Possible ODR values:
+ * - 1Hz (not available in normal mode)
+ * - 1.95Hz (not available in normal mode)
+ * - 3.9Hz
+ * - 7.81Hz
+ * - 15.63Hz
+ * - 31.25Hz
+ * - 62.5Hz
+ * - 125Hz
+ * - 250Hz
+ * - 500Hz
+ * - 1000Hz
+ *
+ * Return: 0 on success, -EINVAL for bad ODR value in the certain power mode,
+ * -ERRNO in other failures
+ */
+static int msa311_set_odr(struct msa311_priv *msa311, unsigned int odr)
+{
+ struct device *dev = msa311->dev;
+ unsigned int pwr_mode;
+ bool good_odr;
+ int err;
+
+ err = regmap_field_read(msa311->fields[F_PWR_MODE], &pwr_mode);
+ if (err)
+ return err;
+
+ /* Filter bad ODR values */
+ if (pwr_mode == MSA311_PWR_MODE_NORMAL)
+ good_odr = (odr > MSA311_ODR_1_95_HZ);
+ else
+ good_odr = false;
+
+ if (!good_odr) {
+ dev_err(dev,
+ "can't set odr %u.%06uHz, not available in %s mode\n",
+ msa311_odr_table[odr].integral,
+ msa311_odr_table[odr].microfract,
+ msa311_pwr_modes[pwr_mode]);
+ return -EINVAL;
+ }
+
+ return regmap_field_write(msa311->fields[F_ODR], odr);
+}
+
+/**
+ * msa311_wait_for_next_data() - Wait for the next accel data to become available after resume
+ * @msa311: MSA311 internal private state
+ *
+ * Return: 0 on success, -EINTR if msleep() was interrupted,
+ * -ERRNO in other failures
+ */
+static int msa311_wait_for_next_data(struct msa311_priv *msa311)
+{
+ static const unsigned int unintr_thresh_ms = 20;
+ struct device *dev = msa311->dev;
+ unsigned long freq_uhz;
+ unsigned long wait_ms;
+ unsigned int odr;
+ int err;
+
+ err = msa311_get_odr(msa311, &odr);
+ if (err) {
+ dev_err(dev, "can't get actual frequency (%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ /*
+ * After the msa311 resumes, wait for the accel logic to refresh
+ * the data. The timeout is derived from the current ODR value.
+ * If the computed timeout is short enough (below the 20ms threshold),
+ * wait for the next sample in an uninterruptible sleep.
+ */
+ freq_uhz = msa311_odr_table[odr].integral * MICROHZ_PER_HZ +
+ msa311_odr_table[odr].microfract;
+ wait_ms = (MICROHZ_PER_HZ / freq_uhz) * MSEC_PER_SEC;
+
+ if (wait_ms < unintr_thresh_ms)
+ usleep_range(wait_ms * USEC_PER_MSEC,
+ unintr_thresh_ms * USEC_PER_MSEC);
+ else if (msleep_interruptible(wait_ms))
+ return -EINTR;
+
+ return 0;
+}
+
+/**
+ * msa311_set_pwr_mode() - Install certain MSA311 power mode
+ * @msa311: MSA311 internal private state
+ * @mode: Power mode can be equal to NORMAL or SUSPEND
+ *
+ * This function should be called under msa311->lock.
+ *
+ * Return: 0 on success, -ERRNO on failure
+ */
+static int msa311_set_pwr_mode(struct msa311_priv *msa311, unsigned int mode)
+{
+ struct device *dev = msa311->dev;
+ unsigned int prev_mode;
+ int err;
+
+ if (mode >= ARRAY_SIZE(msa311_pwr_modes))
+ return -EINVAL;
+
+ dev_dbg(dev, "transition to %s mode\n", msa311_pwr_modes[mode]);
+
+ err = regmap_field_read(msa311->fields[F_PWR_MODE], &prev_mode);
+ if (err)
+ return err;
+
+ err = regmap_field_write(msa311->fields[F_PWR_MODE], mode);
+ if (err)
+ return err;
+
+ /* Wait for actual data if we are waking up */
+ if (prev_mode == MSA311_PWR_MODE_SUSPEND &&
+ mode == MSA311_PWR_MODE_NORMAL)
+ return msa311_wait_for_next_data(msa311);
+
+ return 0;
+}
+
+/**
+ * msa311_get_axis() - Read MSA311 accel data for certain IIO channel axis spec
+ * @msa311: MSA311 internal private state
+ * @chan: IIO channel specification
+ * @axis: Output accel axis data for requested IIO channel spec
+ *
+ * This function should be called under msa311->lock.
+ *
+ * Return: 0 on success, -EINVAL for unknown IIO channel specification,
+ * -ERRNO in other failures
+ */
+static int msa311_get_axis(struct msa311_priv *msa311,
+ const struct iio_chan_spec * const chan,
+ __le16 *axis)
+{
+ struct device *dev = msa311->dev;
+ unsigned int axis_reg;
+
+ if (chan->scan_index < MSA311_SI_X || chan->scan_index > MSA311_SI_Z) {
+ dev_err(dev, "invalid scan_index value [%d]\n",
+ chan->scan_index);
+ return -EINVAL;
+ }
+
+ /* Axis registers are laid out 2 bytes apart, starting from the X axis */
+ axis_reg = MSA311_ACC_X_REG + (chan->scan_index << 1);
+
+ return regmap_bulk_read(msa311->regs, axis_reg, axis, sizeof(*axis));
+}
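For the three channels this resolves to MSA311_ACC_X_REG + 0/2/4 for X/Y/Z, and each bulk read fetches the two little-endian bytes (LSB, then MSB) of one axis.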
+
+static int msa311_read_raw_data(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ __le16 axis;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
+
+ mutex_lock(&msa311->lock);
+ err = msa311_get_axis(msa311, chan, &axis);
+ mutex_unlock(&msa311->lock);
+
+ iio_device_release_direct_mode(indio_dev);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ if (err) {
+ dev_err(dev, "can't get axis %s (%pe)\n",
+ chan->datasheet_name, ERR_PTR(err));
+ return err;
+ }
+
+ /*
+ * Axis data format is:
+ * ACC_X = (ACC_X_MSB[7:0] << 4) | ACC_X_LSB[7:4]
+ */
+ *val = sign_extend32(le16_to_cpu(axis) >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+
+ return IIO_VAL_INT;
+}
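A short worked example of the unpacking above (a sketch with made-up sample bytes, not driver code):

	__le16 axis = cpu_to_le16(0xfff0);	/* LSB 0xf0, MSB 0xff in memory */
	int val = sign_extend32(le16_to_cpu(axis) >> 4, 11);	/* val == -1 */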
+
+static int msa311_read_scale(struct iio_dev *indio_dev, int *val, int *val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ unsigned int fs;
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = regmap_field_read(msa311->fields[F_FS], &fs);
+ mutex_unlock(&msa311->lock);
+ if (err) {
+ dev_err(dev, "can't get actual scale (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ *val = msa311_fs_table[fs].integral;
+ *val2 = msa311_fs_table[fs].microfract;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int msa311_read_samp_freq(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ unsigned int odr;
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = msa311_get_odr(msa311, &odr);
+ mutex_unlock(&msa311->lock);
+ if (err) {
+ dev_err(dev, "can't get actual frequency (%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ *val = msa311_odr_table[odr].integral;
+ *val2 = msa311_odr_table[odr].microfract;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int msa311_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return msa311_read_raw_data(indio_dev, chan, val, val2);
+
+ case IIO_CHAN_INFO_SCALE:
+ return msa311_read_scale(indio_dev, val, val2);
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return msa311_read_samp_freq(indio_dev, val, val2);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int msa311_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (int *)msa311_odr_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* ODR value has 2 ints (integer and fractional parts) */
+ *length = ARRAY_SIZE(msa311_odr_table) * 2;
+ return IIO_AVAIL_LIST;
+
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)msa311_fs_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* FS value has 2 ints (integer and fractional parts) */
+ *length = ARRAY_SIZE(msa311_fs_table) * 2;
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
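With IIO_AVAIL_LIST and IIO_VAL_INT_PLUS_MICRO, the IIO core renders each {integral, microfract} pair as one decimal number, so the available sampling frequencies should show up in sysfs roughly as "1 1.95 3.9 7.81 ... 1000" and the scales as "0.009580 0.019160 0.038320 0.076641".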
+
+static int msa311_write_scale(struct iio_dev *indio_dev, int val, int val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ unsigned int fs;
+ int err;
+
+ /* No supported scale reaches 1, so skip values with a nonzero integer part */
+ if (val)
+ return 0;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ err = -EINVAL;
+ for (fs = 0; fs < ARRAY_SIZE(msa311_fs_table); fs++)
+ /* Do not check msa311_fs_table[fs].integral, it's always 0 */
+ if (val2 == msa311_fs_table[fs].microfract) {
+ mutex_lock(&msa311->lock);
+ err = regmap_field_write(msa311->fields[F_FS], fs);
+ mutex_unlock(&msa311->lock);
+ break;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ if (err)
+ dev_err(dev, "can't update scale (%pe)\n", ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ unsigned int odr;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ /*
+ * Changing the sampling frequency is prohibited while buffer mode is
+ * enabled, because the MSA311 chip sometimes returns outliers when
+ * the frequency is increased in the middle of a read operation.
+ */
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
+
+ err = -EINVAL;
+ for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++)
+ if (val == msa311_odr_table[odr].integral &&
+ val2 == msa311_odr_table[odr].microfract) {
+ mutex_lock(&msa311->lock);
+ err = msa311_set_odr(msa311, odr);
+ mutex_unlock(&msa311->lock);
+ break;
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ if (err)
+ dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return msa311_write_scale(indio_dev, val, val2);
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return msa311_write_samp_freq(indio_dev, val, val2);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int msa311_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ int err;
+
+ if (reg > regmap_get_max_register(msa311->regs))
+ return -EINVAL;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ mutex_lock(&msa311->lock);
+
+ if (readval)
+ err = regmap_read(msa311->regs, reg, readval);
+ else
+ err = regmap_write(msa311->regs, reg, writeval);
+
+ mutex_unlock(&msa311->lock);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ if (err)
+ dev_err(dev, "can't %s register %u from debugfs (%pe)\n",
+ str_read_write(readval), reg, ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int msa311_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static int msa311_set_new_data_trig_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = regmap_field_write(msa311->fields[F_NEW_DATA_INT_EN], state);
+ mutex_unlock(&msa311->lock);
+ if (err)
+ dev_err(dev,
+ "can't %s buffer due to new_data_int failure (%pe)\n",
+ str_enable_disable(state), ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ return iio_trigger_get_drvdata(trig) == indio_dev ? 0 : -EINVAL;
+}
+
+static irqreturn_t msa311_buffer_thread(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct msa311_priv *msa311 = iio_priv(pf->indio_dev);
+ struct iio_dev *indio_dev = pf->indio_dev;
+ const struct iio_chan_spec *chan;
+ struct device *dev = msa311->dev;
+ int bit, err, i = 0;
+ __le16 axis;
+ struct {
+ __le16 channels[MSA311_SI_Z + 1];
+ s64 ts __aligned(8);
+ } buf;
+
+ memset(&buf, 0, sizeof(buf));
+
+ mutex_lock(&msa311->lock);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ chan = &msa311_channels[bit];
+
+ err = msa311_get_axis(msa311, chan, &axis);
+ if (err) {
+ mutex_unlock(&msa311->lock);
+ dev_err(dev, "can't get axis %s (%pe)\n",
+ chan->datasheet_name, ERR_PTR(err));
+ goto notify_done;
+ }
+
+ buf.channels[i++] = axis;
+ }
+
+ mutex_unlock(&msa311->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &buf,
+ iio_get_time_ns(indio_dev));
+
+notify_done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t msa311_irq_thread(int irq, void *p)
+{
+ struct msa311_priv *msa311 = iio_priv(p);
+ unsigned int new_data_int_enabled;
+ struct device *dev = msa311->dev;
+ int err;
+
+ mutex_lock(&msa311->lock);
+
+ /*
+ * We do not check the NEW_DATA interrupt status, because per the
+ * specification it is cleared automatically after a fixed time.
+ * Just check that the interrupt is enabled by the driver logic.
+ */
+ err = regmap_field_read(msa311->fields[F_NEW_DATA_INT_EN],
+ &new_data_int_enabled);
+
+ mutex_unlock(&msa311->lock);
+ if (err) {
+ dev_err(dev, "can't read new_data interrupt state (%pe)\n",
+ ERR_PTR(err));
+ return IRQ_NONE;
+ }
+
+ if (new_data_int_enabled)
+ iio_trigger_poll_chained(msa311->new_data_trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info msa311_info = {
+ .read_raw = msa311_read_raw,
+ .read_avail = msa311_read_avail,
+ .write_raw = msa311_write_raw,
+ .debugfs_reg_access = msa311_debugfs_reg_access,
+};
+
+static const struct iio_buffer_setup_ops msa311_buffer_setup_ops = {
+ .preenable = msa311_buffer_preenable,
+ .postdisable = msa311_buffer_postdisable,
+};
+
+static const struct iio_trigger_ops msa311_new_data_trig_ops = {
+ .set_trigger_state = msa311_set_new_data_trig_state,
+ .validate_device = msa311_validate_device,
+};
+
+static int msa311_check_partid(struct msa311_priv *msa311)
+{
+ struct device *dev = msa311->dev;
+ unsigned int partid;
+ int err;
+
+ err = regmap_read(msa311->regs, MSA311_PARTID_REG, &partid);
+ if (err)
+ return dev_err_probe(dev, err, "failed to read partid\n");
+
+ if (partid != MSA311_WHO_AM_I)
+ dev_warn(dev, "invalid partid (%#x), expected (%#x)\n",
+ partid, MSA311_WHO_AM_I);
+
+ msa311->chip_name = devm_kasprintf(dev, GFP_KERNEL,
+ "msa311-%02x", partid);
+ if (!msa311->chip_name)
+ return dev_err_probe(dev, -ENOMEM, "can't alloc chip name\n");
+
+ return 0;
+}
+
+static int msa311_soft_reset(struct msa311_priv *msa311)
+{
+ struct device *dev = msa311->dev;
+ int err;
+
+ err = regmap_write(msa311->regs, MSA311_SOFT_RESET_REG,
+ MSA311_GENMASK(F_SOFT_RESET_I2C) |
+ MSA311_GENMASK(F_SOFT_RESET_SPI));
+ if (err)
+ return dev_err_probe(dev, err, "can't soft reset all logic\n");
+
+ return 0;
+}
+
+static int msa311_chip_init(struct msa311_priv *msa311)
+{
+ struct device *dev = msa311->dev;
+ const char zero_bulk[2] = { };
+ int err;
+
+ err = regmap_write(msa311->regs, MSA311_RANGE_REG, MSA311_FS_16G);
+ if (err)
+ return dev_err_probe(dev, err, "failed to setup accel range\n");
+
+ /* Disable all interrupts by default */
+ err = regmap_bulk_write(msa311->regs, MSA311_INT_SET_0_REG,
+ zero_bulk, sizeof(zero_bulk));
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't disable set0/set1 interrupts\n");
+
+ /* Unmap all INT1 interrupts by default */
+ err = regmap_bulk_write(msa311->regs, MSA311_INT_MAP_0_REG,
+ zero_bulk, sizeof(zero_bulk));
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to unmap map0/map1 interrupts\n");
+
+ /* Enable all axes by default (clear the per-axis disable bits) */
+ err = regmap_update_bits(msa311->regs, MSA311_ODR_REG,
+ MSA311_GENMASK(F_X_AXIS_DIS) |
+ MSA311_GENMASK(F_Y_AXIS_DIS) |
+ MSA311_GENMASK(F_Z_AXIS_DIS), 0);
+ if (err)
+ return dev_err_probe(dev, err, "can't enable all axes\n");
+
+ err = msa311_set_odr(msa311, MSA311_ODR_125_HZ);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to set accel frequency\n");
+
+ return 0;
+}
+
+static int msa311_setup_interrupts(struct msa311_priv *msa311)
+{
+ struct device *dev = msa311->dev;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
+ struct iio_trigger *trig;
+ int err;
+
+ /* Keep going without interrupts if the I2C client has no valid IRQ */
+ if (i2c->irq <= 0)
+ return 0;
+
+ err = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ msa311_irq_thread, IRQF_ONESHOT,
+ msa311->chip_name, indio_dev);
+ if (err)
+ return dev_err_probe(dev, err, "failed to request IRQ\n");
+
+ trig = devm_iio_trigger_alloc(dev, "%s-new-data", msa311->chip_name);
+ if (!trig)
+ return dev_err_probe(dev, -ENOMEM,
+ "can't allocate newdata trigger\n");
+
+ msa311->new_data_trig = trig;
+ msa311->new_data_trig->ops = &msa311_new_data_trig_ops;
+ iio_trigger_set_drvdata(msa311->new_data_trig, indio_dev);
+
+ err = devm_iio_trigger_register(dev, msa311->new_data_trig);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't register newdata trigger\n");
+
+ err = regmap_field_write(msa311->fields[F_INT1_OD],
+ MSA311_INT1_OD_PUSH_PULL);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't enable push-pull interrupt\n");
+
+ err = regmap_field_write(msa311->fields[F_INT1_LVL],
+ MSA311_INT1_LVL_HIGH);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't set active interrupt level\n");
+
+ err = regmap_field_write(msa311->fields[F_LATCH_INT],
+ MSA311_LATCH_INT_LATCHED);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't latch interrupt\n");
+
+ err = regmap_field_write(msa311->fields[F_RESET_INT], 1);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't reset interrupt\n");
+
+ err = regmap_field_write(msa311->fields[F_INT1_NEW_DATA], 1);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't map new data interrupt\n");
+
+ return 0;
+}
+
+static int msa311_regmap_init(struct msa311_priv *msa311)
+{
+ struct regmap_field **fields = msa311->fields;
+ struct device *dev = msa311->dev;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct regmap *regmap;
+ int i;
+
+ regmap = devm_regmap_init_i2c(i2c, &msa311_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "failed to register i2c regmap\n");
+
+ msa311->regs = regmap;
+
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ fields[i] = devm_regmap_field_alloc(dev,
+ msa311->regs,
+ msa311_reg_fields[i]);
+ if (IS_ERR(msa311->fields[i]))
+ return dev_err_probe(dev, PTR_ERR(msa311->fields[i]),
+ "can't alloc field[%d]\n", i);
+ }
+
+ return 0;
+}
+
+static void msa311_powerdown(void *msa311)
+{
+ msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_SUSPEND);
+}
+
+static void msa311_vdd_disable(void *vdd)
+{
+ regulator_disable(vdd);
+}
+
+static int msa311_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct msa311_priv *msa311;
+ struct iio_dev *indio_dev;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*msa311));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM,
+ "IIO device allocation failed\n");
+
+ msa311 = iio_priv(indio_dev);
+ msa311->dev = dev;
+ i2c_set_clientdata(i2c, indio_dev);
+
+ err = msa311_regmap_init(msa311);
+ if (err)
+ return err;
+
+ mutex_init(&msa311->lock);
+
+ msa311->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(msa311->vdd))
+ return dev_err_probe(dev, PTR_ERR(msa311->vdd),
+ "can't get vdd supply\n");
+
+ err = regulator_enable(msa311->vdd);
+ if (err)
+ return dev_err_probe(dev, err, "can't enable vdd supply\n");
+
+ err = devm_add_action_or_reset(dev, msa311_vdd_disable, msa311->vdd);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't add vdd disable action\n");
+
+ err = msa311_check_partid(msa311);
+ if (err)
+ return err;
+
+ err = msa311_soft_reset(msa311);
+ if (err)
+ return err;
+
+ err = msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_NORMAL);
+ if (err)
+ return dev_err_probe(dev, err, "failed to power on device\n");
+
+ /*
+ * Register a deferred powerdown callback which suspends the chip
+ * after the module is unloaded.
+ *
+ * MSA311 should be in SUSPEND mode in two cases:
+ * 1) When the driver is loaded but there are no data or
+ * configuration requests to it (handled by the autosuspend feature).
+ * 2) When the driver is unloaded and the device is not used
+ * (handled by the devm action).
+ */
+ err = devm_add_action_or_reset(dev, msa311_powerdown, msa311);
+ if (err)
+ return dev_err_probe(dev, err, "can't add powerdown action\n");
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_pm_runtime_enable(dev);
+ if (err)
+ return err;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_autosuspend_delay(dev, MSA311_PWR_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ err = msa311_chip_init(msa311);
+ if (err)
+ return err;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = msa311_channels;
+ indio_dev->num_channels = ARRAY_SIZE(msa311_channels);
+ indio_dev->name = msa311->chip_name;
+ indio_dev->info = &msa311_info;
+
+ err = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ msa311_buffer_thread,
+ &msa311_buffer_setup_ops);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't setup IIO trigger buffer\n");
+
+ err = msa311_setup_interrupts(msa311);
+ if (err)
+ return err;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ err = devm_iio_device_register(dev, indio_dev);
+ if (err)
+ return dev_err_probe(dev, err, "IIO device register failed\n");
+
+ return 0;
+}
+
+static int msa311_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_SUSPEND);
+ mutex_unlock(&msa311->lock);
+ if (err)
+ dev_err(dev, "failed to power off device (%pe)\n",
+ ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_NORMAL);
+ mutex_unlock(&msa311->lock);
+ if (err)
+ dev_err(dev, "failed to power on device (%pe)\n",
+ ERR_PTR(err));
+
+ return err;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(msa311_pm_ops, msa311_runtime_suspend,
+ msa311_runtime_resume, NULL);
+
+static const struct i2c_device_id msa311_i2c_id[] = {
+ { .name = "msa311" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, msa311_i2c_id);
+
+static const struct of_device_id msa311_of_match[] = {
+ { .compatible = "memsensing,msa311" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msa311_of_match);
+
+static struct i2c_driver msa311_driver = {
+ .driver = {
+ .name = "msa311",
+ .of_match_table = msa311_of_match,
+ .pm = pm_ptr(&msa311_pm_ops),
+ },
+ .probe_new = msa311_probe,
+ .id_table = msa311_i2c_id,
+};
+module_i2c_driver(msa311_driver);
+
+MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
+MODULE_DESCRIPTION("MEMSensing MSA311 3-axis accelerometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7fe5930891e0..791612ca6012 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -653,6 +653,20 @@ config MAX1118
To compile this driver as a module, choose M here: the module will be
called max1118.
+config MAX11205
+ tristate "Maxim max11205 ADC driver"
+ depends on SPI
+ select AD_SIGMA_DELTA
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+ help
+ Say yes here to build support for the Maxim MAX11205 16-bit,
+ single-channel, ultra-low-power delta-sigma ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max11205.
+
config MAX1241
tristate "Maxim max1241 ADC driver"
depends on SPI_MASTER
@@ -718,6 +732,8 @@ config MCP3422
config MCP3911
tristate "Microchip Technology MCP3911 driver"
depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Microchip Technology's MCP3911
analog to digital converter.
@@ -919,6 +935,21 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config RICHTEK_RTQ6056
+ tristate "Richtek RTQ6056 Current and Power Monitor ADC"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to enable RTQ6056 ADC support.
+ RTQ6056 is a high-accuracy current-sense monitor with an I2C- and
+ SMBus-compatible interface, and the device provides full information
+ for the system by reading out the load current and power.
+
+ This driver can also be built as a module. If so, the module will be
+ called rtq6056.
+
config RZG2L_ADC
tristate "Renesas RZ/G2L ADC driver"
depends on ARCH_RZG2L || COMPILE_TEST
@@ -1022,22 +1053,6 @@ config STMPE_ADC
Say yes here to build support for ST Microelectronics STMPE
built-in ADC block (stmpe811).
-config STX104
- tristate "Apex Embedded Systems STX104 driver"
- depends on PC104 && X86
- select ISA_BUS_API
- select GPIOLIB
- help
- Say yes here to build support for the Apex Embedded Systems STX104
- integrated analog PC/104 card.
-
- This driver supports the 16 channels of single-ended (8 channels of
- differential) analog inputs, 2 channels of analog output, 4 digital
- inputs, and 4 digital outputs provided by the STX104.
-
- The base port addresses for the devices may be configured via the base
- array module parameter.
-
config SUN4I_GPADC
tristate "Support for the Allwinner SoCs GPADC"
depends on IIO
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 1772a549a3c8..46caba7a010c 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_LTC2497) += ltc2497.o ltc2497-core.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX11100) += max11100.o
obj-$(CONFIG_MAX1118) += max1118.o
+obj-$(CONFIG_MAX11205) += max11205.o
obj-$(CONFIG_MAX1241) += max1241.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MAX9611) += max9611.o
@@ -85,10 +86,10 @@ obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
+obj-$(CONFIG_RICHTEK_RTQ6056) += rtq6056.o
obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
-obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index 930ce96e6ff5..4fa2126a354b 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -925,8 +925,8 @@ static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ab8500_gpadc_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int ab8500_gpadc_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
int i;
@@ -938,7 +938,7 @@ static int ab8500_gpadc_of_xlate(struct iio_dev *indio_dev,
}
static const struct iio_info ab8500_gpadc_info = {
- .of_xlate = ab8500_gpadc_of_xlate,
+ .fwnode_xlate = ab8500_gpadc_fwnode_xlate,
.read_raw = ab8500_gpadc_read_raw,
};
@@ -968,7 +968,7 @@ static int ab8500_gpadc_runtime_resume(struct device *dev)
/**
* ab8500_gpadc_parse_channel() - process devicetree channel configuration
* @dev: pointer to containing device
- * @np: device tree node for the channel to configure
+ * @fwnode: fw node for the channel to configure
* @ch: channel info to fill in
* @iio_chan: IIO channel specification to fill in
*
@@ -976,15 +976,15 @@ static int ab8500_gpadc_runtime_resume(struct device *dev)
* and define usage for things like AUX GPADC inputs more precisely.
*/
static int ab8500_gpadc_parse_channel(struct device *dev,
- struct device_node *np,
+ struct fwnode_handle *fwnode,
struct ab8500_gpadc_chan_info *ch,
struct iio_chan_spec *iio_chan)
{
- const char *name = np->name;
+ const char *name = fwnode_get_name(fwnode);
u32 chan;
int ret;
- ret = of_property_read_u32(np, "reg", &chan);
+ ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
return ret;
@@ -1021,22 +1021,20 @@ static int ab8500_gpadc_parse_channel(struct device *dev,
/**
* ab8500_gpadc_parse_channels() - Parse the GPADC channels from DT
* @gpadc: the GPADC to configure the channels for
- * @np: device tree node containing the channel configurations
* @chans: the IIO channels we parsed
* @nchans: the number of IIO channels we parsed
*/
static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
- struct device_node *np,
struct iio_chan_spec **chans_parsed,
unsigned int *nchans_parsed)
{
- struct device_node *child;
+ struct fwnode_handle *child;
struct ab8500_gpadc_chan_info *ch;
struct iio_chan_spec *iio_chans;
unsigned int nchans;
int i;
- nchans = of_get_available_child_count(np);
+ nchans = device_get_child_node_count(gpadc->dev);
if (!nchans) {
dev_err(gpadc->dev, "no channel children\n");
return -ENODEV;
@@ -1054,7 +1052,7 @@ static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
return -ENOMEM;
i = 0;
- for_each_available_child_of_node(np, child) {
+ device_for_each_child_node(gpadc->dev, child) {
struct iio_chan_spec *iio_chan;
int ret;
@@ -1064,7 +1062,7 @@ static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
ret = ab8500_gpadc_parse_channel(gpadc->dev, child, ch,
iio_chan);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
i++;
@@ -1081,7 +1079,6 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
struct ab8500_gpadc *gpadc;
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
struct iio_chan_spec *iio_chans;
unsigned int n_iio_chans;
int ret;
@@ -1096,7 +1093,7 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
gpadc->dev = dev;
gpadc->ab8500 = dev_get_drvdata(dev->parent);
- ret = ab8500_gpadc_parse_channels(gpadc, np, &iio_chans, &n_iio_chans);
+ ret = ab8500_gpadc_parse_channels(gpadc, &iio_chans, &n_iio_chans);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index c5b785d8b241..4088786e1026 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -936,11 +936,6 @@ static void ad7124_reg_disable(void *r)
regulator_disable(r);
}
-static void ad7124_clk_disable(void *c)
-{
- clk_disable_unprepare(c);
-}
-
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
@@ -993,18 +988,10 @@ static int ad7124_probe(struct spi_device *spi)
return ret;
}
- st->mclk = devm_clk_get(&spi->dev, "mclk");
+ st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
return PTR_ERR(st->mclk);
- ret = clk_prepare_enable(st->mclk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7124_clk_disable, st->mclk);
- if (ret)
- return ret;
-
ret = ad7124_soft_reset(st);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 652db768ef37..70a25949142c 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -539,13 +539,6 @@ static void ad7768_regulator_disable(void *data)
regulator_disable(st->vref);
}
-static void ad7768_clk_disable(void *data)
-{
- struct ad7768_state *st = data;
-
- clk_disable_unprepare(st->mclk);
-}
-
static int ad7768_set_channel_label(struct iio_dev *indio_dev,
int num_channels)
{
@@ -600,18 +593,10 @@ static int ad7768_probe(struct spi_device *spi)
if (ret)
return ret;
- st->mclk = devm_clk_get(&spi->dev, "mclk");
+ st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
return PTR_ERR(st->mclk);
- ret = clk_prepare_enable(st->mclk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7768_clk_disable, st);
- if (ret)
- return ret;
-
st->mclk_freq = clk_get_rate(st->mclk);
mutex_init(&st->lock);
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index edad1f30121d..9d6bf6d0927a 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
@@ -93,6 +94,7 @@ enum ad7923_id {
.sign = 'u', \
.realbits = (bits), \
.storagebits = 16, \
+ .shift = 12 - (bits), \
.endianness = IIO_BE, \
}, \
}
@@ -268,7 +270,8 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
return ret;
if (chan->address == EXTRACT(ret, 12, 4))
- *val = EXTRACT(ret, 0, 12);
+ *val = EXTRACT(ret, chan->scan_type.shift,
+ chan->scan_type.realbits);
else
return -EIO;
@@ -298,6 +301,7 @@ static void ad7923_regulator_disable(void *data)
static int ad7923_probe(struct spi_device *spi)
{
+ u32 ad7923_range = AD7923_RANGE;
struct ad7923_state *st;
struct iio_dev *indio_dev;
const struct ad7923_chip_info *info;
@@ -309,8 +313,11 @@ static int ad7923_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
+ if (device_property_read_bool(&spi->dev, "adi,range-double"))
+ ad7923_range = 0;
+
st->spi = spi;
- st->settings = AD7923_CODING | AD7923_RANGE |
+ st->settings = AD7923_CODING | ad7923_range |
AD7923_PM_MODE_WRITE(AD7923_PM_MODE_OPS);
info = &ad7923_chip_info[spi_get_device_id(spi)->driver_data];
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 5a5f33f7bc8f..7534572f7475 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -378,13 +378,6 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
return ad9467_outputmode_set(st->spi, st->output_mode);
}
-static void ad9467_clk_disable(void *data)
-{
- struct ad9467_state *st = data;
-
- clk_disable_unprepare(st->clk);
-}
-
static int ad9467_probe(struct spi_device *spi)
{
const struct ad9467_chip_info *info;
@@ -404,18 +397,10 @@ static int ad9467_probe(struct spi_device *spi)
st = adi_axi_adc_conv_priv(conv);
st->spi = spi;
- st->clk = devm_clk_get(&spi->dev, "adc-clk");
+ st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
if (IS_ERR(st->clk))
return PTR_ERR(st->clk);
- ret = clk_prepare_enable(st->clk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad9467_clk_disable, st);
- if (ret)
- return ret;
-
st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
GPIOD_OUT_LOW);
if (IS_ERR(st->pwrdown_gpio))
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 279430c1d88c..4294d6539cdb 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -16,9 +16,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/sched.h>
+#include <linux/units.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -26,9 +28,13 @@
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/nvmem-consumer.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <dt-bindings/iio/adc/at91-sama5d2_adc.h>
+
struct at91_adc_reg_layout {
/* Control Register */
u16 CR;
@@ -73,11 +79,14 @@ struct at91_adc_reg_layout {
/* Startup Time */
#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16)
#define AT91_SAMA5D2_MR_STARTUP_MASK GENMASK(19, 16)
+/* Minimum startup time for temperature sensor */
+#define AT91_SAMA5D2_MR_STARTUP_TS_MIN (50)
/* Analog Change */
#define AT91_SAMA5D2_MR_ANACH BIT(23)
/* Tracking Time */
#define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24)
-#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff
+#define AT91_SAMA5D2_MR_TRACKTIM_TS 6
+#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xf
/* Transfer Time */
#define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28)
#define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3
@@ -138,11 +147,19 @@ struct at91_adc_reg_layout {
/* Extended Mode Register */
u16 EMR;
/* Extended Mode Register - Oversampling rate */
-#define AT91_SAMA5D2_EMR_OSR(V) ((V) << 16)
-#define AT91_SAMA5D2_EMR_OSR_MASK GENMASK(17, 16)
+#define AT91_SAMA5D2_EMR_OSR(V, M) (((V) << 16) & (M))
#define AT91_SAMA5D2_EMR_OSR_1SAMPLES 0
#define AT91_SAMA5D2_EMR_OSR_4SAMPLES 1
#define AT91_SAMA5D2_EMR_OSR_16SAMPLES 2
+#define AT91_SAMA5D2_EMR_OSR_64SAMPLES 3
+#define AT91_SAMA5D2_EMR_OSR_256SAMPLES 4
+
+/* Extended Mode Register - TRACKX */
+#define AT91_SAMA5D2_TRACKX_MASK GENMASK(23, 22)
+#define AT91_SAMA5D2_TRACKX(x) (((x) << 22) & \
+ AT91_SAMA5D2_TRACKX_MASK)
+/* TRACKX for temperature sensor. */
+#define AT91_SAMA5D2_TRACKX_TS (1)
/* Extended Mode Register - Averaging on single trigger event */
#define AT91_SAMA5D2_EMR_ASTE(V) ((V) << 20)
@@ -159,6 +176,8 @@ struct at91_adc_reg_layout {
u16 ACR;
/* Analog Control Register - Pen detect sensitivity mask */
#define AT91_SAMA5D2_ACR_PENDETSENS_MASK GENMASK(1, 0)
+/* Analog Control Register - Source last channel */
+#define AT91_SAMA5D2_ACR_SRCLCH BIT(16)
/* Touchscreen Mode Register */
u16 TSMR;
@@ -226,6 +245,10 @@ struct at91_adc_reg_layout {
u16 WPSR;
/* Version Register */
u16 VERSION;
+/* Temperature Sensor Mode Register */
+ u16 TEMPMR;
+/* Temperature Sensor Mode - Temperature sensor on */
+#define AT91_SAMA5D2_TEMPMR_TEMPON BIT(0)
};
static const struct at91_adc_reg_layout sama5d2_layout = {
@@ -280,6 +303,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.EOC_IDR = 0x38,
.EOC_IMR = 0x3c,
.EOC_ISR = 0x40,
+ .TEMPMR = 0x44,
.OVER = 0x4c,
.EMR = 0x50,
.CWR = 0x54,
@@ -305,11 +329,6 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
-/* Possible values for oversampling ratio */
-#define AT91_OSR_1SAMPLES 1
-#define AT91_OSR_4SAMPLES 4
-#define AT91_OSR_16SAMPLES 16
-
#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
{ \
.type = IIO_VOLTAGE, \
@@ -325,6 +344,8 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = "CH"#num, \
.indexed = 1, \
}
@@ -346,6 +367,8 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = "CH"#num"-CH"#num2, \
.indexed = 1, \
}
@@ -365,6 +388,8 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = name, \
}
#define AT91_SAMA5D2_CHAN_PRESSURE(num, name) \
@@ -380,6 +405,23 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .datasheet_name = name, \
+ }
+
+#define AT91_SAMA5D2_CHAN_TEMP(num, name, addr) \
+ { \
+ .type = IIO_TEMP, \
+ .channel = num, \
+ .address = addr, \
+ .scan_index = num, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = name, \
}
@@ -403,6 +445,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
* @max_index: highest channel index (highest index may be higher
* than the total channel number)
* @hw_trig_cnt: number of possible hardware triggers
+ * @osr_mask: oversampling ratio bitmask on EMR register
+ * @oversampling_avail: available oversampling values
+ * @oversampling_avail_no: number of available oversampling values
+ * @chan_realbits: realbits for registered channels
+ * @temp_chan: temperature channel index
+ * @temp_sensor: temperature sensor supported
*/
struct at91_adc_platform {
const struct at91_adc_reg_layout *layout;
@@ -414,20 +462,58 @@ struct at91_adc_platform {
unsigned int max_channels;
unsigned int max_index;
unsigned int hw_trig_cnt;
+ unsigned int osr_mask;
+ unsigned int oversampling_avail[5];
+ unsigned int oversampling_avail_no;
+ unsigned int chan_realbits;
+ unsigned int temp_chan;
+ bool temp_sensor;
+};
+
+/**
+ * struct at91_adc_temp_sensor_clb - at91-sama5d2 temperature sensor
+ * calibration data structure
+ * @p1: P1 calibration temperature
+ * @p4: P4 calibration voltage
+ * @p6: P6 calibration voltage
+ */
+struct at91_adc_temp_sensor_clb {
+ u32 p1;
+ u32 p4;
+ u32 p6;
+};
+
+/**
+ * enum at91_adc_ts_clb_idx - calibration indexes in NVMEM buffer
+ * @AT91_ADC_TS_CLB_IDX_P1: index for P1
+ * @AT91_ADC_TS_CLB_IDX_P4: index for P4
+ * @AT91_ADC_TS_CLB_IDX_P6: index for P6
+ * @AT91_ADC_TS_CLB_IDX_MAX: max index for temperature calibration packet in OTP
+ */
+enum at91_adc_ts_clb_idx {
+ AT91_ADC_TS_CLB_IDX_P1 = 2,
+ AT91_ADC_TS_CLB_IDX_P4 = 5,
+ AT91_ADC_TS_CLB_IDX_P6 = 7,
+ AT91_ADC_TS_CLB_IDX_MAX = 19,
};
+/* Temperature sensor calibration - Vtemp voltage sensitivity to temperature. */
+#define AT91_ADC_TS_VTEMP_DT (2080U)
+
/**
* struct at91_adc_soc_info - at91-sama5d2 soc information struct
* @startup_time: device startup time
* @min_sample_rate: minimum sample rate in Hz
* @max_sample_rate: maximum sample rate in Hz
* @platform: pointer to the platform structure
+ * @temp_sensor_clb: temperature sensor calibration data structure
*/
struct at91_adc_soc_info {
unsigned startup_time;
unsigned min_sample_rate;
unsigned max_sample_rate;
const struct at91_adc_platform *platform;
+ struct at91_adc_temp_sensor_clb temp_sensor_clb;
};
struct at91_adc_trigger {
@@ -475,6 +561,18 @@ struct at91_adc_touch {
struct work_struct workq;
};
+/**
+ * struct at91_adc_temp - at91-sama5d2 temperature information structure
+ * @sample_period_val: sample period value
+ * @saved_sample_rate: saved sample rate
+ * @saved_oversampling: saved oversampling
+ */
+struct at91_adc_temp {
+ u16 sample_period_val;
+ u16 saved_sample_rate;
+ u16 saved_oversampling;
+};
+
/*
* Buffer size requirements:
* No channels * bytes_per_channel(2) + timestamp bytes (8)
@@ -502,7 +600,9 @@ struct at91_adc_state {
wait_queue_head_t wq_data_available;
struct at91_adc_dma dma_st;
struct at91_adc_touch touch_st;
+ struct at91_adc_temp temp_st;
struct iio_dev *indio_dev;
+ struct device *dev;
/* Ensure naturally aligned timestamp */
u16 buffer[AT91_BUFFER_MAX_HWORDS] __aligned(8);
/*
@@ -591,6 +691,7 @@ static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
IIO_CHAN_SOFT_TIMESTAMP(24),
+ AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc),
};
static const struct at91_adc_platform sama5d2_platform = {
@@ -612,6 +713,10 @@ static const struct at91_adc_platform sama5d2_platform = {
.max_index = AT91_SAMA5D2_MAX_CHAN_IDX,
#define AT91_SAMA5D2_HW_TRIG_CNT 3
.hw_trig_cnt = AT91_SAMA5D2_HW_TRIG_CNT,
+ .osr_mask = GENMASK(17, 16),
+ .oversampling_avail = { 1, 4, 16, },
+ .oversampling_avail_no = 3,
+ .chan_realbits = 14,
};
static const struct at91_adc_platform sama7g5_platform = {
@@ -619,14 +724,23 @@ static const struct at91_adc_platform sama7g5_platform = {
.adc_channels = &at91_sama7g5_adc_channels,
#define AT91_SAMA7G5_SINGLE_CHAN_CNT 16
#define AT91_SAMA7G5_DIFF_CHAN_CNT 8
+#define AT91_SAMA7G5_TEMP_CHAN_CNT 1
.nr_channels = AT91_SAMA7G5_SINGLE_CHAN_CNT +
- AT91_SAMA7G5_DIFF_CHAN_CNT,
+ AT91_SAMA7G5_DIFF_CHAN_CNT +
+ AT91_SAMA7G5_TEMP_CHAN_CNT,
#define AT91_SAMA7G5_MAX_CHAN_IDX (AT91_SAMA7G5_SINGLE_CHAN_CNT + \
- AT91_SAMA7G5_DIFF_CHAN_CNT)
+ AT91_SAMA7G5_DIFF_CHAN_CNT + \
+ AT91_SAMA7G5_TEMP_CHAN_CNT)
.max_channels = ARRAY_SIZE(at91_sama7g5_adc_channels),
.max_index = AT91_SAMA7G5_MAX_CHAN_IDX,
#define AT91_SAMA7G5_HW_TRIG_CNT 3
.hw_trig_cnt = AT91_SAMA7G5_HW_TRIG_CNT,
+ .osr_mask = GENMASK(18, 16),
+ .oversampling_avail = { 1, 4, 16, 64, 256, },
+ .oversampling_avail_no = 5,
+ .chan_realbits = 16,
+ .temp_sensor = true,
+ .temp_chan = AT91_SAMA7G5_ADC_TEMP_CHANNEL,
};
static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
@@ -650,8 +764,8 @@ at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
return indio_dev->channels + index;
}
-static inline int at91_adc_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static inline int at91_adc_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
return at91_adc_chan_xlate(indio_dev, iiospec->args[0]);
}
@@ -725,51 +839,91 @@ static void at91_adc_eoc_ena(struct at91_adc_state *st, unsigned int channel)
at91_adc_writel(st, EOC_IER, BIT(channel));
}
-static void at91_adc_config_emr(struct at91_adc_state *st)
+static int at91_adc_config_emr(struct at91_adc_state *st,
+ u32 oversampling_ratio, u32 trackx)
{
/* configure the extended mode register */
- unsigned int emr = at91_adc_readl(st, EMR);
-
- /* select oversampling per single trigger event */
- emr |= AT91_SAMA5D2_EMR_ASTE(1);
+ unsigned int emr, osr;
+ unsigned int osr_mask = st->soc_info.platform->osr_mask;
+ int i, ret;
- /* delete leftover content if it's the case */
- emr &= ~AT91_SAMA5D2_EMR_OSR_MASK;
+ /* Check against supported oversampling values. */
+ for (i = 0; i < st->soc_info.platform->oversampling_avail_no; i++) {
+ if (oversampling_ratio == st->soc_info.platform->oversampling_avail[i])
+ break;
+ }
+ if (i == st->soc_info.platform->oversampling_avail_no)
+ return -EINVAL;
/* select oversampling ratio from configuration */
- switch (st->oversampling_ratio) {
- case AT91_OSR_1SAMPLES:
- emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_1SAMPLES) &
- AT91_SAMA5D2_EMR_OSR_MASK;
+ switch (oversampling_ratio) {
+ case 1:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_1SAMPLES,
+ osr_mask);
break;
- case AT91_OSR_4SAMPLES:
- emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_4SAMPLES) &
- AT91_SAMA5D2_EMR_OSR_MASK;
+ case 4:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_4SAMPLES,
+ osr_mask);
break;
- case AT91_OSR_16SAMPLES:
- emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_16SAMPLES) &
- AT91_SAMA5D2_EMR_OSR_MASK;
+ case 16:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_16SAMPLES,
+ osr_mask);
+ break;
+ case 64:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_64SAMPLES,
+ osr_mask);
+ break;
+ case 256:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_256SAMPLES,
+ osr_mask);
break;
}
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
+ emr = at91_adc_readl(st, EMR);
+ /* select oversampling per single trigger event */
+ emr |= AT91_SAMA5D2_EMR_ASTE(1);
+ /* delete leftover content if it's the case */
+ emr &= ~(osr_mask | AT91_SAMA5D2_TRACKX_MASK);
+ /* Update osr and trackx. */
+ emr |= osr | AT91_SAMA5D2_TRACKX(trackx);
at91_adc_writel(st, EMR, emr);
+
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
+ st->oversampling_ratio = oversampling_ratio;
+
+ return 0;
}
static int at91_adc_adjust_val_osr(struct at91_adc_state *st, int *val)
{
- if (st->oversampling_ratio == AT91_OSR_1SAMPLES) {
- /*
- * in this case we only have 12 bits of real data, but channel
- * is registered as 14 bits, so shift left two bits
- */
- *val <<= 2;
- } else if (st->oversampling_ratio == AT91_OSR_4SAMPLES) {
- /*
- * in this case we have 13 bits of real data, but channel
- * is registered as 14 bits, so left shift one bit
- */
- *val <<= 1;
- }
+ int nbits, diff;
+
+ if (st->oversampling_ratio == 1)
+ nbits = 12;
+ else if (st->oversampling_ratio == 4)
+ nbits = 13;
+ else if (st->oversampling_ratio == 16)
+ nbits = 14;
+ else if (st->oversampling_ratio == 64)
+ nbits = 15;
+ else if (st->oversampling_ratio == 256)
+ nbits = 16;
+ else
+ /* Should not happen. */
+ return -EINVAL;
+
+ /*
+ * We have nbits of real data and channel is registered as
+ * st->soc_info.platform->chan_realbits, so shift left diff bits.
+ */
+ diff = st->soc_info.platform->chan_realbits - nbits;
+ *val <<= diff;
return IIO_VAL_INT;
}
@@ -799,15 +953,22 @@ static void at91_adc_adjust_val_osr_array(struct at91_adc_state *st, void *buf,
static int at91_adc_configure_touch(struct at91_adc_state *st, bool state)
{
u32 clk_khz = st->current_sample_rate / 1000;
- int i = 0;
+ int i = 0, ret;
u16 pendbc;
u32 tsmr, acr;
- if (!state) {
+ if (state) {
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+ } else {
/* disabling touch IRQs and setting mode to no touch enabled */
at91_adc_writel(st, IDR,
AT91_SAMA5D2_IER_PEN | AT91_SAMA5D2_IER_NOPEN);
at91_adc_writel(st, TSMR, 0);
+
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
return 0;
}
/*
@@ -948,10 +1109,9 @@ static int at91_adc_read_pressure(struct at91_adc_state *st, int chan, u16 *val)
return IIO_VAL_INT;
}
-static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+static void at91_adc_configure_trigger_registers(struct at91_adc_state *st,
+ bool state)
{
- struct iio_dev *indio = iio_trigger_get_drvdata(trig);
- struct at91_adc_state *st = iio_priv(indio);
u32 status = at91_adc_readl(st, TRGR);
/* clear TRGMOD */
@@ -962,6 +1122,26 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
/* set/unset hw trigger */
at91_adc_writel(st, TRGR, status);
+}
+
+static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+ struct at91_adc_state *st = iio_priv(indio);
+ int ret;
+
+ if (state) {
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ at91_adc_configure_trigger_registers(st, state);
+
+ if (!state) {
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+ }
return 0;
}
@@ -1120,11 +1300,15 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
/* we continue with the triggered buffer */
ret = at91_adc_dma_start(indio_dev);
if (ret) {
dev_err(&indio_dev->dev, "buffer prepare failed\n");
- return ret;
+ goto pm_runtime_put;
}
for_each_set_bit(bit, indio_dev->active_scan_mask,
@@ -1135,7 +1319,8 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
continue;
/* these channel types cannot be handled by this trigger */
if (chan->type == IIO_POSITIONRELATIVE ||
- chan->type == IIO_PRESSURE)
+ chan->type == IIO_PRESSURE ||
+ chan->type == IIO_TEMP)
continue;
at91_adc_cor(st, chan);
@@ -1146,12 +1331,16 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
if (at91_adc_buffer_check_use_irq(indio_dev, st))
at91_adc_writel(st, IER, AT91_SAMA5D2_IER_DRDY);
- return 0;
+pm_runtime_put:
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+ return ret;
}
static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
u8 bit;
/* check if we are disabling triggered buffer or the touchscreen */
@@ -1162,6 +1351,10 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
/*
* For each enable channel we must disable it in hardware.
* In the case of DMA, we must read the last converted value
@@ -1177,7 +1370,8 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
continue;
/* these channel types are virtual, no need to do anything */
if (chan->type == IIO_POSITIONRELATIVE ||
- chan->type == IIO_PRESSURE)
+ chan->type == IIO_PRESSURE ||
+ chan->type == IIO_TEMP)
continue;
at91_adc_writel(st, CHDR, BIT(chan->channel));
@@ -1196,6 +1390,9 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
if (st->dma_st.dma_chan)
dmaengine_terminate_sync(st->dma_st.dma_chan);
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
return 0;
}
@@ -1224,6 +1421,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
return trig;
}
+
static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
struct iio_poll_func *pf)
{
@@ -1377,25 +1575,35 @@ static unsigned at91_adc_startup_time(unsigned startup_time_min,
return i;
}
-static void at91_adc_setup_samp_freq(struct iio_dev *indio_dev, unsigned freq)
+static void at91_adc_setup_samp_freq(struct iio_dev *indio_dev, unsigned freq,
+ unsigned int startup_time,
+ unsigned int tracktim)
{
struct at91_adc_state *st = iio_priv(indio_dev);
unsigned f_per, prescal, startup, mr;
+ int ret;
f_per = clk_get_rate(st->per_clk);
prescal = (f_per / (2 * freq)) - 1;
- startup = at91_adc_startup_time(st->soc_info.startup_time,
- freq / 1000);
+ startup = at91_adc_startup_time(startup_time, freq / 1000);
+
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return;
mr = at91_adc_readl(st, MR);
mr &= ~(AT91_SAMA5D2_MR_STARTUP_MASK | AT91_SAMA5D2_MR_PRESCAL_MASK);
mr |= AT91_SAMA5D2_MR_STARTUP(startup);
mr |= AT91_SAMA5D2_MR_PRESCAL(prescal);
+ mr |= AT91_SAMA5D2_MR_TRACKTIM(tracktim);
at91_adc_writel(st, MR, mr);
- dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
- freq, startup, prescal);
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
+ dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u, tracktim=%u\n",
+ freq, startup, prescal, tracktim);
st->current_sample_rate = freq;
}
@@ -1522,6 +1730,7 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
return IRQ_HANDLED;
}
+/* This needs to be called with direct mode claimed and st->lock locked. */
static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
@@ -1529,50 +1738,46 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
u16 tmp_val;
int ret;
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
/*
* Keep in mind that we cannot use software trigger or touchscreen
* if external trigger is enabled
*/
if (chan->type == IIO_POSITIONRELATIVE) {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
-
ret = at91_adc_read_position(st, chan->channel,
&tmp_val);
*val = tmp_val;
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ if (ret > 0)
+ ret = at91_adc_adjust_val_osr(st, val);
- return at91_adc_adjust_val_osr(st, val);
+ goto pm_runtime_put;
}
if (chan->type == IIO_PRESSURE) {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
-
ret = at91_adc_read_pressure(st, chan->channel,
&tmp_val);
*val = tmp_val;
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ if (ret > 0)
+ ret = at91_adc_adjust_val_osr(st, val);
- return at91_adc_adjust_val_osr(st, val);
+ goto pm_runtime_put;
}
- /* in this case we have a voltage channel */
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
+ /* in this case we have a voltage or temperature channel */
st->chan = chan;
at91_adc_cor(st, chan);
at91_adc_writel(st, CHER, BIT(chan->channel));
+ /*
+ * TEMPMR.TEMPON must be updated after CHER; otherwise, if no channel is
+ * enabled while TEMPMR.TEMPON = 1, a DRDY interrupt is triggered while
+ * preparing for the temperature read.
+ */
+ if (chan->type == IIO_TEMP)
+ at91_adc_writel(st, TEMPMR, AT91_SAMA5D2_TEMPMR_TEMPON);
at91_adc_eoc_ena(st, chan->channel);
at91_adc_writel(st, CR, AT91_SAMA5D2_CR_START);
@@ -1592,14 +1797,125 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
}
at91_adc_eoc_dis(st, st->chan->channel);
+ if (chan->type == IIO_TEMP)
+ at91_adc_writel(st, TEMPMR, 0U);
at91_adc_writel(st, CHDR, BIT(chan->channel));
/* Needed to ACK the DRDY interrupt */
at91_adc_readl(st, LCDR);
+pm_runtime_put:
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+ return ret;
+}
+
+static int at91_adc_read_info_locked(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ ret = at91_adc_read_info_raw(indio_dev, chan, val);
mutex_unlock(&st->lock);
iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static void at91_adc_temp_sensor_configure(struct at91_adc_state *st,
+ bool start)
+{
+ u32 sample_rate, oversampling_ratio;
+ u32 startup_time, tracktim, trackx;
+
+ if (start) {
+ /*
+ * Configure the sensor for best accuracy: 10MHz frequency,
+ * oversampling rate of 256, tracktim=0xf and trackx=1.
+ */
+ sample_rate = 10 * MEGA;
+ oversampling_ratio = 256;
+ startup_time = AT91_SAMA5D2_MR_STARTUP_TS_MIN;
+ tracktim = AT91_SAMA5D2_MR_TRACKTIM_TS;
+ trackx = AT91_SAMA5D2_TRACKX_TS;
+
+ st->temp_st.saved_sample_rate = st->current_sample_rate;
+ st->temp_st.saved_oversampling = st->oversampling_ratio;
+ } else {
+ /* Go back to previous settings. */
+ sample_rate = st->temp_st.saved_sample_rate;
+ oversampling_ratio = st->temp_st.saved_oversampling;
+ startup_time = st->soc_info.startup_time;
+ tracktim = 0;
+ trackx = 0;
+ }
+
+ at91_adc_setup_samp_freq(st->indio_dev, sample_rate, startup_time,
+ tracktim);
+ at91_adc_config_emr(st, oversampling_ratio, trackx);
+}
+
+static int at91_adc_read_temp(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ struct at91_adc_temp_sensor_clb *clb = &st->soc_info.temp_sensor_clb;
+ u64 div1, div2;
+ u32 tmp;
+ int ret, vbg, vtemp;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ goto unlock;
+
+ at91_adc_temp_sensor_configure(st, true);
+
+ /* Read VBG. */
+ tmp = at91_adc_readl(st, ACR);
+ tmp |= AT91_SAMA5D2_ACR_SRCLCH;
+ at91_adc_writel(st, ACR, tmp);
+ ret = at91_adc_read_info_raw(indio_dev, chan, &vbg);
+ if (ret < 0)
+ goto restore_config;
+
+ /* Read VTEMP. */
+ tmp &= ~AT91_SAMA5D2_ACR_SRCLCH;
+ at91_adc_writel(st, ACR, tmp);
+ ret = at91_adc_read_info_raw(indio_dev, chan, &vtemp);
+
+restore_config:
+ /* Revert previous settings. */
+ at91_adc_temp_sensor_configure(st, false);
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+unlock:
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Temp[milli] = p1[milli] + (vtemp * clb->p6 - clb->p4 * vbg)/
+ * (vbg * AT91_ADC_TS_VTEMP_DT)
+ */
+ div1 = DIV_ROUND_CLOSEST_ULL(((u64)vtemp * clb->p6), vbg);
+ div1 = DIV_ROUND_CLOSEST_ULL((div1 * 1000), AT91_ADC_TS_VTEMP_DT);
+ div2 = DIV_ROUND_CLOSEST_ULL((u64)clb->p4, AT91_ADC_TS_VTEMP_DT);
+ div2 *= 1000;
+ *val = clb->p1 + (int)div1 - (int)div2;
+
return ret;
}
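
The temperature conversion above is pure integer math; the sketch below reproduces the same order of operations in userspace with made-up numbers (P1_MILLI, P4, P6, VTEMP_DT and the vbg/vtemp readings are placeholders, not calibration or datasheet values):

#include <stdio.h>
#include <stdint.h>

/* Placeholder constants; the driver gets p1/p4/p6 from the NVMEM cell
 * and AT91_ADC_TS_VTEMP_DT is a fixed driver constant. */
#define P1_MILLI  30000		/* p1 already scaled to millidegrees + 5 C offset */
#define P4        1300ULL
#define P6        4200ULL
#define VTEMP_DT  2080ULL

static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	uint64_t vbg = 1100, vtemp = 720;	/* illustrative raw ADC readings */
	uint64_t div1, div2;
	long long temp_milli;

	/* Temp[milli] = p1[milli] + (vtemp * p6 - p4 * vbg) / (vbg * DT) */
	div1 = div_round_closest(vtemp * P6, vbg);
	div1 = div_round_closest(div1 * 1000, VTEMP_DT);
	div2 = div_round_closest(P4, VTEMP_DT) * 1000;
	temp_milli = P1_MILLI + (long long)div1 - (long long)div2;

	printf("%lld millidegrees Celsius\n", temp_milli);
	return 0;
}
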
@@ -1611,7 +1927,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return at91_adc_read_info_raw(indio_dev, chan, val);
+ return at91_adc_read_info_locked(indio_dev, chan, val);
+
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uv / 1000;
if (chan->differential)
@@ -1619,6 +1936,11 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type != IIO_TEMP)
+ return -EINVAL;
+ return at91_adc_read_temp(indio_dev, chan, val);
+
case IIO_CHAN_INFO_SAMP_FREQ:
*val = at91_adc_get_sample_freq(st);
return IIO_VAL_INT;
@@ -1637,31 +1959,60 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- if ((val != AT91_OSR_1SAMPLES) && (val != AT91_OSR_4SAMPLES) &&
- (val != AT91_OSR_16SAMPLES))
- return -EINVAL;
/* if no change, optimize out */
if (val == st->oversampling_ratio)
return 0;
- st->oversampling_ratio = val;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
/* update ratio */
- at91_adc_config_emr(st);
- return 0;
+ ret = at91_adc_config_emr(st, val, 0);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
if (val < st->soc_info.min_sample_rate ||
val > st->soc_info.max_sample_rate)
return -EINVAL;
- at91_adc_setup_samp_freq(indio_dev, val);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+ at91_adc_setup_samp_freq(indio_dev, val,
+ st->soc_info.startup_time, 0);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return 0;
default:
return -EINVAL;
}
}
+static int at91_adc_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = (int *)st->soc_info.platform->oversampling_avail;
+ *type = IIO_VAL_INT;
+ *length = st->soc_info.platform->oversampling_avail_no;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static void at91_adc_dma_init(struct at91_adc_state *st)
{
struct device *dev = &st->indio_dev->dev;
@@ -1817,10 +2168,11 @@ static void at91_adc_hw_init(struct iio_dev *indio_dev)
at91_adc_writel(st, MR,
AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
- at91_adc_setup_samp_freq(indio_dev, st->soc_info.min_sample_rate);
+ at91_adc_setup_samp_freq(indio_dev, st->soc_info.min_sample_rate,
+ st->soc_info.startup_time, 0);
/* configure extended mode register */
- at91_adc_config_emr(st);
+ at91_adc_config_emr(st, st->oversampling_ratio, 0);
}
static ssize_t at91_adc_get_fifo_state(struct device *dev,
@@ -1849,20 +2201,6 @@ static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);
-static IIO_CONST_ATTR(oversampling_ratio_available,
- __stringify(AT91_OSR_1SAMPLES) " "
- __stringify(AT91_OSR_4SAMPLES) " "
- __stringify(AT91_OSR_16SAMPLES));
-
-static struct attribute *at91_adc_attributes[] = {
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group at91_adc_attribute_group = {
- .attrs = at91_adc_attributes,
-};
-
static const struct attribute *at91_adc_fifo_attributes[] = {
&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
@@ -1872,11 +2210,11 @@ static const struct attribute *at91_adc_fifo_attributes[] = {
};
static const struct iio_info at91_adc_info = {
- .attrs = &at91_adc_attribute_group,
+ .read_avail = &at91_adc_read_avail,
.read_raw = &at91_adc_read_raw,
.write_raw = &at91_adc_write_raw,
.update_scan_mode = &at91_adc_update_scan_mode,
- .of_xlate = &at91_adc_of_xlate,
+ .fwnode_xlate = &at91_adc_fwnode_xlate,
.hwfifo_set_watermark = &at91_adc_set_watermark,
};
@@ -1918,12 +2256,62 @@ static int at91_adc_buffer_and_trigger_init(struct device *dev,
return 0;
}
+static int at91_adc_temp_sensor_init(struct at91_adc_state *st,
+ struct device *dev)
+{
+ struct at91_adc_temp_sensor_clb *clb = &st->soc_info.temp_sensor_clb;
+ struct nvmem_cell *temp_calib;
+ u32 *buf;
+ size_t len;
+ int ret = 0;
+
+ if (!st->soc_info.platform->temp_sensor)
+ return 0;
+
+ /* Get the calibration data from NVMEM. */
+ temp_calib = devm_nvmem_cell_get(dev, "temperature_calib");
+ if (IS_ERR(temp_calib)) {
+ ret = PTR_ERR(temp_calib);
+ if (ret != -ENOENT)
+ dev_err(dev, "Failed to get temperature_calib cell!\n");
+ return ret;
+ }
+
+ buf = nvmem_cell_read(temp_calib, &len);
+ if (IS_ERR(buf)) {
+ dev_err(dev, "Failed to read calibration data!\n");
+ return PTR_ERR(buf);
+ }
+ if (len < AT91_ADC_TS_CLB_IDX_MAX * 4) {
+ dev_err(dev, "Invalid calibration data!\n");
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ /* Store calibration data for later use. */
+ clb->p1 = buf[AT91_ADC_TS_CLB_IDX_P1];
+ clb->p4 = buf[AT91_ADC_TS_CLB_IDX_P4];
+ clb->p6 = buf[AT91_ADC_TS_CLB_IDX_P6];
+
+ /*
+ * Prepare the conversion to millidegrees here and fold the constant
+ * 5 degrees Celsius factor into p1, to avoid doing it on the hot path.
+ */
+ clb->p1 = clb->p1 * 1000 + 5000;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
static int at91_adc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct at91_adc_state *st;
struct resource *res;
- int ret, i;
+ int ret, i, num_channels;
u32 edge_type = IRQ_TYPE_NONE;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
@@ -1933,13 +2321,20 @@ static int at91_adc_probe(struct platform_device *pdev)
st = iio_priv(indio_dev);
st->indio_dev = indio_dev;
- st->soc_info.platform = of_device_get_match_data(&pdev->dev);
+ st->soc_info.platform = device_get_match_data(dev);
+
+ ret = at91_adc_temp_sensor_init(st, &pdev->dev);
+ /* Don't register temperature channel if initialization failed. */
+ if (ret)
+ num_channels = st->soc_info.platform->max_channels - 1;
+ else
+ num_channels = st->soc_info.platform->max_channels;
indio_dev->name = dev_name(&pdev->dev);
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
indio_dev->info = &at91_adc_info;
indio_dev->channels = *st->soc_info.platform->adc_channels;
- indio_dev->num_channels = st->soc_info.platform->max_channels;
+ indio_dev->num_channels = num_channels;
bitmap_set(&st->touch_st.channels_bitmask,
st->soc_info.platform->touch_chan_x, 1);
@@ -1948,36 +2343,34 @@ static int at91_adc_probe(struct platform_device *pdev)
bitmap_set(&st->touch_st.channels_bitmask,
st->soc_info.platform->touch_chan_p, 1);
- st->oversampling_ratio = AT91_OSR_1SAMPLES;
+ st->oversampling_ratio = 1;
- ret = of_property_read_u32(pdev->dev.of_node,
- "atmel,min-sample-rate-hz",
- &st->soc_info.min_sample_rate);
+ ret = device_property_read_u32(dev, "atmel,min-sample-rate-hz",
+ &st->soc_info.min_sample_rate);
if (ret) {
dev_err(&pdev->dev,
"invalid or missing value for atmel,min-sample-rate-hz\n");
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node,
- "atmel,max-sample-rate-hz",
- &st->soc_info.max_sample_rate);
+ ret = device_property_read_u32(dev, "atmel,max-sample-rate-hz",
+ &st->soc_info.max_sample_rate);
if (ret) {
dev_err(&pdev->dev,
"invalid or missing value for atmel,max-sample-rate-hz\n");
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node, "atmel,startup-time-ms",
- &st->soc_info.startup_time);
+ ret = device_property_read_u32(dev, "atmel,startup-time-ms",
+ &st->soc_info.startup_time);
if (ret) {
dev_err(&pdev->dev,
"invalid or missing value for atmel,startup-time-ms\n");
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node,
- "atmel,trigger-edge-type", &edge_type);
+ ret = device_property_read_u32(dev, "atmel,trigger-edge-type",
+ &edge_type);
if (ret) {
dev_dbg(&pdev->dev,
"atmel,trigger-edge-type not specified, only software trigger available\n");
@@ -2051,13 +2444,19 @@ static int at91_adc_probe(struct platform_device *pdev)
if (ret)
goto vref_disable;
- at91_adc_hw_init(indio_dev);
-
platform_set_drvdata(pdev, indio_dev);
+ st->dev = &pdev->dev;
+ pm_runtime_set_autosuspend_delay(st->dev, 500);
+ pm_runtime_use_autosuspend(st->dev);
+ pm_runtime_set_active(st->dev);
+ pm_runtime_enable(st->dev);
+ pm_runtime_get_noresume(st->dev);
+
+ at91_adc_hw_init(indio_dev);
ret = at91_adc_buffer_and_trigger_init(&pdev->dev, indio_dev);
if (ret < 0)
- goto per_clk_disable_unprepare;
+ goto err_pm_disable;
if (dma_coerce_mask_and_coherent(&indio_dev->dev, DMA_BIT_MASK(32)))
dev_info(&pdev->dev, "cannot set DMA mask to 32-bit\n");
@@ -2073,11 +2472,18 @@ static int at91_adc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "version: %x\n",
readl_relaxed(st->base + st->soc_info.platform->layout->VERSION));
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
return 0;
dma_disable:
at91_adc_dma_disable(st);
-per_clk_disable_unprepare:
+err_pm_disable:
+ pm_runtime_put_noidle(st->dev);
+ pm_runtime_disable(st->dev);
+ pm_runtime_set_suspended(st->dev);
+ pm_runtime_dont_use_autosuspend(st->dev);
clk_disable_unprepare(st->per_clk);
vref_disable:
regulator_disable(st->vref);
@@ -2095,6 +2501,8 @@ static int at91_adc_remove(struct platform_device *pdev)
at91_adc_dma_disable(st);
+ pm_runtime_disable(st->dev);
+ pm_runtime_set_suspended(st->dev);
clk_disable_unprepare(st->per_clk);
regulator_disable(st->vref);
@@ -2107,6 +2515,14 @@ static int at91_adc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
+ if (iio_buffer_enabled(indio_dev))
+ at91_adc_buffer_postdisable(indio_dev);
/*
* Do a software reset of the ADC before we go to suspend.
@@ -2116,6 +2532,8 @@ static int at91_adc_suspend(struct device *dev)
*/
at91_adc_writel(st, CR, AT91_SAMA5D2_CR_SWRST);
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_noidle(st->dev);
clk_disable_unprepare(st->per_clk);
regulator_disable(st->vref);
regulator_disable(st->reg);
@@ -2145,21 +2563,28 @@ static int at91_adc_resume(struct device *dev)
if (ret)
goto vref_disable_resume;
+ pm_runtime_get_noresume(st->dev);
+
at91_adc_hw_init(indio_dev);
/* reconfiguring trigger hardware state */
- if (!iio_buffer_enabled(indio_dev))
- return 0;
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = at91_adc_buffer_prepare(indio_dev);
+ if (ret)
+ goto pm_runtime_put;
- /* check if we are enabling triggered buffer or the touchscreen */
- if (at91_adc_current_chan_is_touch(indio_dev))
- return at91_adc_configure_touch(st, true);
- else
- return at91_adc_configure_trigger(st->trig, true);
+ at91_adc_configure_trigger_registers(st, true);
+ }
+
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
- /* not needed but more explicit */
return 0;
+pm_runtime_put:
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_noidle(st->dev);
+ clk_disable_unprepare(st->per_clk);
vref_disable_resume:
regulator_disable(st->vref);
reg_disable_resume:
@@ -2169,8 +2594,29 @@ resume_failed:
return ret;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend,
- at91_adc_resume);
+static int at91_adc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ clk_disable(st->per_clk);
+
+ return 0;
+}
+
+static int at91_adc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ return clk_enable(st->per_clk);
+}
+
+static const struct dev_pm_ops at91_adc_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(at91_adc_suspend, at91_adc_resume)
+ RUNTIME_PM_OPS(at91_adc_runtime_suspend, at91_adc_runtime_resume,
+ NULL)
+};
static const struct of_device_id at91_adc_dt_match[] = {
{
@@ -2191,7 +2637,7 @@ static struct platform_driver at91_adc_driver = {
.driver = {
.name = "at91-sama5d2_adc",
.of_match_table = at91_adc_dt_match,
- .pm = pm_sleep_ptr(&at91_adc_pm_ops),
+ .pm = pm_ptr(&at91_adc_pm_ops),
},
};
module_platform_driver(at91_adc_driver)
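
Most of the hunks in this file follow the same runtime-PM access pattern: take a reference before touching registers, then mark the device busy and drop the reference so the block can autosuspend after the 500 ms delay set in probe. A condensed sketch of that recurring idiom (not a function from the driver, kernel context assumed):

static int at91_adc_touch_registers(struct at91_adc_state *st)
{
	int ret;

	ret = pm_runtime_resume_and_get(st->dev);	/* power the block up */
	if (ret < 0)
		return ret;

	/* ... register accesses go here ... */

	pm_runtime_mark_last_busy(st->dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(st->dev);	/* drop the reference */

	return 0;
}
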
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index e48446784a0a..36777b827165 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -202,7 +202,7 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
struct imx8qxp_adc *adc = iio_priv(indio_dev);
struct device *dev = adc->dev;
- u32 ctrl, vref_uv;
+ u32 ctrl;
long ret;
switch (mask) {
@@ -245,8 +245,10 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- vref_uv = regulator_get_voltage(adc->vref);
- *val = vref_uv / 1000;
+ ret = regulator_get_voltage(adc->vref);
+ if (ret < 0)
+ return ret;
+ *val = ret / 1000;
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index bf5c03c34f84..a7325dbbb99a 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -719,12 +719,12 @@ static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
}
}
-static int ingenic_adc_of_xlate(struct iio_dev *iio_dev,
- const struct of_phandle_args *iiospec)
+static int ingenic_adc_fwnode_xlate(struct iio_dev *iio_dev,
+ const struct fwnode_reference_args *iiospec)
{
int i;
- if (!iiospec->args_count)
+ if (!iiospec->nargs)
return -EINVAL;
for (i = 0; i < iio_dev->num_channels; ++i)
@@ -734,16 +734,11 @@ static int ingenic_adc_of_xlate(struct iio_dev *iio_dev,
return -EINVAL;
}
-static void ingenic_adc_clk_cleanup(void *data)
-{
- clk_unprepare(data);
-}
-
static const struct iio_info ingenic_adc_info = {
.write_raw = ingenic_adc_write_raw,
.read_raw = ingenic_adc_read_raw,
.read_avail = ingenic_adc_read_avail,
- .of_xlate = ingenic_adc_of_xlate,
+ .fwnode_xlate = ingenic_adc_fwnode_xlate,
};
static int ingenic_adc_buffer_enable(struct iio_dev *iio_dev)
@@ -858,13 +853,13 @@ static int ingenic_adc_probe(struct platform_device *pdev)
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
- adc->clk = devm_clk_get(dev, "adc");
+ adc->clk = devm_clk_get_prepared(dev, "adc");
if (IS_ERR(adc->clk)) {
dev_err(dev, "Unable to get clock\n");
return PTR_ERR(adc->clk);
}
- ret = clk_prepare_enable(adc->clk);
+ ret = clk_enable(adc->clk);
if (ret) {
dev_err(dev, "Failed to enable clock\n");
return ret;
@@ -893,12 +888,6 @@ static int ingenic_adc_probe(struct platform_device *pdev)
usleep_range(2000, 3000); /* Must wait at least 2ms. */
clk_disable(adc->clk);
- ret = devm_add_action_or_reset(dev, ingenic_adc_clk_cleanup, adc->clk);
- if (ret) {
- dev_err(dev, "Unable to add action\n");
- return ret;
- }
-
iio_dev->name = "jz-adc";
iio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
iio_dev->setup_ops = &ingenic_buffer_setup_ops;
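
The switch to devm_clk_get_prepared() above is why the explicit ingenic_adc_clk_cleanup() action could be dropped: the clock comes back already prepared and is unprepared automatically when the device is detached, so probe only has to toggle clk_enable()/clk_disable(). A sketch of the resulting shape (example_probe is illustrative, not a function from this driver):

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Prepared for the device's lifetime; unprepared automatically on detach. */
	clk = devm_clk_get_prepared(&pdev->dev, "adc");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);	/* cheap on/off around actual hardware use */
	if (ret)
		return ret;

	/* ... hardware setup ... */

	clk_disable(clk);
	return 0;
}
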
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index 42e6cd6fa6f7..450a243d1f7c 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -121,11 +121,6 @@ static void lpc18xx_clear_cr_reg(void *data)
writel(0, adc->base + LPC18XX_ADC_CR);
}
-static void lpc18xx_clk_disable(void *clk)
-{
- clk_disable_unprepare(clk);
-}
-
static void lpc18xx_regulator_disable(void *vref)
{
regulator_disable(vref);
@@ -151,7 +146,7 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
- adc->clk = devm_clk_get(&pdev->dev, NULL);
+ adc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(adc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(adc->clk),
"error getting clock\n");
@@ -177,17 +172,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(adc->clk);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&pdev->dev, lpc18xx_clk_disable,
- adc->clk);
- if (ret)
- return ret;
-
rate = clk_get_rate(adc->clk);
clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
index dfb3bb5997e5..2593fa4322eb 100644
--- a/drivers/iio/adc/ltc2496.c
+++ b/drivers/iio/adc/ltc2496.c
@@ -15,6 +15,7 @@
#include <linux/iio/driver.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include "ltc2497.h"
@@ -74,6 +75,7 @@ static int ltc2496_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
st->common_ddata.result_and_measure = ltc2496_result_and_measure;
+ st->common_ddata.chip_info = device_get_match_data(dev);
return ltc2497core_probe(dev, indio_dev);
}
@@ -85,8 +87,13 @@ static void ltc2496_remove(struct spi_device *spi)
ltc2497core_remove(indio_dev);
}
+static const struct ltc2497_chip_info ltc2496_info = {
+ .resolution = 16,
+ .name = NULL,
+};
+
static const struct of_device_id ltc2496_of_match[] = {
- { .compatible = "lltc,ltc2496", },
+ { .compatible = "lltc,ltc2496", .data = &ltc2496_info, },
{},
};
MODULE_DEVICE_TABLE(of, ltc2496_of_match);
diff --git a/drivers/iio/adc/ltc2497-core.c b/drivers/iio/adc/ltc2497-core.c
index 2a485c8a1940..f52d37af4d1f 100644
--- a/drivers/iio/adc/ltc2497-core.c
+++ b/drivers/iio/adc/ltc2497-core.c
@@ -95,7 +95,7 @@ static int ltc2497core_read_raw(struct iio_dev *indio_dev,
return ret;
*val = ret / 1000;
- *val2 = 17;
+ *val2 = ddata->chip_info->resolution + 1;
return IIO_VAL_FRACTIONAL_LOG2;
@@ -169,7 +169,15 @@ int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev)
struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
int ret;
- indio_dev->name = dev_name(dev);
+ /*
+ * Keep using dev_name() for the iio_dev's name on some of the parts,
+ * since updating it would result in an ABI breakage.
+ */
+ if (ddata->chip_info->name)
+ indio_dev->name = ddata->chip_info->name;
+ else
+ indio_dev->name = dev_name(dev);
+
indio_dev->info = &ltc2497core_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ltc2497core_channel;
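
The val2 change above makes the reported scale depend on the part's resolution instead of the hard-coded 17 used for the 16-bit LTC2497. With IIO_VAL_FRACTIONAL_LOG2 the scale works out to vref_mV / 2^(resolution + 1); a quick standalone check, assuming a 2.5 V reference for illustration:

#include <stdio.h>

int main(void)
{
	double vref_mv = 2500.0;	/* assumed; the driver queries the regulator */
	int resolution = 16;		/* LTC2497; the LTC2499 uses 24 */

	/* IIO_VAL_FRACTIONAL_LOG2: scale = vref_mv / 2^(resolution + 1) */
	double lsb_mv = vref_mv / (double)(1u << (resolution + 1));

	printf("LSB = %.6f mV\n", lsb_mv);	/* ~0.019073 mV per code */
	return 0;
}
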
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index d58a432bafe1..556f10dfb502 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -12,18 +12,31 @@
#include <linux/iio/driver.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+
+#include <asm/unaligned.h>
#include "ltc2497.h"
+enum ltc2497_chip_type {
+ TYPE_LTC2497,
+ TYPE_LTC2499,
+};
+
struct ltc2497_driverdata {
/* this must be the first member */
struct ltc2497core_driverdata common_ddata;
struct i2c_client *client;
+ u32 recv_size;
+ u32 sub_lsb;
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- __be32 buf __aligned(IIO_DMA_MINALIGN);
+ union {
+ __be32 d32;
+ u8 d8[3];
+ } data __aligned(IIO_DMA_MINALIGN);
};
static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
@@ -34,13 +47,43 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
int ret;
if (val) {
- ret = i2c_master_recv(st->client, (char *)&st->buf, 3);
+ if (st->recv_size == 3)
+ ret = i2c_master_recv(st->client, (char *)&st->data.d8,
+ st->recv_size);
+ else
+ ret = i2c_master_recv(st->client, (char *)&st->data.d32,
+ st->recv_size);
if (ret < 0) {
dev_err(&st->client->dev, "i2c_master_recv failed\n");
return ret;
}
- *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
+ /*
+ * The data format is 16/24 bit 2s complement, but with an upper sign bit on the
+ * resolution + 1 position, which is set for positive values only. Given this
+ * bit's value, subtracting BIT(resolution + 1) from the ADC's result is
+ * equivalent to a sign extension.
+ */
+ if (st->recv_size == 3) {
+ /* The be24 value is 8 bits narrower than a __be32, so shorten the shift. */
+ *val = (get_unaligned_be24(st->data.d8) >> (st->sub_lsb - 8))
+ - BIT(ddata->chip_info->resolution + 1);
+ } else {
+ *val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
+ - BIT(ddata->chip_info->resolution + 1);
+ }
+
+ /*
+ * The part started a new conversion at the end of the above i2c
+ * transfer, so if the address didn't change since the last call
+ * everything is fine and we can return early.
+ * If not (which should only happen when some sort of bulk
+ * conversion is implemented) we have to program the new
+ * address. Note that this probably fails as the conversion that
+ * was triggered above is likely not complete yet and the two
+ * operations have to be done in a single transfer.
+ */
+ if (ddata->addr_prev == address)
+ return 0;
}
ret = i2c_smbus_write_byte(st->client,
@@ -54,9 +97,11 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
static int ltc2497_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct ltc2497_chip_info *chip_info;
struct iio_dev *indio_dev;
struct ltc2497_driverdata *st;
struct device *dev = &client->dev;
+ u32 resolution;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE))
@@ -71,6 +116,15 @@ static int ltc2497_probe(struct i2c_client *client,
st->client = client;
st->common_ddata.result_and_measure = ltc2497_result_and_measure;
+ chip_info = device_get_match_data(dev);
+ if (!chip_info)
+ chip_info = (const struct ltc2497_chip_info *)id->driver_data;
+ st->common_ddata.chip_info = chip_info;
+
+ resolution = chip_info->resolution;
+ st->sub_lsb = 31 - (resolution + 1);
+ st->recv_size = BITS_TO_BYTES(resolution) + 1;
+
return ltc2497core_probe(dev, indio_dev);
}
@@ -81,14 +135,27 @@ static void ltc2497_remove(struct i2c_client *client)
ltc2497core_remove(indio_dev);
}
+static const struct ltc2497_chip_info ltc2497_info[] = {
+ [TYPE_LTC2497] = {
+ .resolution = 16,
+ .name = NULL,
+ },
+ [TYPE_LTC2499] = {
+ .resolution = 24,
+ .name = "ltc2499",
+ },
+};
+
static const struct i2c_device_id ltc2497_id[] = {
- { "ltc2497", 0 },
+ { "ltc2497", (kernel_ulong_t)&ltc2497_info[TYPE_LTC2497] },
+ { "ltc2499", (kernel_ulong_t)&ltc2497_info[TYPE_LTC2499] },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc2497_id);
static const struct of_device_id ltc2497_of_match[] = {
- { .compatible = "lltc,ltc2497", },
+ { .compatible = "lltc,ltc2497", .data = &ltc2497_info[TYPE_LTC2497] },
+ { .compatible = "lltc,ltc2499", .data = &ltc2497_info[TYPE_LTC2499] },
{},
};
MODULE_DEVICE_TABLE(of, ltc2497_of_match);
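
The comment in ltc2497_result_and_measure() states that subtracting BIT(resolution + 1) from the offset-binary code is equivalent to a sign extension; the small standalone check below illustrates that claim for the 16-bit case (the value placement is simplified relative to the raw I2C words):

#include <stdio.h>
#include <stdint.h>

#define RESOLUTION 16	/* LTC2497; use 24 for the LTC2499 */

int main(void)
{
	/* Offset-binary code: bit (RESOLUTION + 1) is set for zero/positive input. */
	uint32_t plus_one_lsb  = (1u << (RESOLUTION + 1)) + 1;
	uint32_t minus_one_lsb = (1u << (RESOLUTION + 1)) - 1;

	/* Subtracting BIT(RESOLUTION + 1) yields the signed result. */
	printf("%d\n", (int)(plus_one_lsb  - (1u << (RESOLUTION + 1))));	/*  1 */
	printf("%d\n", (int)(minus_one_lsb - (1u << (RESOLUTION + 1))));	/* -1 */
	return 0;
}
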
diff --git a/drivers/iio/adc/ltc2497.h b/drivers/iio/adc/ltc2497.h
index d0b42dd6b8ad..e023de0d88c4 100644
--- a/drivers/iio/adc/ltc2497.h
+++ b/drivers/iio/adc/ltc2497.h
@@ -4,9 +4,15 @@
#define LTC2497_CONFIG_DEFAULT LTC2497_ENABLE
#define LTC2497_CONVERSION_TIME_MS 150ULL
+struct ltc2497_chip_info {
+ u32 resolution;
+ const char *name;
+};
+
struct ltc2497core_driverdata {
struct regulator *ref;
ktime_t time_prev;
+ const struct ltc2497_chip_info *chip_info;
u8 addr_prev;
int (*result_and_measure)(struct ltc2497core_driverdata *ddata,
u8 address, int *val);
diff --git a/drivers/iio/adc/max11205.c b/drivers/iio/adc/max11205.c
new file mode 100644
index 000000000000..65fc32971ba5
--- /dev/null
+++ b/drivers/iio/adc/max11205.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Maxim MAX11205 16-Bit Delta-Sigma ADC
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX1240-max11205.pdf
+ * Copyright (C) 2022 Analog Devices, Inc.
+ * Author: Ramona Bolboaca <ramona.bolboaca@analog.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/adc/ad_sigma_delta.h>
+
+#define MAX11205_BIT_SCALE 15
+#define MAX11205A_OUT_DATA_RATE 116
+#define MAX11205B_OUT_DATA_RATE 13
+
+enum max11205_chip_type {
+ TYPE_MAX11205A,
+ TYPE_MAX11205B,
+};
+
+struct max11205_chip_info {
+ unsigned int out_data_rate;
+ const char *name;
+};
+
+struct max11205_state {
+ const struct max11205_chip_info *chip_info;
+ struct regulator *vref;
+ struct ad_sigma_delta sd;
+};
+
+static const struct ad_sigma_delta_info max11205_sigma_delta_info = {
+ .has_registers = false,
+};
+
+static int max11205_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max11205_state *st = iio_priv(indio_dev);
+ int reg_mv;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return ad_sigma_delta_single_conversion(indio_dev, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ reg_mv = regulator_get_voltage(st->vref);
+ if (reg_mv < 0)
+ return reg_mv;
+ reg_mv /= 1000;
+ *val = reg_mv;
+ *val2 = MAX11205_BIT_SCALE;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->chip_info->out_data_rate;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info max11205_iio_info = {
+ .read_raw = max11205_read_raw,
+ .validate_trigger = ad_sd_validate_trigger,
+};
+
+static const struct iio_chan_spec max11205_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static const struct max11205_chip_info max11205_chip_info[] = {
+ [TYPE_MAX11205A] = {
+ .out_data_rate = MAX11205A_OUT_DATA_RATE,
+ .name = "max11205a",
+ },
+ [TYPE_MAX11205B] = {
+ .out_data_rate = MAX11205B_OUT_DATA_RATE,
+ .name = "max11205b",
+ },
+};
+
+static void max11205_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static int max11205_probe(struct spi_device *spi)
+{
+ struct max11205_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ ad_sd_init(&st->sd, indio_dev, spi, &max11205_sigma_delta_info);
+
+ st->chip_info = device_get_match_data(&spi->dev);
+ if (!st->chip_info)
+ st->chip_info =
+ (const struct max11205_chip_info *)spi_get_device_id(spi)->driver_data;
+
+ indio_dev->name = st->chip_info->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max11205_channels;
+ indio_dev->num_channels = 1;
+ indio_dev->info = &max11205_iio_info;
+
+ st->vref = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->vref))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->vref),
+ "Failed to get vref regulator\n");
+
+ ret = regulator_enable(st->vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, max11205_reg_disable, st->vref);
+ if (ret)
+ return ret;
+
+ ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id max11205_spi_ids[] = {
+ { "max11205a", (kernel_ulong_t)&max11205_chip_info[TYPE_MAX11205A] },
+ { "max11205b", (kernel_ulong_t)&max11205_chip_info[TYPE_MAX11205B] },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max11205_spi_ids);
+
+static const struct of_device_id max11205_dt_ids[] = {
+ {
+ .compatible = "maxim,max11205a",
+ .data = &max11205_chip_info[TYPE_MAX11205A],
+ },
+ {
+ .compatible = "maxim,max11205b",
+ .data = &max11205_chip_info[TYPE_MAX11205B],
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max11205_dt_ids);
+
+static struct spi_driver max11205_spi_driver = {
+ .driver = {
+ .name = "max11205",
+ .of_match_table = max11205_dt_ids,
+ },
+ .probe = max11205_probe,
+ .id_table = max11205_spi_ids,
+};
+module_spi_driver(max11205_spi_driver);
+
+MODULE_AUTHOR("Ramona Bolboaca <ramona.bolboaca@analog.com>");
+MODULE_DESCRIPTION("MAX11205 ADC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_AD_SIGMA_DELTA);
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index eef55ed4814a..a28cf86cdce8 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -29,7 +29,6 @@
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/driver.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -1595,11 +1594,6 @@ static int max1363_probe(struct i2c_client *client,
if (!indio_dev)
return -ENOMEM;
- ret = devm_iio_map_array_register(&client->dev, indio_dev,
- client->dev.platform_data);
- if (ret < 0)
- return ret;
-
st = iio_priv(indio_dev);
mutex_init(&st->lock);
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 890af7dca62d..b35fd2c9c3c0 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -5,16 +5,25 @@
* Copyright (C) 2018 Marcus Folkesson <marcus.folkesson@gmail.com>
* Copyright (C) 2018 Kent Gustavsson <kent@minoris.se>
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/trigger.h>
+
+#include <asm/unaligned.h>
+
#define MCP3911_REG_CHANNEL0 0x00
#define MCP3911_REG_CHANNEL1 0x03
#define MCP3911_REG_MOD 0x06
@@ -22,6 +31,8 @@
#define MCP3911_REG_GAIN 0x09
#define MCP3911_REG_STATUSCOM 0x0a
+#define MCP3911_STATUSCOM_DRHIZ BIT(12)
+#define MCP3911_STATUSCOM_READ GENMASK(7, 6)
#define MCP3911_STATUSCOM_CH1_24WIDTH BIT(4)
#define MCP3911_STATUSCOM_CH0_24WIDTH BIT(3)
#define MCP3911_STATUSCOM_EN_OFFCAL BIT(2)
@@ -30,6 +41,7 @@
#define MCP3911_REG_CONFIG 0x0c
#define MCP3911_CONFIG_CLKEXT BIT(1)
#define MCP3911_CONFIG_VREFEXT BIT(2)
+#define MCP3911_CONFIG_OSR GENMASK(13, 11)
#define MCP3911_REG_OFFCAL_CH0 0x0e
#define MCP3911_REG_GAINCAL_CH0 0x11
@@ -48,12 +60,22 @@
#define MCP3911_NUM_CHANNELS 2
+static const int mcp3911_osr_table[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096 };
+
struct mcp3911 {
struct spi_device *spi;
struct mutex lock;
struct regulator *vref;
struct clk *clki;
u32 dev_addr;
+ struct iio_trigger *trig;
+ struct {
+ u32 channels[MCP3911_NUM_CHANNELS];
+ s64 ts __aligned(8);
+ } scan;
+
+ u8 tx_buf __aligned(IIO_DMA_MINALIGN);
+ u8 rx_buf[MCP3911_NUM_CHANNELS * 3];
};
static int mcp3911_read(struct mcp3911 *adc, u8 reg, u32 *val, u8 len)
@@ -98,6 +120,36 @@ static int mcp3911_update(struct mcp3911 *adc, u8 reg, u32 mask,
return mcp3911_write(adc, reg, val, len);
}
+static int mcp3911_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
+ default:
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+}
+
+static int mcp3911_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *type = IIO_VAL_INT;
+ *vals = mcp3911_osr_table;
+ *length = ARRAY_SIZE(mcp3911_osr_table);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int mcp3911_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
int *val2, long mask)
@@ -126,6 +178,15 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = mcp3911_read(adc, MCP3911_REG_CONFIG, val, 2);
+ if (ret)
+ goto out;
+
+ *val = FIELD_GET(MCP3911_CONFIG_OSR, *val);
+ *val = 32 << *val;
+ ret = IIO_VAL_INT;
+ break;
case IIO_CHAN_INFO_SCALE:
if (adc->vref) {
@@ -185,6 +246,17 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
MCP3911_STATUSCOM_EN_OFFCAL,
MCP3911_STATUSCOM_EN_OFFCAL, 2);
break;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ for (int i = 0; i < ARRAY_SIZE(mcp3911_osr_table); i++) {
+ if (val == mcp3911_osr_table[i]) {
+ val = FIELD_PREP(MCP3911_CONFIG_OSR, i);
+ ret = mcp3911_update(adc, MCP3911_REG_CONFIG, MCP3911_CONFIG_OSR,
+ val, 2);
+ break;
+ }
+ }
+ break;
}
out:
@@ -196,25 +268,80 @@ out:
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = idx, \
+ .scan_index = idx, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_OFFSET) | \
BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 24, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
}
static const struct iio_chan_spec mcp3911_channels[] = {
MCP3911_CHAN(0),
MCP3911_CHAN(1),
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
+static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mcp3911 *adc = iio_priv(indio_dev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = &adc->tx_buf,
+ .len = 1,
+ }, {
+ .rx_buf = adc->rx_buf,
+ .len = sizeof(adc->rx_buf),
+ },
+ };
+ int scan_index;
+ int i = 0;
+ int ret;
+
+ mutex_lock(&adc->lock);
+ adc->tx_buf = MCP3911_REG_READ(MCP3911_CHANNEL(0), adc->dev_addr);
+ ret = spi_sync_transfer(adc->spi, xfer, ARRAY_SIZE(xfer));
+ if (ret < 0) {
+ dev_warn(&adc->spi->dev,
+ "failed to get conversion data\n");
+ goto out;
+ }
+
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask, indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index];
+
+ adc->scan.channels[i] = get_unaligned_be24(&adc->rx_buf[scan_chan->channel * 3]);
+ i++;
+ }
+ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
+ iio_get_time_ns(indio_dev));
+out:
+ mutex_unlock(&adc->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const struct iio_info mcp3911_info = {
.read_raw = mcp3911_read_raw,
.write_raw = mcp3911_write_raw,
+ .read_avail = mcp3911_read_avail,
+ .write_raw_get_fmt = mcp3911_write_raw_get_fmt,
};
static int mcp3911_config(struct mcp3911 *adc)
{
struct device *dev = &adc->spi->dev;
- u32 configreg;
+ u32 regval;
int ret;
ret = device_property_read_u32(dev, "microchip,device-addr", &adc->dev_addr);
@@ -233,31 +360,67 @@ static int mcp3911_config(struct mcp3911 *adc)
}
dev_dbg(&adc->spi->dev, "use device address %i\n", adc->dev_addr);
- ret = mcp3911_read(adc, MCP3911_REG_CONFIG, &configreg, 2);
+ ret = mcp3911_read(adc, MCP3911_REG_CONFIG, &regval, 2);
if (ret)
return ret;
+ regval &= ~MCP3911_CONFIG_VREFEXT;
if (adc->vref) {
dev_dbg(&adc->spi->dev, "use external voltage reference\n");
- configreg |= MCP3911_CONFIG_VREFEXT;
+ regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 1);
} else {
dev_dbg(&adc->spi->dev,
"use internal voltage reference (1.2V)\n");
- configreg &= ~MCP3911_CONFIG_VREFEXT;
+ regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 0);
}
+ regval &= ~MCP3911_CONFIG_CLKEXT;
if (adc->clki) {
dev_dbg(&adc->spi->dev, "use external clock as clocksource\n");
- configreg |= MCP3911_CONFIG_CLKEXT;
+ regval |= FIELD_PREP(MCP3911_CONFIG_CLKEXT, 1);
} else {
dev_dbg(&adc->spi->dev,
"use crystal oscillator as clocksource\n");
- configreg &= ~MCP3911_CONFIG_CLKEXT;
+ regval |= FIELD_PREP(MCP3911_CONFIG_CLKEXT, 0);
}
- return mcp3911_write(adc, MCP3911_REG_CONFIG, configreg, 2);
+ ret = mcp3911_write(adc, MCP3911_REG_CONFIG, regval, 2);
+ if (ret)
+ return ret;
+
+ ret = mcp3911_read(adc, MCP3911_REG_STATUSCOM, &regval, 2);
+ if (ret)
+ return ret;
+
+ /* Address counter incremented, cycle through register types */
+ regval &= ~MCP3911_STATUSCOM_READ;
+ regval |= FIELD_PREP(MCP3911_STATUSCOM_READ, 0x02);
+
+ return mcp3911_write(adc, MCP3911_REG_STATUSCOM, regval, 2);
}
+static void mcp3911_cleanup_regulator(void *vref)
+{
+ regulator_disable(vref);
+}
+
+static int mcp3911_set_trigger_state(struct iio_trigger *trig, bool enable)
+{
+ struct mcp3911 *adc = iio_trigger_get_drvdata(trig);
+
+ if (enable)
+ enable_irq(adc->spi->irq);
+ else
+ disable_irq(adc->spi->irq);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops mcp3911_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+ .set_trigger_state = mcp3911_set_trigger_state,
+};
+
static int mcp3911_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -286,9 +449,14 @@ static int mcp3911_probe(struct spi_device *spi)
ret = regulator_enable(adc->vref);
if (ret)
return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ mcp3911_cleanup_regulator, adc->vref);
+ if (ret)
+ return ret;
}
- adc->clki = devm_clk_get(&adc->spi->dev, NULL);
+ adc->clki = devm_clk_get_enabled(&adc->spi->dev, NULL);
if (IS_ERR(adc->clki)) {
if (PTR_ERR(adc->clki) == -ENOENT) {
adc->clki = NULL;
@@ -296,21 +464,22 @@ static int mcp3911_probe(struct spi_device *spi)
dev_err(&adc->spi->dev,
"failed to get adc clk (%ld)\n",
PTR_ERR(adc->clki));
- ret = PTR_ERR(adc->clki);
- goto reg_disable;
- }
- } else {
- ret = clk_prepare_enable(adc->clki);
- if (ret < 0) {
- dev_err(&adc->spi->dev,
- "Failed to enable clki: %d\n", ret);
- goto reg_disable;
+ return PTR_ERR(adc->clki);
}
}
ret = mcp3911_config(adc);
if (ret)
- goto clk_disable;
+ return ret;
+
+ if (device_property_read_bool(&adc->spi->dev, "microchip,data-ready-hiz"))
+ ret = mcp3911_update(adc, MCP3911_REG_STATUSCOM, MCP3911_STATUSCOM_DRHIZ,
+ 0, 2);
+ else
+ ret = mcp3911_update(adc, MCP3911_REG_STATUSCOM, MCP3911_STATUSCOM_DRHIZ,
+ MCP3911_STATUSCOM_DRHIZ, 2);
+ if (ret)
+ return ret;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -322,31 +491,38 @@ static int mcp3911_probe(struct spi_device *spi)
mutex_init(&adc->lock);
- ret = iio_device_register(indio_dev);
- if (ret)
- goto clk_disable;
-
- return ret;
-
-clk_disable:
- clk_disable_unprepare(adc->clki);
-reg_disable:
- if (adc->vref)
- regulator_disable(adc->vref);
+ if (spi->irq > 0) {
+ adc->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!adc->trig)
+ return -ENOMEM;
- return ret;
-}
+ adc->trig->ops = &mcp3911_trigger_ops;
+ iio_trigger_set_drvdata(adc->trig, adc);
+ ret = devm_iio_trigger_register(&spi->dev, adc->trig);
+ if (ret)
+ return ret;
-static void mcp3911_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct mcp3911 *adc = iio_priv(indio_dev);
+ /*
+ * The device generates interrupts as long as it is powered up.
+ * Some platforms might not allow the option to power it down so
+ * don't enable the interrupt to avoid extra load on the system.
+ */
+ ret = devm_request_irq(&spi->dev, spi->irq,
+ &iio_trigger_generic_data_rdy_poll, IRQF_NO_AUTOEN | IRQF_ONESHOT,
+ indio_dev->name, adc->trig);
+ if (ret)
+ return ret;
+ }
- iio_device_unregister(indio_dev);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ NULL,
+ mcp3911_trigger_handler, NULL);
+ if (ret)
+ return ret;
- clk_disable_unprepare(adc->clki);
- if (adc->vref)
- regulator_disable(adc->vref);
+ return devm_iio_device_register(&adc->spi->dev, indio_dev);
}
static const struct of_device_id mcp3911_dt_ids[] = {
@@ -367,7 +543,6 @@ static struct spi_driver mcp3911_driver = {
.of_match_table = mcp3911_dt_ids,
},
.probe = mcp3911_probe,
- .remove = mcp3911_remove,
.id_table = mcp3911_id,
};
module_spi_driver(mcp3911_driver);
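
The new oversampling support packs the table index into CONFIG bits 13:11 with FIELD_PREP() and decodes the ratio as 32 << index on the read path. A userspace sketch of the same encoding, using plain shifts in place of the kernel bitfield helpers:

#include <stdio.h>

/* MCP3911 CONFIG register: the OSR field lives in bits 13:11 (GENMASK(13, 11)). */
#define OSR_SHIFT 11
#define OSR_MASK  (0x7u << OSR_SHIFT)

int main(void)
{
	static const int osr_table[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(osr_table) / sizeof(osr_table[0]); i++) {
		unsigned int field = (i << OSR_SHIFT) & OSR_MASK;	/* FIELD_PREP */
		int ratio = 32 << ((field & OSR_MASK) >> OSR_SHIFT);	/* FIELD_GET */

		printf("index %u -> CONFIG[13:11] = 0x%04x -> OSR %d\n",
		       i, field, ratio);
	}
	return 0;
}
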
diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c
index 35260d9e4e47..3710473e526f 100644
--- a/drivers/iio/adc/mt6360-adc.c
+++ b/drivers/iio/adc/mt6360-adc.c
@@ -353,7 +353,7 @@ static int mt6360_adc_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, indio_dev);
}
-static const struct of_device_id __maybe_unused mt6360_adc_of_id[] = {
+static const struct of_device_id mt6360_adc_of_id[] = {
{ .compatible = "mediatek,mt6360-adc", },
{}
};
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index 5e9e56821075..eb424496ee1d 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -14,9 +14,9 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -694,8 +694,8 @@ static int pm8xxx_read_raw(struct iio_dev *indio_dev,
}
}
-static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int pm8xxx_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
struct pm8xxx_xoadc *adc = iio_priv(indio_dev);
u8 pre_scale_mux;
@@ -706,10 +706,10 @@ static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
* First cell is prescaler or premux, second cell is analog
* mux.
*/
- if (iiospec->args_count != 2) {
- dev_err(&indio_dev->dev, "wrong number of arguments for %pOFn need 2 got %d\n",
- iiospec->np,
- iiospec->args_count);
+ if (iiospec->nargs != 2) {
+ dev_err(&indio_dev->dev, "wrong number of arguments for %pfwP need 2 got %d\n",
+ iiospec->fwnode,
+ iiospec->nargs);
return -EINVAL;
}
pre_scale_mux = (u8)iiospec->args[0];
@@ -727,34 +727,34 @@ static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
}
static const struct iio_info pm8xxx_xoadc_info = {
- .of_xlate = pm8xxx_of_xlate,
+ .fwnode_xlate = pm8xxx_fwnode_xlate,
.read_raw = pm8xxx_read_raw,
};
static int pm8xxx_xoadc_parse_channel(struct device *dev,
- struct device_node *np,
+ struct fwnode_handle *fwnode,
const struct xoadc_channel *hw_channels,
struct iio_chan_spec *iio_chan,
struct pm8xxx_chan_info *ch)
{
- const char *name = np->name;
+ const char *name = fwnode_get_name(fwnode);
const struct xoadc_channel *hwchan;
- u32 pre_scale_mux, amux_channel;
+ u32 pre_scale_mux, amux_channel, reg[2];
u32 rsv, dec;
int ret;
int chid;
- ret = of_property_read_u32_index(np, "reg", 0, &pre_scale_mux);
+ ret = fwnode_property_read_u32_array(fwnode, "reg", reg,
+ ARRAY_SIZE(reg));
if (ret) {
- dev_err(dev, "invalid pre scale/mux number %s\n", name);
- return ret;
- }
- ret = of_property_read_u32_index(np, "reg", 1, &amux_channel);
- if (ret) {
- dev_err(dev, "invalid amux channel number %s\n", name);
+ dev_err(dev, "invalid pre scale/mux or amux channel number %s\n",
+ name);
return ret;
}
+ pre_scale_mux = reg[0];
+ amux_channel = reg[1];
+
/* Find the right channel setting */
chid = 0;
hwchan = &hw_channels[0];
@@ -778,7 +778,7 @@ static int pm8xxx_xoadc_parse_channel(struct device *dev,
/* Everyone seems to use default ("type 2") decimation */
ch->decimation = VADC_DEF_DECIMATION;
- if (!of_property_read_u32(np, "qcom,ratiometric", &rsv)) {
+ if (!fwnode_property_read_u32(fwnode, "qcom,ratiometric", &rsv)) {
ch->calibration = VADC_CALIB_RATIOMETRIC;
if (rsv > XOADC_RSV_MAX) {
dev_err(dev, "%s too large RSV value %d\n", name, rsv);
@@ -791,7 +791,7 @@ static int pm8xxx_xoadc_parse_channel(struct device *dev,
}
/* Optional decimation, if omitted we use the default */
- ret = of_property_read_u32(np, "qcom,decimation", &dec);
+ ret = fwnode_property_read_u32(fwnode, "qcom,decimation", &dec);
if (!ret) {
ret = qcom_vadc_decimation_from_dt(dec);
if (ret < 0) {
@@ -820,15 +820,14 @@ static int pm8xxx_xoadc_parse_channel(struct device *dev,
return 0;
}
-static int pm8xxx_xoadc_parse_channels(struct pm8xxx_xoadc *adc,
- struct device_node *np)
+static int pm8xxx_xoadc_parse_channels(struct pm8xxx_xoadc *adc)
{
- struct device_node *child;
+ struct fwnode_handle *child;
struct pm8xxx_chan_info *ch;
int ret;
int i;
- adc->nchans = of_get_available_child_count(np);
+ adc->nchans = device_get_child_node_count(adc->dev);
if (!adc->nchans) {
dev_err(adc->dev, "no channel children\n");
return -ENODEV;
@@ -846,14 +845,14 @@ static int pm8xxx_xoadc_parse_channels(struct pm8xxx_xoadc *adc,
return -ENOMEM;
i = 0;
- for_each_available_child_of_node(np, child) {
+ device_for_each_child_node(adc->dev, child) {
ch = &adc->chans[i];
ret = pm8xxx_xoadc_parse_channel(adc->dev, child,
adc->variant->channels,
&adc->iio_chans[i],
ch);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
i++;
@@ -884,12 +883,11 @@ static int pm8xxx_xoadc_probe(struct platform_device *pdev)
const struct xoadc_variant *variant;
struct pm8xxx_xoadc *adc;
struct iio_dev *indio_dev;
- struct device_node *np = pdev->dev.of_node;
struct regmap *map;
struct device *dev = &pdev->dev;
int ret;
- variant = of_device_get_match_data(dev);
+ variant = device_get_match_data(dev);
if (!variant)
return -ENODEV;
@@ -904,7 +902,7 @@ static int pm8xxx_xoadc_probe(struct platform_device *pdev)
init_completion(&adc->complete);
mutex_init(&adc->lock);
- ret = pm8xxx_xoadc_parse_channels(adc, np);
+ ret = pm8xxx_xoadc_parse_channels(adc);
if (ret)
return ret;
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 87438d1e5c0b..821fee60a765 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -14,9 +14,9 @@
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -403,8 +403,8 @@ static irqreturn_t adc5_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int adc5_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int adc5_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
struct adc5_chip *adc = iio_priv(indio_dev);
int i;
@@ -416,8 +416,8 @@ static int adc5_of_xlate(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int adc7_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int adc7_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
struct adc5_chip *adc = iio_priv(indio_dev);
int i, v_channel;
@@ -481,12 +481,12 @@ static int adc7_read_raw(struct iio_dev *indio_dev,
static const struct iio_info adc5_info = {
.read_raw = adc5_read_raw,
- .of_xlate = adc5_of_xlate,
+ .fwnode_xlate = adc5_fwnode_xlate,
};
static const struct iio_info adc7_info = {
.read_raw = adc7_read_raw,
- .of_xlate = adc7_of_xlate,
+ .fwnode_xlate = adc7_fwnode_xlate,
};
struct adc5_channels {
@@ -526,6 +526,8 @@ static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_DEFAULT)
[ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1,
SCALE_HW_CALIB_DEFAULT)
+ [ADC5_VCOIN] = ADC5_CHAN_VOLT("vcoin", 1,
+ SCALE_HW_CALIB_DEFAULT)
[ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 0,
SCALE_HW_CALIB_PMIC_THERM)
[ADC5_USB_IN_I] = ADC5_CHAN_VOLT("usb_in_i_uv", 0,
@@ -549,6 +551,12 @@ static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_THERM_100K_PULLUP)
[ADC5_AMUX_THM2] = ADC5_CHAN_TEMP("amux_thm2", 0,
SCALE_HW_CALIB_PM5_SMB_TEMP)
+ [ADC5_GPIO1_100K_PU] = ADC5_CHAN_TEMP("gpio1_100k_pu", 0,
+ SCALE_HW_CALIB_THERM_100K_PULLUP)
+ [ADC5_GPIO3_100K_PU] = ADC5_CHAN_TEMP("gpio3_100k_pu", 0,
+ SCALE_HW_CALIB_THERM_100K_PULLUP)
+ [ADC5_GPIO4_100K_PU] = ADC5_CHAN_TEMP("gpio4_100k_pu", 0,
+ SCALE_HW_CALIB_THERM_100K_PULLUP)
};
static const struct adc5_channels adc7_chans_pmic[ADC5_MAX_CHANNEL] = {
@@ -589,6 +597,8 @@ static const struct adc5_channels adc5_chans_rev2[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_DEFAULT)
[ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 0,
SCALE_HW_CALIB_DEFAULT)
+ [ADC5_VREF_VADC] = ADC5_CHAN_VOLT("vref_vadc", 0,
+ SCALE_HW_CALIB_DEFAULT)
[ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 1,
SCALE_HW_CALIB_DEFAULT)
[ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1,
@@ -611,18 +621,18 @@ static const struct adc5_channels adc5_chans_rev2[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_THERM_100K_PULLUP)
};
-static int adc5_get_dt_channel_data(struct adc5_chip *adc,
+static int adc5_get_fw_channel_data(struct adc5_chip *adc,
struct adc5_channel_prop *prop,
- struct device_node *node,
+ struct fwnode_handle *fwnode,
const struct adc5_data *data)
{
- const char *name = node->name, *channel_name;
+ const char *name = fwnode_get_name(fwnode), *channel_name;
u32 chan, value, varr[2];
u32 sid = 0;
int ret;
struct device *dev = adc->dev;
- ret = of_property_read_u32(node, "reg", &chan);
+ ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
return ret;
@@ -647,15 +657,13 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
prop->channel = chan;
prop->sid = sid;
- channel_name = of_get_property(node,
- "label", NULL) ? : node->name;
- if (!channel_name) {
- dev_err(dev, "Invalid channel name\n");
- return -EINVAL;
- }
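+ /* Fall back to the fwnode name when no "label" property is given */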
+ ret = fwnode_property_read_string(fwnode, "label", &channel_name);
+ if (ret)
+ channel_name = name;
+
prop->datasheet_name = channel_name;
- ret = of_property_read_u32(node, "qcom,decimation", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,decimation", &value);
if (!ret) {
ret = qcom_adc5_decimation_from_dt(value, data->decimation);
if (ret < 0) {
@@ -668,7 +676,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
prop->decimation = ADC5_DECIMATION_DEFAULT;
}
- ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
+ ret = fwnode_property_read_u32_array(fwnode, "qcom,pre-scaling", varr, 2);
if (!ret) {
ret = qcom_adc5_prescaling_from_dt(varr[0], varr[1]);
if (ret < 0) {
@@ -682,7 +690,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
adc->data->adc_chans[prop->channel].prescale_index;
}
- ret = of_property_read_u32(node, "qcom,hw-settle-time", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,hw-settle-time", &value);
if (!ret) {
u8 dig_version[2];
@@ -713,7 +721,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
prop->hw_settle_time = VADC_DEF_HW_SETTLE_TIME;
}
- ret = of_property_read_u32(node, "qcom,avg-samples", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,avg-samples", &value);
if (!ret) {
ret = qcom_adc5_avg_samples_from_dt(value);
if (ret < 0) {
@@ -726,7 +734,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
prop->avg_samples = VADC_DEF_AVG_SAMPLES;
}
- if (of_property_read_bool(node, "qcom,ratiometric"))
+ if (fwnode_property_read_bool(fwnode, "qcom,ratiometric"))
prop->cal_method = ADC5_RATIOMETRIC_CAL;
else
prop->cal_method = ADC5_ABSOLUTE_CAL;
@@ -801,16 +809,16 @@ static const struct of_device_id adc5_match_table[] = {
};
MODULE_DEVICE_TABLE(of, adc5_match_table);
-static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
+static int adc5_get_fw_data(struct adc5_chip *adc)
{
const struct adc5_channels *adc_chan;
struct iio_chan_spec *iio_chan;
struct adc5_channel_prop prop, *chan_props;
- struct device_node *child;
+ struct fwnode_handle *child;
unsigned int index = 0;
int ret;
- adc->nchannels = of_get_available_child_count(node);
+ adc->nchannels = device_get_child_node_count(adc->dev);
if (!adc->nchannels)
return -EINVAL;
@@ -826,14 +834,14 @@ static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
chan_props = adc->chan_props;
iio_chan = adc->iio_chans;
- adc->data = of_device_get_match_data(adc->dev);
+ adc->data = device_get_match_data(adc->dev);
if (!adc->data)
adc->data = &adc5_data_pmic;
- for_each_available_child_of_node(node, child) {
- ret = adc5_get_dt_channel_data(adc, &prop, child, adc->data);
+ device_for_each_child_node(adc->dev, child) {
+ ret = adc5_get_fw_channel_data(adc, &prop, child, adc->data);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
@@ -858,7 +866,6 @@ static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
static int adc5_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct adc5_chip *adc;
@@ -870,7 +877,7 @@ static int adc5_probe(struct platform_device *pdev)
if (!regmap)
return -ENODEV;
- ret = of_property_read_u32(node, "reg", &reg);
+ ret = device_property_read_u32(dev, "reg", &reg);
if (ret < 0)
return ret;
@@ -886,7 +893,7 @@ static int adc5_probe(struct platform_device *pdev)
init_completion(&adc->complete);
mutex_init(&adc->lock);
- ret = adc5_get_dt_data(adc, node);
+ ret = adc5_get_fw_data(adc);
if (ret) {
dev_err(dev, "adc get dt data failed\n");
return ret;
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 34202ba52469..bcff0f62b70e 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -13,8 +13,9 @@
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/log2.h>
@@ -481,8 +482,8 @@ static int vadc_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int vadc_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int vadc_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
struct vadc_priv *vadc = iio_priv(indio_dev);
unsigned int i;
@@ -496,7 +497,7 @@ static int vadc_of_xlate(struct iio_dev *indio_dev,
static const struct iio_info vadc_info = {
.read_raw = vadc_read_raw,
- .of_xlate = vadc_of_xlate,
+ .fwnode_xlate = vadc_fwnode_xlate,
};
struct vadc_channels {
@@ -647,15 +648,15 @@ static const struct vadc_channels vadc_chans[] = {
VADC_CHAN_NO_SCALE(LR_MUX3_BUF_PU1_PU2_XO_THERM, 0)
};
-static int vadc_get_dt_channel_data(struct device *dev,
+static int vadc_get_fw_channel_data(struct device *dev,
struct vadc_channel_prop *prop,
- struct device_node *node)
+ struct fwnode_handle *fwnode)
{
- const char *name = node->name;
+ const char *name = fwnode_get_name(fwnode);
u32 chan, value, varr[2];
int ret;
- ret = of_property_read_u32(node, "reg", &chan);
+ ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
return ret;
@@ -669,7 +670,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
/* the channel has DT description */
prop->channel = chan;
- ret = of_property_read_u32(node, "qcom,decimation", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,decimation", &value);
if (!ret) {
ret = qcom_vadc_decimation_from_dt(value);
if (ret < 0) {
@@ -682,7 +683,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
prop->decimation = VADC_DEF_DECIMATION;
}
- ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
+ ret = fwnode_property_read_u32_array(fwnode, "qcom,pre-scaling", varr, 2);
if (!ret) {
ret = vadc_prescaling_from_dt(varr[0], varr[1]);
if (ret < 0) {
@@ -695,7 +696,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
prop->prescale = vadc_chans[prop->channel].prescale_index;
}
- ret = of_property_read_u32(node, "qcom,hw-settle-time", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,hw-settle-time", &value);
if (!ret) {
ret = vadc_hw_settle_time_from_dt(value);
if (ret < 0) {
@@ -708,7 +709,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
prop->hw_settle_time = VADC_DEF_HW_SETTLE_TIME;
}
- ret = of_property_read_u32(node, "qcom,avg-samples", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,avg-samples", &value);
if (!ret) {
ret = vadc_avg_samples_from_dt(value);
if (ret < 0) {
@@ -721,7 +722,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
prop->avg_samples = VADC_DEF_AVG_SAMPLES;
}
- if (of_property_read_bool(node, "qcom,ratiometric"))
+ if (fwnode_property_read_bool(fwnode, "qcom,ratiometric"))
prop->calibration = VADC_CALIB_RATIOMETRIC;
else
prop->calibration = VADC_CALIB_ABSOLUTE;
@@ -731,16 +732,16 @@ static int vadc_get_dt_channel_data(struct device *dev,
return 0;
}
-static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
+static int vadc_get_fw_data(struct vadc_priv *vadc)
{
const struct vadc_channels *vadc_chan;
struct iio_chan_spec *iio_chan;
struct vadc_channel_prop prop;
- struct device_node *child;
+ struct fwnode_handle *child;
unsigned int index = 0;
int ret;
- vadc->nchannels = of_get_available_child_count(node);
+ vadc->nchannels = device_get_child_node_count(vadc->dev);
if (!vadc->nchannels)
return -EINVAL;
@@ -756,10 +757,10 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
iio_chan = vadc->iio_chans;
- for_each_available_child_of_node(node, child) {
- ret = vadc_get_dt_channel_data(vadc->dev, &prop, child);
+ device_for_each_child_node(vadc->dev, child) {
+ ret = vadc_get_fw_channel_data(vadc->dev, &prop, child);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
@@ -848,7 +849,6 @@ static int vadc_check_revision(struct vadc_priv *vadc)
static int vadc_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct vadc_priv *vadc;
@@ -860,7 +860,7 @@ static int vadc_probe(struct platform_device *pdev)
if (!regmap)
return -ENODEV;
- ret = of_property_read_u32(node, "reg", &reg);
+ ret = device_property_read_u32(dev, "reg", &reg);
if (ret < 0)
return ret;
@@ -880,7 +880,7 @@ static int vadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = vadc_get_dt_data(vadc, node);
+ ret = vadc_get_fw_data(vadc);
if (ret)
return ret;
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
new file mode 100644
index 000000000000..c1b2e8dc9a26
--- /dev/null
+++ b/drivers/iio/adc/rtq6056.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Richtek Technology Corp.
+ *
+ * ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/util_macros.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define RTQ6056_REG_CONFIG 0x00
+#define RTQ6056_REG_SHUNTVOLT 0x01
+#define RTQ6056_REG_BUSVOLT 0x02
+#define RTQ6056_REG_POWER 0x03
+#define RTQ6056_REG_CURRENT 0x04
+#define RTQ6056_REG_CALIBRATION 0x05
+#define RTQ6056_REG_MASKENABLE 0x06
+#define RTQ6056_REG_ALERTLIMIT 0x07
+#define RTQ6056_REG_MANUFACTID 0xFE
+#define RTQ6056_REG_DIEID 0xFF
+
+#define RTQ6056_VENDOR_ID 0x1214
+#define RTQ6056_DEFAULT_CONFIG 0x4127
+#define RTQ6056_CONT_ALLON 7
+
+enum {
+ RTQ6056_CH_VSHUNT = 0,
+ RTQ6056_CH_VBUS,
+ RTQ6056_CH_POWER,
+ RTQ6056_CH_CURRENT,
+ RTQ6056_MAX_CHANNEL
+};
+
+enum {
+ F_OPMODE = 0,
+ F_VSHUNTCT,
+ F_VBUSCT,
+ F_AVG,
+ F_RESET,
+ F_MAX_FIELDS
+};
+
+struct rtq6056_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_field *rm_fields[F_MAX_FIELDS];
+ u32 shunt_resistor_uohm;
+ int vshuntct_us;
+ int vbusct_us;
+ int avg_sample;
+};
+
+static const struct reg_field rtq6056_reg_fields[F_MAX_FIELDS] = {
+ [F_OPMODE] = REG_FIELD(RTQ6056_REG_CONFIG, 0, 2),
+ [F_VSHUNTCT] = REG_FIELD(RTQ6056_REG_CONFIG, 3, 5),
+ [F_VBUSCT] = REG_FIELD(RTQ6056_REG_CONFIG, 6, 8),
+ [F_AVG] = REG_FIELD(RTQ6056_REG_CONFIG, 9, 11),
+ [F_RESET] = REG_FIELD(RTQ6056_REG_CONFIG, 15, 15),
+};
+
+static const struct iio_chan_spec rtq6056_channels[RTQ6056_MAX_CHANNEL + 1] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .address = RTQ6056_REG_SHUNTVOLT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .address = RTQ6056_REG_BUSVOLT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+ .address = RTQ6056_REG_POWER,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 3,
+ .address = RTQ6056_REG_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(RTQ6056_MAX_CHANNEL),
+};
+
+static int rtq6056_adc_read_channel(struct rtq6056_priv *priv,
+ struct iio_chan_spec const *ch,
+ int *val)
+{
+ struct device *dev = priv->dev;
+ unsigned int addr = ch->address;
+ unsigned int regval;
+ int ret;
+
+ pm_runtime_get_sync(dev);
+ ret = regmap_read(priv->regmap, addr, &regval);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put(dev);
+ if (ret)
+ return ret;
+
+ /* Power and VBUS are unsigned 16-bit; the others are signed 16-bit */
+ if (addr == RTQ6056_REG_BUSVOLT || addr == RTQ6056_REG_POWER)
+ *val = regval;
+ else
+ *val = sign_extend32(regval, 15); /* bit 15 is the sign bit of a 16-bit value */
+
+ return IIO_VAL_INT;
+}
+
+static int rtq6056_adc_read_scale(struct iio_chan_spec const *ch, int *val,
+ int *val2)
+{
+ switch (ch->address) {
+ case RTQ6056_REG_SHUNTVOLT:
+ /* VSHUNT lsb 2.5uV */
+ *val = 2500;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case RTQ6056_REG_BUSVOLT:
+ /* VBUS lsb 1.25mV */
+ *val = 1250;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ case RTQ6056_REG_POWER:
+ /* Power lsb 25mW */
+ *val = 25;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Sample frequencies for the VSHUNT and VBUS channels. The indices
+ * correspond to the bit values expected by the chip; see the datasheet at
+ * https://www.richtek.com/assets/product_file/RTQ6056/DSQ6056-00.pdf
+ */
+static const int rtq6056_samp_freq_list[] = {
+ 7194, 4926, 3717, 1904, 964, 485, 243, 122,
+};
+
+static int rtq6056_adc_set_samp_freq(struct rtq6056_priv *priv,
+ struct iio_chan_spec const *ch, int val)
+{
+ struct regmap_field *rm_field;
+ unsigned int selector;
+ int *ct, ret;
+
+ if (val > 7194 || val < 122)
+ return -EINVAL;
+
+ if (ch->address == RTQ6056_REG_SHUNTVOLT) {
+ rm_field = priv->rm_fields[F_VSHUNTCT];
+ ct = &priv->vshuntct_us;
+ } else if (ch->address == RTQ6056_REG_BUSVOLT) {
+ rm_field = priv->rm_fields[F_VBUSCT];
+ ct = &priv->vbusct_us;
+ } else
+ return -EINVAL;
+
+ selector = find_closest_descending(val, rtq6056_samp_freq_list,
+ ARRAY_SIZE(rtq6056_samp_freq_list));
+
+ ret = regmap_field_write(rm_field, selector);
+ if (ret)
+ return ret;
+
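+ /* Cache the conversion time in microseconds (reciprocal of the selected sample rate) */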
+ *ct = 1000000 / rtq6056_samp_freq_list[selector];
+
+ return 0;
+}
+
+/*
+ * Available averaging rates for the RTQ6056. The indices correspond to the
+ * bit values expected by the chip; see the datasheet at
+ * https://www.richtek.com/assets/product_file/RTQ6056/DSQ6056-00.pdf
+ */
+static const int rtq6056_avg_sample_list[] = {
+ 1, 4, 16, 64, 128, 256, 512, 1024,
+};
+
+static int rtq6056_adc_set_average(struct rtq6056_priv *priv, int val)
+{
+ unsigned int selector;
+ int ret;
+
+ if (val > 1024 || val < 1)
+ return -EINVAL;
+
+ selector = find_closest(val, rtq6056_avg_sample_list,
+ ARRAY_SIZE(rtq6056_avg_sample_list));
+
+ ret = regmap_field_write(priv->rm_fields[F_AVG], selector);
+ if (ret)
+ return ret;
+
+ priv->avg_sample = rtq6056_avg_sample_list[selector];
+
+ return 0;
+}
+
+static int rtq6056_adc_get_sample_freq(struct rtq6056_priv *priv,
+ struct iio_chan_spec const *ch, int *val)
+{
+ int sample_time;
+
+ if (ch->address == RTQ6056_REG_SHUNTVOLT)
+ sample_time = priv->vshuntct_us;
+ else if (ch->address == RTQ6056_REG_BUSVOLT)
+ sample_time = priv->vbusct_us;
+ else {
+ sample_time = priv->vshuntct_us + priv->vbusct_us;
+ sample_time *= priv->avg_sample;
+ }
+
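+ /* The effective sample rate is the reciprocal of the total conversion time */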
+ *val = 1000000 / sample_time;
+
+ return IIO_VAL_INT;
+}
+
+static int rtq6056_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return rtq6056_adc_read_channel(priv, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ return rtq6056_adc_read_scale(chan, val, val2);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = priv->avg_sample;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return rtq6056_adc_get_sample_freq(priv, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rtq6056_adc_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = rtq6056_samp_freq_list;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(rtq6056_samp_freq_list);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = rtq6056_avg_sample_list;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(rtq6056_avg_sample_list);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = rtq6056_adc_set_samp_freq(priv, chan, val);
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = rtq6056_adc_set_average(priv, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static const char *rtq6056_channel_labels[RTQ6056_MAX_CHANNEL] = {
+ [RTQ6056_CH_VSHUNT] = "Vshunt",
+ [RTQ6056_CH_VBUS] = "Vbus",
+ [RTQ6056_CH_POWER] = "Power",
+ [RTQ6056_CH_CURRENT] = "Current",
+};
+
+static int rtq6056_adc_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label)
+{
+ return sysfs_emit(label, "%s\n", rtq6056_channel_labels[chan->channel]);
+}
+
+static int rtq6056_set_shunt_resistor(struct rtq6056_priv *priv,
+ int resistor_uohm)
+{
+ unsigned int calib_val;
+ int ret;
+
+ if (resistor_uohm <= 0) {
+ dev_err(priv->dev, "Invalid resistor [%d]\n", resistor_uohm);
+ return -EINVAL;
+ }
+
+ /* calibration = 5120000 / (Rshunt (uOhm) * current lsb (1mA)) */
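+ /* e.g. the default 2000 uOhm shunt gives 5120000 / 2000 = 2560 */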
+ calib_val = 5120000 / resistor_uohm;
+ ret = regmap_write(priv->regmap, RTQ6056_REG_CALIBRATION, calib_val);
+ if (ret)
+ return ret;
+
+ priv->shunt_resistor_uohm = resistor_uohm;
+
+ return 0;
+}
+
+static ssize_t shunt_resistor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rtq6056_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ int vals[2] = { priv->shunt_resistor_uohm, 1000000 };
+
+ return iio_format_value(buf, IIO_VAL_FRACTIONAL, 1, vals);
+}
+
+static ssize_t shunt_resistor_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+ int val, val_fract, ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
+ if (ret)
+ goto out_store;
+
+ ret = rtq6056_set_shunt_resistor(priv, val * 1000000 + val_fract);
+
+out_store:
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret ?: len;
+}
+
+static IIO_DEVICE_ATTR_RW(shunt_resistor, 0);
+
+static struct attribute *rtq6056_attributes[] = {
+ &iio_dev_attr_shunt_resistor.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group rtq6056_attribute_group = {
+ .attrs = rtq6056_attributes,
+};
+
+static const struct iio_info rtq6056_info = {
+ .attrs = &rtq6056_attribute_group,
+ .read_raw = rtq6056_adc_read_raw,
+ .read_avail = rtq6056_adc_read_avail,
+ .write_raw = rtq6056_adc_write_raw,
+ .read_label = rtq6056_adc_read_label,
+};
+
+static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+ struct device *dev = priv->dev;
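+ /* Scan data layout: one u16 per channel followed by an 8-byte aligned timestamp */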
+ struct {
+ u16 vals[RTQ6056_MAX_CHANNEL];
+ s64 timestamp __aligned(8);
+ } data;
+ unsigned int raw;
+ int i = 0, bit, ret;
+
+ memset(&data, 0, sizeof(data));
+
+ pm_runtime_get_sync(dev);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ unsigned int addr = rtq6056_channels[bit].address;
+
+ ret = regmap_read(priv->regmap, addr, &raw);
+ if (ret)
+ goto out;
+
+ data.vals[i++] = raw;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data, iio_get_time_ns(indio_dev));
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put(dev);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static void rtq6056_enter_shutdown_state(void *dev)
+{
+ struct rtq6056_priv *priv = dev_get_drvdata(dev);
+
+ /* Enter shutdown state */
+ regmap_field_write(priv->rm_fields[F_OPMODE], 0);
+}
+
+static bool rtq6056_is_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RTQ6056_REG_CONFIG ... RTQ6056_REG_ALERTLIMIT:
+ case RTQ6056_REG_MANUFACTID ... RTQ6056_REG_DIEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rtq6056_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RTQ6056_REG_CONFIG:
+ case RTQ6056_REG_CALIBRATION ... RTQ6056_REG_ALERTLIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config rtq6056_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .max_register = RTQ6056_REG_DIEID,
+ .readable_reg = rtq6056_is_readable_reg,
+ .writeable_reg = rtq6056_is_writeable_reg,
+};
+
+static int rtq6056_probe(struct i2c_client *i2c)
+{
+ struct iio_dev *indio_dev;
+ struct rtq6056_priv *priv;
+ struct device *dev = &i2c->dev;
+ struct regmap *regmap;
+ unsigned int vendor_id, shunt_resistor_uohm;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->dev = dev;
+ priv->vshuntct_us = priv->vbusct_us = 1037;
+ priv->avg_sample = 1;
+ i2c_set_clientdata(i2c, priv);
+
+ regmap = devm_regmap_init_i2c(i2c, &rtq6056_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to init regmap\n");
+
+ priv->regmap = regmap;
+
+ ret = regmap_read(regmap, RTQ6056_REG_MANUFACTID, &vendor_id);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get manufacturer info\n");
+
+ if (vendor_id != RTQ6056_VENDOR_ID)
+ return dev_err_probe(dev, -ENODEV,
+ "Invalid vendor id 0x%04x\n", vendor_id);
+
+ ret = devm_regmap_field_bulk_alloc(dev, regmap, priv->rm_fields,
+ rtq6056_reg_fields, F_MAX_FIELDS);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init regmap field\n");
+
+ /*
+ * By default, configure the average sample count to 1, the bus and shunt
+ * conversion times to 1037 microseconds, and the operating mode to all-on.
+ */
+ ret = regmap_write(regmap, RTQ6056_REG_CONFIG, RTQ6056_DEFAULT_CONFIG);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable continuous sensing\n");
+
+ ret = devm_add_action_or_reset(dev, rtq6056_enter_shutdown_state, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable pm_runtime\n");
+
+ /* By default, use a 2000 micro-Ohm shunt resistor */
+ shunt_resistor_uohm = 2000;
+ device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &shunt_resistor_uohm);
+
+ ret = rtq6056_set_shunt_resistor(priv, shunt_resistor_uohm);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to init shunt resistor\n");
+
+ indio_dev->name = "rtq6056";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = rtq6056_channels;
+ indio_dev->num_channels = ARRAY_SIZE(rtq6056_channels);
+ indio_dev->info = &rtq6056_info;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ rtq6056_buffer_trigger_handler,
+ NULL);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to allocate iio trigger buffer\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int rtq6056_runtime_suspend(struct device *dev)
+{
+ struct rtq6056_priv *priv = dev_get_drvdata(dev);
+
+ /* Configure to shutdown mode */
+ return regmap_field_write(priv->rm_fields[F_OPMODE], 0);
+}
+
+static int rtq6056_runtime_resume(struct device *dev)
+{
+ struct rtq6056_priv *priv = dev_get_drvdata(dev);
+ int sample_rdy_time_us, ret;
+
+ ret = regmap_field_write(priv->rm_fields[F_OPMODE], RTQ6056_CONT_ALLON);
+ if (ret)
+ return ret;
+
+ sample_rdy_time_us = priv->vbusct_us + priv->vshuntct_us;
+ sample_rdy_time_us *= priv->avg_sample;
+
+ usleep_range(sample_rdy_time_us, sample_rdy_time_us + 100);
+
+ return 0;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(rtq6056_pm_ops, rtq6056_runtime_suspend,
+ rtq6056_runtime_resume, NULL);
+
+static const struct of_device_id rtq6056_device_match[] = {
+ { .compatible = "richtek,rtq6056" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtq6056_device_match);
+
+static struct i2c_driver rtq6056_driver = {
+ .driver = {
+ .name = "rtq6056",
+ .of_match_table = rtq6056_device_match,
+ .pm = pm_ptr(&rtq6056_pm_ops),
+ },
+ .probe_new = rtq6056_probe,
+};
+module_i2c_driver(rtq6056_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ6056 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 1ce52af3fe8b..81d5db91c67b 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -62,6 +63,7 @@ struct stm32_adc_priv;
* @regs: common registers for all instances
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
+ * @ipid: ADC identification number
* @has_syscfg: SYSCFG capability flags
* @num_irqs: number of interrupt lines
* @num_adcs: maximum number of ADC instances in the common registers
@@ -70,6 +72,7 @@ struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *);
u32 max_clk_rate_hz;
+ u32 ipid;
unsigned int has_syscfg;
unsigned int num_irqs;
unsigned int num_adcs;
@@ -78,6 +81,7 @@ struct stm32_adc_priv_cfg {
/**
* struct stm32_adc_priv - stm32 ADC core private data
* @irq: irq(s) for ADC block
+ * @nb_adc_max: actual maximum number of ADC instances per ADC block
* @domain: irq domain reference
* @aclk: clock reference for the analog circuitry
* @bclk: bus clock common for all ADCs, depends on part used
@@ -95,6 +99,7 @@ struct stm32_adc_priv_cfg {
*/
struct stm32_adc_priv {
int irq[STM32_ADC_MAX_ADCS];
+ unsigned int nb_adc_max;
struct irq_domain *domain;
struct clk *aclk;
struct clk *bclk;
@@ -354,7 +359,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
* before invoking the interrupt handler (e.g. call ISR only for
* IRQ-enabled ADCs).
*/
- for (i = 0; i < priv->cfg->num_adcs; i++) {
+ for (i = 0; i < priv->nb_adc_max; i++) {
if ((status & priv->cfg->regs->eoc_msk[i] &&
stm32_adc_eoc_enabled(priv, i)) ||
(status & priv->cfg->regs->ovr_msk[i]))
@@ -424,7 +429,7 @@ static void stm32_adc_irq_remove(struct platform_device *pdev,
int hwirq;
unsigned int i;
- for (hwirq = 0; hwirq < STM32_ADC_MAX_ADCS; hwirq++)
+ for (hwirq = 0; hwirq < priv->nb_adc_max; hwirq++)
irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
irq_domain_remove(priv->domain);
@@ -642,6 +647,49 @@ static int stm32_adc_core_switches_probe(struct device *dev,
return 0;
}
+static int stm32_adc_probe_identification(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ const char *compat;
+ int ret, count = 0;
+ u32 id, val;
+
+ if (!priv->cfg->ipid)
+ return 0;
+
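+ /* Compare the peripheral ID register against the expected IP identification number */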
+ id = FIELD_GET(STM32MP1_IPIDR_MASK,
+ readl_relaxed(priv->common.base + STM32MP1_ADC_IPDR));
+ if (id != priv->cfg->ipid) {
+ dev_err(&pdev->dev, "Unexpected IP version: 0x%x", id);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, child) {
+ ret = of_property_read_string(child, "compatible", &compat);
+ if (ret)
+ continue;
+ /* Count child nodes with stm32 adc compatible */
+ if (strstr(compat, "st,stm32") && strstr(compat, "adc"))
+ count++;
+ }
+
+ val = readl_relaxed(priv->common.base + STM32MP1_ADC_HWCFGR0);
+ priv->nb_adc_max = FIELD_GET(STM32MP1_ADCNUM_MASK, val);
+ if (count > priv->nb_adc_max) {
+ dev_err(&pdev->dev, "Unexpected child number: %d", count);
+ return -EINVAL;
+ }
+
+ val = readl_relaxed(priv->common.base + STM32MP1_ADC_VERR);
+ dev_dbg(&pdev->dev, "ADC version: %lu.%lu\n",
+ FIELD_GET(STM32MP1_MAJREV_MASK, val),
+ FIELD_GET(STM32MP1_MINREV_MASK, val));
+
+ return 0;
+}
+
static int stm32_adc_probe(struct platform_device *pdev)
{
struct stm32_adc_priv *priv;
@@ -661,6 +709,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->cfg = (const struct stm32_adc_priv_cfg *)
of_match_device(dev->driver->of_match_table, dev)->data;
+ priv->nb_adc_max = priv->cfg->num_adcs;
spin_lock_init(&priv->common.lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -703,6 +752,10 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (ret)
goto err_pm_stop;
+ ret = stm32_adc_probe_identification(pdev, priv);
+ if (ret < 0)
+ goto err_hw_stop;
+
ret = regulator_get_voltage(priv->vref);
if (ret < 0) {
dev_err(&pdev->dev, "vref get voltage failed, %d\n", ret);
@@ -811,8 +864,8 @@ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
+ .ipid = STM32MP15_IPIDR_NUMBER,
.num_irqs = 2,
- .num_adcs = 2,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index faedf7a49555..2118ef63843d 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -24,6 +24,7 @@
* | 0x300 | Master & Slave common regs |
* --------------------------------------------------------
*/
+/* Maximum number of ADC instances per ADC block across all supported SoCs */
#define STM32_ADC_MAX_ADCS 3
#define STM32_ADC_OFFSET 0x100
#define STM32_ADCX_COMN_OFFSET 0x300
@@ -105,6 +106,12 @@
/* STM32MP1 - ADC2 instance option register */
#define STM32MP1_ADC2_OR 0xD0
+/* STM32MP1 - Identification registers */
+#define STM32MP1_ADC_HWCFGR0 0x3F0
+#define STM32MP1_ADC_VERR 0x3F4
+#define STM32MP1_ADC_IPDR 0x3F8
+#define STM32MP1_ADC_SIDR 0x3FC
+
/* STM32H7 - common registers for all ADC instances */
#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08)
@@ -181,6 +188,30 @@ enum stm32h7_adc_dmngt {
/* STM32MP1_ADC2_OR - bit fields */
#define STM32MP1_VDDCOREEN BIT(0)
+/* STM32MP1_ADC_HWCFGR0 - bit fields */
+#define STM32MP1_ADCNUM_SHIFT 0
+#define STM32MP1_ADCNUM_MASK GENMASK(3, 0)
+#define STM32MP1_MULPIPE_SHIFT 4
+#define STM32MP1_MULPIPE_MASK GENMASK(7, 4)
+#define STM32MP1_OPBITS_SHIFT 8
+#define STM32MP1_OPBITS_MASK GENMASK(11, 8)
+#define STM32MP1_IDLEVALUE_SHIFT 12
+#define STM32MP1_IDLEVALUE_MASK GENMASK(15, 12)
+
+/* STM32MP1_ADC_VERR - bit fields */
+#define STM32MP1_MINREV_SHIFT 0
+#define STM32MP1_MINREV_MASK GENMASK(3, 0)
+#define STM32MP1_MAJREV_SHIFT 4
+#define STM32MP1_MAJREV_MASK GENMASK(7, 4)
+
+/* STM32MP1_ADC_IPDR - bit fields */
+#define STM32MP1_IPIDR_MASK GENMASK(31, 0)
+
+/* STM32MP1_ADC_SIDR - bit fields */
+#define STM32MP1_SIDR_MASK GENMASK(31, 0)
+
+#define STM32MP15_IPIDR_NUMBER 0x00110005
+
/**
* struct stm32_adc_common - stm32 ADC driver common data (for all instances)
* @base: control registers base cpu addr
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 130e8dd6f0c8..6256977eb7f7 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -21,11 +21,11 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include "stm32-adc-core.h"
@@ -241,6 +241,7 @@ struct stm32_adc_cfg {
* @chan_name: channel name array
* @num_diff: number of differential channels
* @int_ch: internal channel indexes array
+ * @nsmps: number of channels with optional sample time
*/
struct stm32_adc {
struct stm32_adc_common *common;
@@ -267,6 +268,7 @@ struct stm32_adc {
char chan_name[STM32_ADC_CH_MAX][STM32_ADC_CH_SZ];
u32 num_diff;
int int_ch[STM32_ADC_INT_CH_NB];
+ int nsmps;
};
struct stm32_adc_diff_channel {
@@ -1520,8 +1522,8 @@ static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev,
return ret;
}
-static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int stm32_adc_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
int i;
@@ -1575,7 +1577,7 @@ static const struct iio_info stm32_adc_iio_info = {
.hwfifo_set_watermark = stm32_adc_set_watermark,
.update_scan_mode = stm32_adc_update_scan_mode,
.debugfs_reg_access = stm32_adc_debugfs_reg_access,
- .of_xlate = stm32_adc_of_xlate,
+ .fwnode_xlate = stm32_adc_fwnode_xlate,
};
static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
@@ -1772,14 +1774,14 @@ static const struct iio_chan_spec_ext_info stm32_adc_ext_info[] = {
{},
};
-static int stm32_adc_of_get_resolution(struct iio_dev *indio_dev)
+static int stm32_adc_fw_get_resolution(struct iio_dev *indio_dev)
{
- struct device_node *node = indio_dev->dev.of_node;
+ struct device *dev = &indio_dev->dev;
struct stm32_adc *adc = iio_priv(indio_dev);
unsigned int i;
u32 res;
- if (of_property_read_u32(node, "assigned-resolution-bits", &res))
+ if (device_property_read_u32(dev, "assigned-resolution-bits", &res))
res = adc->cfg->adc_info->resolutions[0];
for (i = 0; i < adc->cfg->adc_info->num_res; i++)
@@ -1863,11 +1865,11 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm32_adc *adc)
{
- struct device_node *node = indio_dev->dev.of_node;
+ struct device *dev = &indio_dev->dev;
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
int num_channels = 0, ret;
- ret = of_property_count_u32_elems(node, "st,adc-channels");
+ ret = device_property_count_u32(dev, "st,adc-channels");
if (ret > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
return -EINVAL;
@@ -1875,8 +1877,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
num_channels += ret;
}
- ret = of_property_count_elems_of_size(node, "st,adc-diff-channels",
- sizeof(struct stm32_adc_diff_channel));
+ /*
+ * Each st,adc-diff-channels entry is a group of 2 u32s, so divide @ret
+ * to get the *real* number of channels.
+ */
+ ret = device_property_count_u32(dev, "st,adc-diff-channels");
+ if (ret < 0)
+ return ret;
+
+ ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
if (ret > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
return -EINVAL;
@@ -1886,8 +1895,8 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
}
/* Optional sample time is provided either for each, or all channels */
- ret = of_property_count_u32_elems(node, "st,min-sample-time-nsecs");
- if (ret > 1 && ret != num_channels) {
+ adc->nsmps = device_property_count_u32(dev, "st,min-sample-time-nsecs");
+ if (adc->nsmps > 1 && adc->nsmps != num_channels) {
dev_err(&indio_dev->dev, "Invalid st,min-sample-time-nsecs\n");
return -EINVAL;
}
@@ -1897,21 +1906,20 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
struct stm32_adc *adc,
- struct iio_chan_spec *channels)
+ struct iio_chan_spec *channels,
+ int nchans)
{
- struct device_node *node = indio_dev->dev.of_node;
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
+ struct device *dev = &indio_dev->dev;
u32 num_diff = adc->num_diff;
int size = num_diff * sizeof(*diff) / sizeof(u32);
- int scan_index = 0, val, ret, i;
- struct property *prop;
- const __be32 *cur;
- u32 smp = 0;
+ int scan_index = 0, ret, i, c;
+ u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX];
if (num_diff) {
- ret = of_property_read_u32_array(node, "st,adc-diff-channels",
- (u32 *)diff, size);
+ ret = device_property_read_u32_array(dev, "st,adc-diff-channels",
+ (u32 *)diff, size);
if (ret) {
dev_err(&indio_dev->dev, "Failed to get diff channels %d\n", ret);
return ret;
@@ -1932,32 +1940,47 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
}
}
- of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
- if (val >= adc_info->max_channels) {
- dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
+ ret = device_property_read_u32_array(dev, "st,adc-channels", chans,
+ nchans);
+ if (ret)
+ return ret;
+
+ for (c = 0; c < nchans; c++) {
+ if (chans[c] >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel %d\n",
+ chans[c]);
return -EINVAL;
}
/* Channel can't be configured both as single-ended & diff */
for (i = 0; i < num_diff; i++) {
- if (val == diff[i].vinp) {
- dev_err(&indio_dev->dev, "channel %d misconfigured\n", val);
+ if (chans[c] == diff[i].vinp) {
+ dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]);
return -EINVAL;
}
}
- stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
- 0, scan_index, false);
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ chans[c], 0, scan_index, false);
scan_index++;
}
+ if (adc->nsmps > 0) {
+ ret = device_property_read_u32_array(dev, "st,min-sample-time-nsecs",
+ smps, adc->nsmps);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < scan_index; i++) {
/*
- * Using of_property_read_u32_index(), smp value will only be
- * modified if valid u32 value can be decoded. This allows to
- * get either no value, 1 shared value for all indexes, or one
- * value per channel.
+ * This check, together with the logic above, ensures that smp is only
+ * modified when a valid u32 value can be decoded. This allows either
+ * no value, one shared value for all indexes, or one value per
+ * channel, matching the behavior of 'of_property_read_u32_index()'.
*/
- of_property_read_u32_index(node, "st,min-sample-time-nsecs", i, &smp);
+ if (i < adc->nsmps)
+ smp = smps[i];
/* Prepare sampling time settings */
stm32_adc_smpr_init(adc, channels[i].channel, smp);
@@ -2005,22 +2028,21 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
struct stm32_adc *adc,
struct iio_chan_spec *channels)
{
- struct device_node *node = indio_dev->dev.of_node;
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
- struct device_node *child;
+ struct fwnode_handle *child;
const char *name;
int val, scan_index = 0, ret;
bool differential;
u32 vin[2];
- for_each_available_child_of_node(node, child) {
- ret = of_property_read_u32(child, "reg", &val);
+ device_for_each_child_node(&indio_dev->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &val);
if (ret) {
dev_err(&indio_dev->dev, "Missing channel index %d\n", ret);
goto err;
}
- ret = of_property_read_string(child, "label", &name);
+ ret = fwnode_property_read_string(child, "label", &name);
/* label is optional */
if (!ret) {
if (strlen(name) >= STM32_ADC_CH_SZ) {
@@ -2047,7 +2069,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
}
differential = false;
- ret = of_property_read_u32_array(child, "diff-channels", vin, 2);
+ ret = fwnode_property_read_u32_array(child, "diff-channels", vin, 2);
/* diff-channels is optional */
if (!ret) {
differential = true;
@@ -2064,7 +2086,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
vin[1], scan_index, differential);
- ret = of_property_read_u32(child, "st,min-sample-time-ns", &val);
+ ret = fwnode_property_read_u32(child, "st,min-sample-time-ns", &val);
/* st,min-sample-time-ns is optional */
if (!ret) {
stm32_adc_smpr_init(adc, channels[scan_index].channel, val);
@@ -2082,14 +2104,13 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
return scan_index;
err:
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
-static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
+static int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping)
{
- struct device_node *node = indio_dev->dev.of_node;
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
struct iio_chan_spec *channels;
@@ -2099,7 +2120,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
for (i = 0; i < STM32_ADC_INT_CH_NB; i++)
adc->int_ch[i] = STM32_ADC_INT_CH_NONE;
- num_channels = of_get_available_child_count(node);
+ num_channels = device_get_child_node_count(&indio_dev->dev);
/* If no channels have been found, fallback to channels legacy properties. */
if (!num_channels) {
legacy = true;
@@ -2130,7 +2151,8 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
return -ENOMEM;
if (legacy)
- ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels);
+ ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels,
+ num_channels);
else
ret = stm32_adc_generic_chan_init(indio_dev, adc, channels);
if (ret < 0)
@@ -2212,9 +2234,6 @@ static int stm32_adc_probe(struct platform_device *pdev)
bool timestamping = false;
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
if (!indio_dev)
return -ENOMEM;
@@ -2223,17 +2242,16 @@ static int stm32_adc_probe(struct platform_device *pdev)
adc->common = dev_get_drvdata(pdev->dev.parent);
spin_lock_init(&adc->lock);
init_completion(&adc->completion);
- adc->cfg = (const struct stm32_adc_cfg *)
- of_match_device(dev->driver->of_match_table, dev)->data;
+ adc->cfg = device_get_match_data(dev);
indio_dev->name = dev_name(&pdev->dev);
- indio_dev->dev.of_node = pdev->dev.of_node;
+ device_set_node(&indio_dev->dev, dev_fwnode(&pdev->dev));
indio_dev->info = &stm32_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_HARDWARE_TRIGGERED;
platform_set_drvdata(pdev, indio_dev);
- ret = of_property_read_u32(pdev->dev.of_node, "reg", &adc->offset);
+ ret = device_property_read_u32(dev, "reg", &adc->offset);
if (ret != 0) {
dev_err(&pdev->dev, "missing reg property\n");
return -EINVAL;
@@ -2262,7 +2280,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
}
}
- ret = stm32_adc_of_get_resolution(indio_dev);
+ ret = stm32_adc_fw_get_resolution(indio_dev);
if (ret < 0)
return ret;
@@ -2279,7 +2297,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
timestamping = true;
}
- ret = stm32_adc_chan_of_init(indio_dev, timestamping);
+ ret = stm32_adc_chan_fw_init(indio_dev, timestamping);
if (ret < 0)
goto err_dma_disable;
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 32237cacc9a3..5235a93f28bc 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -797,13 +797,6 @@ static void ads131e08_regulator_disable(void *data)
regulator_disable(st->vref_reg);
}
-static void ads131e08_clk_disable(void *data)
-{
- struct ads131e08_state *st = data;
-
- clk_disable_unprepare(st->adc_clk);
-}
-
static int ads131e08_probe(struct spi_device *spi)
{
const struct ads131e08_info *info;
@@ -896,21 +889,11 @@ static int ads131e08_probe(struct spi_device *spi)
st->vref_reg = NULL;
}
- st->adc_clk = devm_clk_get(&spi->dev, "adc-clk");
+ st->adc_clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
if (IS_ERR(st->adc_clk))
return dev_err_probe(&spi->dev, PTR_ERR(st->adc_clk),
"failed to get the ADC clock\n");
- ret = clk_prepare_enable(st->adc_clk);
- if (ret) {
- dev_err(&spi->dev, "failed to prepare/enable the ADC clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ads131e08_clk_disable, st);
- if (ret)
- return ret;
-
adc_clk_hz = clk_get_rate(st->adc_clk);
if (!adc_clk_hz) {
dev_err(&spi->dev, "failed to get the ADC clock rate\n");
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 0d9436a69cbf..1bbb51a6683c 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -8,7 +8,9 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
@@ -139,6 +141,7 @@ enum tsc2046_state {
struct tsc2046_adc_priv {
struct spi_device *spi;
const struct tsc2046_adc_dcfg *dcfg;
+ struct regulator *vref_reg;
struct iio_trigger *trig;
struct hrtimer trig_timer;
@@ -173,6 +176,7 @@ struct tsc2046_adc_priv {
u32 scan_interval_us;
u32 time_per_scan_us;
u32 time_per_bit_ns;
+ unsigned int vref_mv;
struct tsc2046_adc_ch_cfg ch_cfg[TI_TSC2046_MAX_CHAN];
};
@@ -252,7 +256,9 @@ static u8 tsc2046_adc_get_cmd(struct tsc2046_adc_priv *priv, int ch_idx,
case TI_TSC2046_ADDR_AUX:
case TI_TSC2046_ADDR_VBAT:
case TI_TSC2046_ADDR_TEMP0:
- pd |= TI_TSC2046_SER | TI_TSC2046_PD1_VREF_ON;
+ pd |= TI_TSC2046_SER;
+ if (!priv->vref_reg)
+ pd |= TI_TSC2046_PD1_VREF_ON;
}
return TI_TSC2046_START | FIELD_PREP(TI_TSC2046_ADDR, ch_idx) | pd;
@@ -468,7 +474,7 @@ static int tsc2046_adc_read_raw(struct iio_dev *indio_dev,
* So, it is better to use external voltage-divider driver
* instead, which is calculating complete chain.
*/
- *val = TI_TSC2046_INT_VREF;
+ *val = priv->vref_mv;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -740,6 +746,49 @@ static void tsc2046_adc_parse_fwnode(struct tsc2046_adc_priv *priv)
}
}
+static void tsc2046_adc_regulator_disable(void *data)
+{
+ struct tsc2046_adc_priv *priv = data;
+
+ regulator_disable(priv->vref_reg);
+}
+
+static int tsc2046_adc_configure_regulator(struct tsc2046_adc_priv *priv)
+{
+ struct device *dev = &priv->spi->dev;
+ int ret;
+
+ priv->vref_reg = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(priv->vref_reg)) {
+ /* If the regulator exists but cannot be acquired, return an error */
+ if (PTR_ERR(priv->vref_reg) != -ENODEV)
+ return PTR_ERR(priv->vref_reg);
+ priv->vref_reg = NULL;
+ }
+ if (!priv->vref_reg) {
+ /* Use internal reference */
+ priv->vref_mv = TI_TSC2046_INT_VREF;
+ return 0;
+ }
+
+ ret = regulator_enable(priv->vref_reg);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, tsc2046_adc_regulator_disable,
+ priv);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(priv->vref_reg);
+ if (ret < 0)
+ return ret;
+
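+ /* regulator_get_voltage() reports microvolts; convert to millivolts */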
+ priv->vref_mv = ret / MILLI;
+
+ return 0;
+}
+
static int tsc2046_adc_probe(struct spi_device *spi)
{
const struct tsc2046_adc_dcfg *dcfg;
@@ -756,6 +805,11 @@ static int tsc2046_adc_probe(struct spi_device *spi)
}
dcfg = device_get_match_data(dev);
+ if (!dcfg) {
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ dcfg = (const struct tsc2046_adc_dcfg *)id->driver_data;
+ }
if (!dcfg)
return -EINVAL;
@@ -781,6 +835,10 @@ static int tsc2046_adc_probe(struct spi_device *spi)
indio_dev->num_channels = dcfg->num_channels;
indio_dev->info = &tsc2046_adc_info;
+ ret = tsc2046_adc_configure_regulator(priv);
+ if (ret)
+ return ret;
+
tsc2046_adc_parse_fwnode(priv);
ret = tsc2046_adc_setup_spi_msg(priv);
@@ -833,11 +891,18 @@ static const struct of_device_id ads7950_of_table[] = {
};
MODULE_DEVICE_TABLE(of, ads7950_of_table);
+static const struct spi_device_id tsc2046_adc_spi_ids[] = {
+ { "tsc2046e-adc", (unsigned long)&tsc2046_adc_dcfg_tsc2046e },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, tsc2046_adc_spi_ids);
+
static struct spi_driver tsc2046_adc_driver = {
.driver = {
.name = "tsc2046",
.of_match_table = ads7950_of_table,
},
+ .id_table = tsc2046_adc_spi_ids,
.probe = tsc2046_adc_probe,
};
module_spi_driver(tsc2046_adc_driver);
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 9cd2713146e5..5b4bdf3a26bb 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1351,11 +1351,6 @@ static const struct of_device_id ams_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, ams_of_match_table);
-static void ams_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int ams_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
@@ -1380,18 +1375,10 @@ static int ams_probe(struct platform_device *pdev)
if (IS_ERR(ams->base))
return PTR_ERR(ams->base);
- ams->clk = devm_clk_get(&pdev->dev, NULL);
+ ams->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(ams->clk))
return PTR_ERR(ams->clk);
- ret = clk_prepare_enable(ams->clk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&pdev->dev, ams_clk_disable_unprepare, ams->clk);
- if (ret < 0)
- return ret;
-
ret = devm_delayed_work_autocancel(&pdev->dev, &ams->ams_unmask_work,
ams_unmask_worker);
if (ret < 0)
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 1b247722ba25..292f2892d223 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1296,13 +1296,6 @@ static const char * const xadc_type_names[] = {
[XADC_TYPE_US] = "xilinx-system-monitor",
};
-static void xadc_clk_disable_unprepare(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
static void xadc_cancel_delayed_work(void *data)
{
struct delayed_work *work = data;
@@ -1374,19 +1367,10 @@ static int xadc_probe(struct platform_device *pdev)
}
}
- xadc->clk = devm_clk_get(dev, NULL);
+ xadc->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(xadc->clk))
return PTR_ERR(xadc->clk);
- ret = clk_prepare_enable(xadc->clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev,
- xadc_clk_disable_unprepare, xadc->clk);
- if (ret)
- return ret;
-
/*
* Make sure not to exceed the maximum samplerate since otherwise the
* resulting interrupt storm will soft-lock the system.
diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
index 138492362f20..fcf6d2269bfc 100644
--- a/drivers/iio/addac/Kconfig
+++ b/drivers/iio/addac/Kconfig
@@ -17,4 +17,20 @@ config AD74413R
To compile this driver as a module, choose M here: the
module will be called ad74413r.
+config STX104
+ tristate "Apex Embedded Systems STX104 driver"
+ depends on PC104 && X86
+ select ISA_BUS_API
+ select GPIOLIB
+ help
+ Say yes here to build support for the Apex Embedded Systems STX104
+ integrated analog PC/104 card.
+
+ This driver supports the 16 channels of single-ended (8 channels of
+ differential) analog inputs, 2 channels of analog output, 4 digital
+ inputs, and 4 digital outputs provided by the STX104.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
endmenu
diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile
index cfd4bbe64ad3..17de20ef0d8e 100644
--- a/drivers/iio/addac/Makefile
+++ b/drivers/iio/addac/Makefile
@@ -5,3 +5,4 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD74413R) += ad74413r.o
+obj-$(CONFIG_STX104) += stx104.o
diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/addac/stx104.c
index 48a91a95e597..48a91a95e597 100644
--- a/drivers/iio/adc/stx104.c
+++ b/drivers/iio/addac/stx104.c
diff --git a/drivers/iio/cdc/Kconfig b/drivers/iio/cdc/Kconfig
index 5e3319a3ff48..e0a5ce66a984 100644
--- a/drivers/iio/cdc/Kconfig
+++ b/drivers/iio/cdc/Kconfig
@@ -14,4 +14,14 @@ config AD7150
To compile this driver as a module, choose M here: the
module will be called ad7150.
+config AD7746
+ tristate "Analog Devices AD7745, AD7746, AD7747 capacitive sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices capacitive sensors
+ (AD7745, AD7746, AD7747). Provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7746.
+
endmenu
diff --git a/drivers/iio/cdc/Makefile b/drivers/iio/cdc/Makefile
index ee490637b032..41db756d8020 100644
--- a/drivers/iio/cdc/Makefile
+++ b/drivers/iio/cdc/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_AD7150) += ad7150.o
+obj-$(CONFIG_AD7746) += ad7746.o
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/iio/cdc/ad7746.c
index 52b8957c19c9..b266f5328140 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/iio/cdc/ad7746.c
@@ -5,6 +5,7 @@
* Copyright 2011 Analog Devices Inc.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
@@ -15,12 +16,12 @@
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include <asm/unaligned.h>
+
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-/*
- * AD7746 Register Definition
- */
+/* AD7746 Register Definition */
#define AD7746_REG_STATUS 0
#define AD7746_REG_CAP_DATA_HIGH 1
@@ -48,11 +49,12 @@
#define AD7746_CAPSETUP_CACHOP BIT(0)
/* Voltage/Temperature Setup Register Bit Designations (AD7746_REG_VT_SETUP) */
-#define AD7746_VTSETUP_VTEN (1 << 7)
-#define AD7746_VTSETUP_VTMD_INT_TEMP (0 << 5)
-#define AD7746_VTSETUP_VTMD_EXT_TEMP (1 << 5)
-#define AD7746_VTSETUP_VTMD_VDD_MON (2 << 5)
-#define AD7746_VTSETUP_VTMD_EXT_VIN (3 << 5)
+#define AD7746_VTSETUP_VTEN BIT(7)
+#define AD7746_VTSETUP_VTMD_MASK GENMASK(6, 5)
+#define AD7746_VTSETUP_VTMD_INT_TEMP 0
+#define AD7746_VTSETUP_VTMD_EXT_TEMP 1
+#define AD7746_VTSETUP_VTMD_VDD_MON 2
+#define AD7746_VTSETUP_VTMD_EXT_VIN 3
#define AD7746_VTSETUP_EXTREF BIT(4)
#define AD7746_VTSETUP_VTSHORT BIT(1)
#define AD7746_VTSETUP_VTCHOP BIT(0)
@@ -64,23 +66,22 @@
#define AD7746_EXCSETUP_NEXCB BIT(4)
#define AD7746_EXCSETUP_EXCA BIT(3)
#define AD7746_EXCSETUP_NEXCA BIT(2)
-#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0)
+#define AD7746_EXCSETUP_EXCLVL_MASK GENMASK(1, 0)
/* Config Register Bit Designations (AD7746_REG_CFG) */
-#define AD7746_CONF_VTFS_SHIFT 6
-#define AD7746_CONF_CAPFS_SHIFT 3
#define AD7746_CONF_VTFS_MASK GENMASK(7, 6)
#define AD7746_CONF_CAPFS_MASK GENMASK(5, 3)
-#define AD7746_CONF_MODE_IDLE (0 << 0)
-#define AD7746_CONF_MODE_CONT_CONV (1 << 0)
-#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0)
-#define AD7746_CONF_MODE_PWRDN (3 << 0)
-#define AD7746_CONF_MODE_OFFS_CAL (5 << 0)
-#define AD7746_CONF_MODE_GAIN_CAL (6 << 0)
+#define AD7746_CONF_MODE_MASK GENMASK(2, 0)
+#define AD7746_CONF_MODE_IDLE 0
+#define AD7746_CONF_MODE_CONT_CONV 1
+#define AD7746_CONF_MODE_SINGLE_CONV 2
+#define AD7746_CONF_MODE_PWRDN 3
+#define AD7746_CONF_MODE_OFFS_CAL 5
+#define AD7746_CONF_MODE_GAIN_CAL 6
/* CAPDAC Register Bit Designations (AD7746_REG_CAPDACx) */
#define AD7746_CAPDAC_DACEN BIT(7)
-#define AD7746_CAPDAC_DACP(x) ((x) & 0x7F)
+#define AD7746_CAPDAC_DACP_MASK GENMASK(6, 0)
struct ad7746_chip_info {
struct i2c_client *client;
@@ -94,11 +95,6 @@ struct ad7746_chip_info {
u8 vt_setup;
u8 capdac[2][2];
s8 capdac_set;
-
- union {
- __be32 d32;
- u8 d8[4];
- } data ____cacheline_aligned;
};
enum ad7746_chan {
@@ -112,43 +108,87 @@ enum ad7746_chan {
CIN2_DIFF,
};
+struct ad7746_chan_info {
+ u8 addr;
+ union {
+ u8 vtmd;
+ struct { /* CAP SETUP fields */
+ unsigned int cin2 : 1;
+ unsigned int capdiff : 1;
+ };
+ };
+};
+
+static const struct ad7746_chan_info ad7746_chan_info[] = {
+ [VIN] = {
+ .addr = AD7746_REG_VT_DATA_HIGH,
+ .vtmd = AD7746_VTSETUP_VTMD_EXT_VIN,
+ },
+ [VIN_VDD] = {
+ .addr = AD7746_REG_VT_DATA_HIGH,
+ .vtmd = AD7746_VTSETUP_VTMD_VDD_MON,
+ },
+ [TEMP_INT] = {
+ .addr = AD7746_REG_VT_DATA_HIGH,
+ .vtmd = AD7746_VTSETUP_VTMD_INT_TEMP,
+ },
+ [TEMP_EXT] = {
+ .addr = AD7746_REG_VT_DATA_HIGH,
+ .vtmd = AD7746_VTSETUP_VTMD_EXT_TEMP,
+ },
+ [CIN1] = {
+ .addr = AD7746_REG_CAP_DATA_HIGH,
+ },
+ [CIN1_DIFF] = {
+ .addr = AD7746_REG_CAP_DATA_HIGH,
+ .capdiff = 1,
+ },
+ [CIN2] = {
+ .addr = AD7746_REG_CAP_DATA_HIGH,
+ .cin2 = 1,
+ },
+ [CIN2_DIFF] = {
+ .addr = AD7746_REG_CAP_DATA_HIGH,
+ .cin2 = 1,
+ .capdiff = 1,
+ },
+};
+
static const struct iio_chan_spec ad7746_channels[] = {
[VIN] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_EXT_VIN,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = VIN,
},
[VIN_VDD] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
.extend_name = "supply",
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_VDD_MON,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = VIN_VDD,
},
[TEMP_INT] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_INT_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = TEMP_INT,
},
[TEMP_EXT] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_EXT_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = TEMP_EXT,
},
[CIN1] = {
.type = IIO_CAPACITANCE,
@@ -158,7 +198,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_CAP_DATA_HIGH << 8,
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = CIN1,
},
[CIN1_DIFF] = {
.type = IIO_CAPACITANCE,
@@ -167,11 +208,11 @@ static const struct iio_chan_spec ad7746_channels[] = {
.channel = 0,
.channel2 = 2,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_ZEROPOINT),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CAPDIFF
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = CIN1_DIFF,
},
[CIN2] = {
.type = IIO_CAPACITANCE,
@@ -181,8 +222,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CIN2,
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = CIN2,
},
[CIN2_DIFF] = {
.type = IIO_CAPACITANCE,
@@ -191,22 +232,22 @@ static const struct iio_chan_spec ad7746_channels[] = {
.channel = 1,
.channel2 = 3,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_ZEROPOINT),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = CIN2_DIFF,
}
};
/* Values are Update Rate (Hz), Conversion Time (ms) + 1 */
static const unsigned char ad7746_vt_filter_rate_table[][2] = {
- {50, 20 + 1}, {31, 32 + 1}, {16, 62 + 1}, {8, 122 + 1},
+ { 50, 20 + 1 }, { 31, 32 + 1 }, { 16, 62 + 1 }, { 8, 122 + 1 },
};
static const unsigned char ad7746_cap_filter_rate_table[][2] = {
- {91, 11 + 1}, {84, 12 + 1}, {50, 20 + 1}, {26, 38 + 1},
- {16, 62 + 1}, {13, 77 + 1}, {11, 92 + 1}, {9, 110 + 1},
+ { 91, 11 + 1 }, { 84, 12 + 1 }, { 50, 20 + 1 }, { 26, 38 + 1 },
+ { 16, 62 + 1 }, { 13, 77 + 1 }, { 11, 92 + 1 }, { 9, 110 + 1 },
};
static int ad7746_set_capdac(struct ad7746_chip_info *chip, int channel)
@@ -231,10 +272,13 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_CAPACITANCE:
- cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
+ cap_setup = FIELD_PREP(AD7746_CAPSETUP_CIN2,
+ ad7746_chan_info[chan->address].cin2) |
+ FIELD_PREP(AD7746_CAPSETUP_CAPDIFF,
+ ad7746_chan_info[chan->address].capdiff) |
+ FIELD_PREP(AD7746_CAPSETUP_CAPEN, 1);
vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
- idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
- AD7746_CONF_CAPFS_SHIFT;
+ idx = FIELD_GET(AD7746_CONF_CAPFS_MASK, chip->config);
delay = ad7746_cap_filter_rate_table[idx][1];
ret = ad7746_set_capdac(chip, chan->channel);
@@ -246,10 +290,11 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
break;
case IIO_VOLTAGE:
case IIO_TEMP:
- vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
+ vt_setup = FIELD_PREP(AD7746_VTSETUP_VTMD_MASK,
+ ad7746_chan_info[chan->address].vtmd) |
+ FIELD_PREP(AD7746_VTSETUP_VTEN, 1);
cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
- idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
- AD7746_CONF_VTFS_SHIFT;
+ idx = FIELD_GET(AD7746_CONF_VTFS_MASK, chip->config);
delay = ad7746_cap_filter_rate_table[idx][1];
break;
default:
@@ -332,7 +377,8 @@ static ssize_t ad7746_start_offset_calib(struct device *dev,
return ret;
return ad7746_start_calib(dev, attr, buf, len,
- AD7746_CONF_MODE_OFFS_CAL);
+ FIELD_PREP(AD7746_CONF_MODE_MASK,
+ AD7746_CONF_MODE_OFFS_CAL));
}
static ssize_t ad7746_start_gain_calib(struct device *dev,
@@ -347,7 +393,8 @@ static ssize_t ad7746_start_gain_calib(struct device *dev,
return ret;
return ad7746_start_calib(dev, attr, buf, len,
- AD7746_CONF_MODE_GAIN_CAL);
+ FIELD_PREP(AD7746_CONF_MODE_MASK,
+ AD7746_CONF_MODE_GAIN_CAL));
}
static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
@@ -374,7 +421,7 @@ static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip,
i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
chip->config &= ~AD7746_CONF_CAPFS_MASK;
- chip->config |= i << AD7746_CONF_CAPFS_SHIFT;
+ chip->config |= FIELD_PREP(AD7746_CONF_CAPFS_MASK, i);
return 0;
}
@@ -392,23 +439,17 @@ static int ad7746_store_vt_filter_rate_setup(struct ad7746_chip_info *chip,
i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
chip->config &= ~AD7746_CONF_VTFS_MASK;
- chip->config |= i << AD7746_CONF_VTFS_SHIFT;
+ chip->config |= FIELD_PREP(AD7746_CONF_VTFS_MASK, i);
return 0;
}
-static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
-static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
- "91 84 50 26 16 13 11 9");
-
static struct attribute *ad7746_attributes[] = {
&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_voltage0_calibscale_calibration.dev_attr.attr,
- &iio_const_attr_in_voltage_sampling_frequency_available.dev_attr.attr,
- &iio_const_attr_in_capacitance_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -425,14 +466,10 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
struct ad7746_chip_info *chip = iio_priv(indio_dev);
int ret, reg;
- mutex_lock(&chip->lock);
-
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
- if (val != 1) {
- ret = -EINVAL;
- goto out;
- }
+ if (val != 1)
+ return -EINVAL;
val = (val2 * 1024) / 15625;
@@ -444,33 +481,32 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
reg = AD7746_REG_VOLT_GAINH;
break;
default:
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&chip->lock);
ret = i2c_smbus_write_word_swapped(chip->client, reg, val);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
+ return ret;
- ret = 0;
- break;
+ return 0;
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < 0 || val > 0xFFFF) {
- ret = -EINVAL;
- goto out;
- }
+ if (val < 0 || val > 0xFFFF)
+ return -EINVAL;
+
+ mutex_lock(&chip->lock);
ret = i2c_smbus_write_word_swapped(chip->client,
AD7746_REG_CAP_OFFH, val);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
+ return ret;
- ret = 0;
- break;
+ return 0;
case IIO_CHAN_INFO_OFFSET:
- if (val < 0 || val > 43008000) { /* 21pF */
- ret = -EINVAL;
- goto out;
- }
+ case IIO_CHAN_INFO_ZEROPOINT:
+ if (val < 0 || val > 43008000) /* 21pF */
+ return -EINVAL;
/*
* CAPDAC Scale = 21pF_typ / 127
@@ -479,42 +515,104 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
*/
val /= 338646;
-
+ mutex_lock(&chip->lock);
chip->capdac[chan->channel][chan->differential] = val > 0 ?
- AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0;
+ FIELD_PREP(AD7746_CAPDAC_DACP_MASK, val) | AD7746_CAPDAC_DACEN : 0;
ret = ad7746_set_capdac(chip, chan->channel);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ mutex_unlock(&chip->lock);
+ return ret;
+ }
chip->capdac_set = chan->channel;
+ mutex_unlock(&chip->lock);
- ret = 0;
- break;
+ return 0;
case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2) {
- ret = -EINVAL;
- goto out;
- }
+ if (val2)
+ return -EINVAL;
switch (chan->type) {
case IIO_CAPACITANCE:
+ mutex_lock(&chip->lock);
ret = ad7746_store_cap_filter_rate_setup(chip, val);
- break;
+ mutex_unlock(&chip->lock);
+ return ret;
case IIO_VOLTAGE:
+ mutex_lock(&chip->lock);
ret = ad7746_store_vt_filter_rate_setup(chip, val);
- break;
+ mutex_unlock(&chip->lock);
+ return ret;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+ default:
+ return -EINVAL;
+ }
+}
+
+static const int ad7746_v_samp_freq[] = { 50, 31, 16, 8, };
+static const int ad7746_cap_samp_freq[] = { 91, 84, 50, 26, 16, 13, 11, 9, };
+
+static int ad7746_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, const int **vals,
+ int *type, int *length, long mask)
+{
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = ad7746_v_samp_freq;
+ *length = ARRAY_SIZE(ad7746_v_samp_freq);
+ break;
+ case IIO_CAPACITANCE:
+ *vals = ad7746_cap_samp_freq;
+ *length = ARRAY_SIZE(ad7746_cap_samp_freq);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+}
-out:
- mutex_unlock(&chip->lock);
- return ret;
+static int ad7746_read_channel(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ int ret, delay;
+ u8 data[3];
+ u8 regval;
+
+ ret = ad7746_select_channel(indio_dev, chan);
+ if (ret < 0)
+ return ret;
+ delay = ret;
+
+ regval = chip->config | FIELD_PREP(AD7746_CONF_MODE_MASK,
+ AD7746_CONF_MODE_SINGLE_CONV);
+ ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
+ if (ret < 0)
+ return ret;
+
+ msleep(delay);
+ /* Now read the actual register */
+ ret = i2c_smbus_read_i2c_block_data(chip->client,
+ ad7746_chan_info[chan->address].addr,
+ sizeof(data), data);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Offset applied internally because the _offset userspace interface is
+ * needed for the CAP DACs, which apply a controllable offset.
+ */
+ *val = get_unaligned_be24(data) - 0x800000;
+
+ return 0;
}
static int ad7746_read_raw(struct iio_dev *indio_dev,
@@ -523,55 +621,18 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay, idx;
- u8 regval, reg;
-
- mutex_lock(&chip->lock);
+ int ret, idx;
+ u8 reg;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- case IIO_CHAN_INFO_PROCESSED:
- ret = ad7746_select_channel(indio_dev, chan);
- if (ret < 0)
- goto out;
- delay = ret;
-
- regval = chip->config | AD7746_CONF_MODE_SINGLE_CONV;
- ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG,
- regval);
- if (ret < 0)
- goto out;
-
- msleep(delay);
- /* Now read the actual register */
-
- ret = i2c_smbus_read_i2c_block_data(chip->client,
- chan->address >> 8, 3,
- &chip->data.d8[1]);
-
+ mutex_lock(&chip->lock);
+ ret = ad7746_read_channel(indio_dev, chan, val);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
-
- *val = (be32_to_cpu(chip->data.d32) & 0xFFFFFF) - 0x800000;
-
- switch (chan->type) {
- case IIO_TEMP:
- /*
- * temperature in milli degrees Celsius
- * T = ((*val / 2048) - 4096) * 1000
- */
- *val = (*val * 125) / 256;
- break;
- case IIO_VOLTAGE:
- if (chan->channel == 1) /* supply_raw*/
- *val = *val * 6;
- break;
- default:
- break;
- }
+ return ret;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
@@ -581,83 +642,78 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
reg = AD7746_REG_VOLT_GAINH;
break;
default:
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&chip->lock);
ret = i2c_smbus_read_word_swapped(chip->client, reg);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
+ return ret;
/* 1 + gain_val / 2^16 */
*val = 1;
*val2 = (15625 * ret) / 1024;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&chip->lock);
ret = i2c_smbus_read_word_swapped(chip->client,
AD7746_REG_CAP_OFFH);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
+ return ret;
*val = ret;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- *val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
- [chan->differential]) * 338646;
+ case IIO_CHAN_INFO_ZEROPOINT:
+ *val = FIELD_GET(AD7746_CAPDAC_DACP_MASK,
+ chip->capdac[chan->channel][chan->differential]) * 338646;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
/* 8.192pf / 2^24 */
*val = 0;
*val2 = 488;
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
+ return IIO_VAL_INT_PLUS_NANO;
case IIO_VOLTAGE:
/* 1170mV / 2^23 */
*val = 1170;
+ if (chan->channel == 1)
+ *val *= 6;
*val2 = 23;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
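+			/* 125/2^8 mdegC per LSB; same result as the removed (raw * 125) / 256 PROCESSED value */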
+ *val = 125;
+ *val2 = 8;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
- break;
case IIO_CHAN_INFO_SAMP_FREQ:
switch (chan->type) {
case IIO_CAPACITANCE:
- idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
- AD7746_CONF_CAPFS_SHIFT;
+ idx = FIELD_GET(AD7746_CONF_CAPFS_MASK, chip->config);
*val = ad7746_cap_filter_rate_table[idx][0];
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_VOLTAGE:
- idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
- AD7746_CONF_VTFS_SHIFT;
+ idx = FIELD_GET(AD7746_CONF_VTFS_MASK, chip->config);
*val = ad7746_vt_filter_rate_table[idx][0];
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
- break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-out:
- mutex_unlock(&chip->lock);
- return ret;
}
static const struct iio_info ad7746_info = {
.attrs = &ad7746_attribute_group,
.read_raw = ad7746_read_raw,
+ .read_avail = ad7746_read_avail,
.write_raw = ad7746_write_raw,
};
@@ -674,10 +730,9 @@ static int ad7746_probe(struct i2c_client *client,
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
+
chip = iio_priv(indio_dev);
mutex_init(&chip->lock);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
chip->client = client;
chip->capdac_set = -1;
@@ -710,24 +765,24 @@ static int ad7746_probe(struct i2c_client *client,
if (!ret) {
switch (vdd_permille) {
case 125:
- regval |= AD7746_EXCSETUP_EXCLVL(0);
+ regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 0);
break;
case 250:
- regval |= AD7746_EXCSETUP_EXCLVL(1);
+ regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 1);
break;
case 375:
- regval |= AD7746_EXCSETUP_EXCLVL(2);
+ regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 2);
break;
case 500:
- regval |= AD7746_EXCSETUP_EXCLVL(3);
+ regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 3);
break;
default:
break;
}
}
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_EXC_SETUP, regval);
+ ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_EXC_SETUP,
+ regval);
if (ret < 0)
return ret;
@@ -740,7 +795,6 @@ static const struct i2c_device_id ad7746_id[] = {
{ "ad7747", 7747 },
{}
};
-
MODULE_DEVICE_TABLE(i2c, ad7746_id);
static const struct of_device_id ad7746_of_match[] = {
@@ -749,7 +803,6 @@ static const struct of_device_id ad7746_of_match[] = {
{ .compatible = "adi,ad7747" },
{ },
};
-
MODULE_DEVICE_TABLE(of, ad7746_of_match);
static struct i2c_driver ad7746_driver = {
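
Note on the conversion pattern above: the ad7746 rework trades open-coded shift-and-mask arithmetic for the linux/bitfield.h helpers. A minimal standalone sketch of that idiom follows; the CFG_CAPFS_MASK name and the two helpers are illustrative only (they mirror AD7746_CONF_CAPFS_MASK) and are not part of the patch.

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* hypothetical field occupying bits 5..3 of a config byte */
	#define CFG_CAPFS_MASK	GENMASK(5, 3)

	static u8 cfg_set_capfs(u8 cfg, unsigned int idx)
	{
		cfg &= ~CFG_CAPFS_MASK;				/* clear the field */
		return cfg | FIELD_PREP(CFG_CAPFS_MASK, idx);	/* place idx into bits 5..3 */
	}

	static unsigned int cfg_get_capfs(u8 cfg)
	{
		return FIELD_GET(CFG_CAPFS_MASK, cfg);		/* extract and right-justify */
	}

FIELD_PREP() and FIELD_GET() derive the shift from the mask at compile time, which is why the separate *_SHIFT definitions could be dropped from the driver.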
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 793d628db55f..54ccf19ab2bb 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -18,6 +18,7 @@
#include <linux/scmi_protocol.h>
#include <linux/time.h>
#include <linux/types.h>
+#include <linux/units.h>
#define SCMI_IIO_NUM_OF_AXIS 3
@@ -130,7 +131,6 @@ static const struct iio_buffer_setup_ops scmi_iio_buffer_ops = {
static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
{
struct scmi_iio_priv *sensor = iio_priv(iio_dev);
- const unsigned long UHZ_PER_HZ = 1000000UL;
u64 sec, mult, uHz, sf;
u32 sensor_config;
char buf[32];
@@ -145,7 +145,7 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
return err;
}
- uHz = val * UHZ_PER_HZ + val2;
+ uHz = val * MICROHZ_PER_HZ + val2;
/*
* The seconds field in the sensor interval in SCMI is 16 bits long
@@ -156,10 +156,10 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
* count the number of characters
*/
sf = (u64)uHz * 0xFFFF;
- do_div(sf, UHZ_PER_HZ);
+ do_div(sf, MICROHZ_PER_HZ);
mult = scnprintf(buf, sizeof(buf), "%llu", sf) - 1;
- sec = int_pow(10, mult) * UHZ_PER_HZ;
+ sec = int_pow(10, mult) * MICROHZ_PER_HZ;
do_div(sec, uHz);
if (sec == 0) {
dev_err(&iio_dev->dev,
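
A worked example of the interval encoding above, with MICROHZ_PER_HZ = 1000000: requesting 10 Hz gives uHz = 10000000, so sf = 10000000 * 0xFFFF / 1000000 = 655350; that prints as six digits, hence mult = 5, and sec = 10^5 * 1000000 / 10000000 = 10000. The interval is then presumably encoded as 10000 scaled by 10^-5, i.e. 0.1 s, which keeps the value inside the 16-bit seconds field.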
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 9910ba1da085..35720c64fea8 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -354,7 +354,7 @@ void st_sensors_dev_name_probe(struct device *dev, char *name, int len)
return;
/* The name from the match takes precedence if present */
- strlcpy(name, match, len);
+ strscpy(name, match, len);
}
EXPORT_SYMBOL_NS(st_sensors_dev_name_probe, IIO_ST_SENSORS);
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 92be661034a6..8e5e014e0c28 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <asm/unaligned.h>
+
#define AD5593R_MODE_CONF (0 << 4)
#define AD5593R_MODE_DAC_WRITE (1 << 4)
#define AD5593R_MODE_ADC_READBACK (4 << 4)
@@ -20,6 +22,24 @@
#define AD5593R_MODE_GPIO_READBACK (6 << 4)
#define AD5593R_MODE_REG_READBACK (7 << 4)
+static int ad5593r_read_word(struct i2c_client *i2c, u8 reg, u16 *value)
+{
+ int ret;
+ u8 buf[2];
+
+ ret = i2c_smbus_write_byte(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_recv(i2c, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ *value = get_unaligned_be16(buf);
+
+ return 0;
+}
+
static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
@@ -38,13 +58,7 @@ static int ad5593r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
if (val < 0)
return (int) val;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK);
- if (val < 0)
- return (int) val;
-
- *value = (u16) val;
-
- return 0;
+ return ad5593r_read_word(i2c, AD5593R_MODE_ADC_READBACK, value);
}
static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
@@ -58,25 +72,19 @@ static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
- s32 val;
-
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg);
- if (val < 0)
- return (int) val;
- *value = (u16) val;
-
- return 0;
+ return ad5593r_read_word(i2c, AD5593R_MODE_REG_READBACK | reg, value);
}
static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
- s32 val;
+ u16 val;
+ int ret;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK);
- if (val < 0)
- return (int) val;
+ ret = ad5593r_read_word(i2c, AD5593R_MODE_GPIO_READBACK, &val);
+ if (ret)
+ return ret;
*value = (u8) val;
@@ -94,6 +102,10 @@ static const struct ad5592r_rw_ops ad5593r_rw_ops = {
static int ad5593r_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ if (!i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
return ad5592r_probe(&i2c->dev, id->name, &ad5593r_rw_ops);
}
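
For reference, the new ad5593r_read_word() helper splits the read-back into a plain command-byte write followed by a raw two-byte receive, and get_unaligned_be16() is just a safe big-endian load of those bytes; an equivalent open-coded form (illustrative only) would be:

	*value = ((u16)buf[0] << 8) | buf[1];

The added i2c_check_functionality() guard matches this split: the driver now needs both SMBus byte access (I2C_FUNC_SMBUS_BYTE) and raw I2C transfers (I2C_FUNC_I2C) from the adapter.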
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index 135c8cedc33d..b27088464826 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -540,13 +540,6 @@ static int adf4371_setup(struct adf4371_state *st)
return regmap_bulk_write(st->regmap, ADF4371_REG(0x30), st->buf, 5);
}
-static void adf4371_clk_disable(void *data)
-{
- struct adf4371_state *st = data;
-
- clk_disable_unprepare(st->clkin);
-}
-
static int adf4371_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -579,18 +572,10 @@ static int adf4371_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
- st->clkin = devm_clk_get(&spi->dev, "clkin");
+ st->clkin = devm_clk_get_enabled(&spi->dev, "clkin");
if (IS_ERR(st->clkin))
return PTR_ERR(st->clkin);
- ret = clk_prepare_enable(st->clkin);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, adf4371_clk_disable, st);
- if (ret)
- return ret;
-
st->clkin_freq = clk_get_rate(st->clkin);
ret = adf4371_setup(st);
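
The adf4371 change above, and the adrf6780 and adis16475 changes below, apply the same conversion: devm_clk_get_enabled() folds devm_clk_get(), clk_prepare_enable() and the devm cleanup action into one call. A rough sketch of the resulting probe pattern; foo_probe() is a hypothetical driver and only the "clkin" name is taken from the hunk above.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/spi/spi.h>

	static int foo_probe(struct spi_device *spi)
	{
		struct clk *clk;

		/*
		 * Get the clock, prepare and enable it, and register a devm
		 * action that disables and unprepares it when the driver
		 * detaches, so no explicit unwinding or remove hook is needed.
		 */
		clk = devm_clk_get_enabled(&spi->dev, "clkin");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return 0;
	}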
diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c
index 865addd10db4..bb5e1feef42b 100644
--- a/drivers/iio/frequency/admv1014.c
+++ b/drivers/iio/frequency/admv1014.c
@@ -669,8 +669,7 @@ static int admv1014_init(struct admv1014_state *st)
chip_id = FIELD_GET(ADMV1014_CHIP_ID_MSK, chip_id);
if (chip_id != ADMV1014_CHIP_ID) {
dev_err(&spi->dev, "Invalid Chip ID.\n");
- ret = -EINVAL;
- return ret;
+ return -EINVAL;
}
ret = __admv1014_spi_update_bits(st, ADMV1014_REG_QUAD,
diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c
index 21878bad0909..b4defb82f37e 100644
--- a/drivers/iio/frequency/adrf6780.c
+++ b/drivers/iio/frequency/adrf6780.c
@@ -441,11 +441,6 @@ static void adrf6780_properties_parse(struct adrf6780_state *st)
st->vdet_out_en = device_property_read_bool(&spi->dev, "adi,vdet-out-en");
}
-static void adrf6780_clk_disable(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static void adrf6780_powerdown(void *data)
{
/* Disable all components in the Enable Register */
@@ -473,20 +468,11 @@ static int adrf6780_probe(struct spi_device *spi)
adrf6780_properties_parse(st);
- st->clkin = devm_clk_get(&spi->dev, "lo_in");
+ st->clkin = devm_clk_get_enabled(&spi->dev, "lo_in");
if (IS_ERR(st->clkin))
return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
"failed to get the LO input clock\n");
- ret = clk_prepare_enable(st->clkin);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, adrf6780_clk_disable,
- st->clkin);
- if (ret)
- return ret;
-
mutex_init(&st->lock);
ret = adrf6780_init(st);
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 001ca2c3ff95..f1d7d4b5e222 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -52,6 +52,7 @@ config ADIS16480
ADIS16485, ADIS16488 inertial sensors.
source "drivers/iio/imu/bmi160/Kconfig"
+source "drivers/iio/imu/bno055/Kconfig"
config FXOS8700
tristate
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index c82748096c77..6eb612034722 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -15,6 +15,7 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
obj-y += bmi160/
+obj-y += bno055/
obj-$(CONFIG_FXOS8700) += fxos8700_core.o
obj-$(CONFIG_FXOS8700_I2C) += fxos8700_i2c.o
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index ff2b0fab840a..aec55f7e1f26 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -1120,11 +1120,6 @@ check_burst32:
return IRQ_HANDLED;
}
-static void adis16475_disable_clk(void *data)
-{
- clk_disable_unprepare((struct clk *)data);
-}
-
static int adis16475_config_sync_mode(struct adis16475 *st)
{
int ret;
@@ -1150,19 +1145,11 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
/* All the other modes require external input signal */
if (sync->sync_mode != ADIS16475_SYNC_OUTPUT) {
- struct clk *clk = devm_clk_get(dev, NULL);
+ struct clk *clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, adis16475_disable_clk, clk);
- if (ret)
- return ret;
-
st->clk_freq = clk_get_rate(clk);
if (st->clk_freq < sync->min_rate ||
st->clk_freq > sync->max_rate) {
diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
new file mode 100644
index 000000000000..fa79b1ac4f85
--- /dev/null
+++ b/drivers/iio/imu/bno055/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config BOSCH_BNO055
+ tristate
+
+config BOSCH_BNO055_SERIAL
+ tristate "Bosch BNO055 attached via UART"
+ depends on SERIAL_DEV_BUS
+ select BOSCH_BNO055
+ help
+ Enable this to support Bosch BNO055 IMUs attached via UART.
+
+ This driver can also be built as a module. If so, the module will be
+ called bno055_ser.
+
+config BOSCH_BNO055_I2C
+ tristate "Bosch BNO055 attached via I2C bus"
+ depends on I2C
+ select REGMAP_I2C
+ select BOSCH_BNO055
+ help
+ Enable this to support Bosch BNO055 IMUs attached via I2C bus.
+
+ This driver can also be built as a module. If so, the module will be
+ called bno055_i2c.
diff --git a/drivers/iio/imu/bno055/Makefile b/drivers/iio/imu/bno055/Makefile
new file mode 100644
index 000000000000..98c624730dae
--- /dev/null
+++ b/drivers/iio/imu/bno055/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_BOSCH_BNO055) += bno055.o
+obj-$(CONFIG_BOSCH_BNO055_SERIAL) += bno055_ser.o
+bno055_ser-y := bno055_ser_core.o
+# define_trace.h needs to know how to find our header
+CFLAGS_bno055_ser_trace.o := -I$(src)
+bno055_ser-$(CONFIG_TRACING) += bno055_ser_trace.o
+
+obj-$(CONFIG_BOSCH_BNO055_I2C) += bno055_i2c.o
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
new file mode 100644
index 000000000000..307557a609e3
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055.c
@@ -0,0 +1,1685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IIO driver for Bosch BNO055 IMU
+ *
+ * Copyright (C) 2021-2022 Istituto Italiano di Tecnologia
+ * Electronic Design Laboratory
+ * Written by Andrea Merello <andrea.merello@iit.it>
+ *
+ * Portions of this driver are taken from the BNO055 driver patch
+ * from Vlad Dogaru which is Copyright (c) 2016, Intel Corporation.
+ *
+ * This driver is also based on BMI160 driver, which is:
+ * Copyright (c) 2016, Intel Corporation.
+ * Copyright (c) 2019, Martin Kelly.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/util_macros.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "bno055.h"
+
+#define BNO055_FW_UID_FMT "bno055-caldata-%*phN.dat"
+#define BNO055_FW_GENERIC_NAME "bno055-caldata.dat"
+
+/* common registers */
+#define BNO055_PAGESEL_REG 0x7
+
+/* page 0 registers */
+#define BNO055_CHIP_ID_REG 0x0
+#define BNO055_CHIP_ID_MAGIC 0xA0
+#define BNO055_SW_REV_LSB_REG 0x4
+#define BNO055_SW_REV_MSB_REG 0x5
+#define BNO055_ACC_DATA_X_LSB_REG 0x8
+#define BNO055_ACC_DATA_Y_LSB_REG 0xA
+#define BNO055_ACC_DATA_Z_LSB_REG 0xC
+#define BNO055_MAG_DATA_X_LSB_REG 0xE
+#define BNO055_MAG_DATA_Y_LSB_REG 0x10
+#define BNO055_MAG_DATA_Z_LSB_REG 0x12
+#define BNO055_GYR_DATA_X_LSB_REG 0x14
+#define BNO055_GYR_DATA_Y_LSB_REG 0x16
+#define BNO055_GYR_DATA_Z_LSB_REG 0x18
+#define BNO055_EUL_DATA_X_LSB_REG 0x1A
+#define BNO055_EUL_DATA_Y_LSB_REG 0x1C
+#define BNO055_EUL_DATA_Z_LSB_REG 0x1E
+#define BNO055_QUAT_DATA_W_LSB_REG 0x20
+#define BNO055_LIA_DATA_X_LSB_REG 0x28
+#define BNO055_LIA_DATA_Y_LSB_REG 0x2A
+#define BNO055_LIA_DATA_Z_LSB_REG 0x2C
+#define BNO055_GRAVITY_DATA_X_LSB_REG 0x2E
+#define BNO055_GRAVITY_DATA_Y_LSB_REG 0x30
+#define BNO055_GRAVITY_DATA_Z_LSB_REG 0x32
+#define BNO055_SCAN_CH_COUNT ((BNO055_GRAVITY_DATA_Z_LSB_REG - BNO055_ACC_DATA_X_LSB_REG) / 2)
+#define BNO055_TEMP_REG 0x34
+#define BNO055_CALIB_STAT_REG 0x35
+#define BNO055_CALIB_STAT_MAGN_SHIFT 0
+#define BNO055_CALIB_STAT_ACCEL_SHIFT 2
+#define BNO055_CALIB_STAT_GYRO_SHIFT 4
+#define BNO055_CALIB_STAT_SYS_SHIFT 6
+#define BNO055_SYS_ERR_REG 0x3A
+#define BNO055_POWER_MODE_REG 0x3E
+#define BNO055_POWER_MODE_NORMAL 0
+#define BNO055_SYS_TRIGGER_REG 0x3F
+#define BNO055_SYS_TRIGGER_RST_SYS BIT(5)
+#define BNO055_SYS_TRIGGER_CLK_SEL BIT(7)
+#define BNO055_OPR_MODE_REG 0x3D
+#define BNO055_OPR_MODE_CONFIG 0x0
+#define BNO055_OPR_MODE_AMG 0x7
+#define BNO055_OPR_MODE_FUSION_FMC_OFF 0xB
+#define BNO055_OPR_MODE_FUSION 0xC
+#define BNO055_UNIT_SEL_REG 0x3B
+/* Android orientation mode means: pitch value decreases turning clockwise */
+#define BNO055_UNIT_SEL_ANDROID BIT(7)
+#define BNO055_UNIT_SEL_GYR_RPS BIT(1)
+#define BNO055_CALDATA_START 0x55
+#define BNO055_CALDATA_END 0x6A
+#define BNO055_CALDATA_LEN 22
+
+/*
+ * The difference in address between the register that contains the
+ * value and the register that contains the offset. This applies for
+ * accel, gyro and magn channels.
+ */
+#define BNO055_REG_OFFSET_ADDR 0x4D
+
+/* page 1 registers */
+#define BNO055_PG1(x) ((x) | 0x80)
+#define BNO055_ACC_CONFIG_REG BNO055_PG1(0x8)
+#define BNO055_ACC_CONFIG_LPF_MASK GENMASK(4, 2)
+#define BNO055_ACC_CONFIG_RANGE_MASK GENMASK(1, 0)
+#define BNO055_MAG_CONFIG_REG BNO055_PG1(0x9)
+#define BNO055_MAG_CONFIG_HIGHACCURACY 0x18
+#define BNO055_MAG_CONFIG_ODR_MASK GENMASK(2, 0)
+#define BNO055_GYR_CONFIG_REG BNO055_PG1(0xA)
+#define BNO055_GYR_CONFIG_RANGE_MASK GENMASK(2, 0)
+#define BNO055_GYR_CONFIG_LPF_MASK GENMASK(5, 3)
+#define BNO055_GYR_AM_SET_REG BNO055_PG1(0x1F)
+#define BNO055_UID_LOWER_REG BNO055_PG1(0x50)
+#define BNO055_UID_HIGHER_REG BNO055_PG1(0x5F)
+#define BNO055_UID_LEN 16
+
+struct bno055_sysfs_attr {
+ int *vals;
+ int len;
+ int *fusion_vals;
+ int *hw_xlate;
+ int type;
+};
+
+static int bno055_acc_lpf_vals[] = {
+ 7, 810000, 15, 630000, 31, 250000, 62, 500000,
+ 125, 0, 250, 0, 500, 0, 1000, 0,
+};
+
+static struct bno055_sysfs_attr bno055_acc_lpf = {
+ .vals = bno055_acc_lpf_vals,
+ .len = ARRAY_SIZE(bno055_acc_lpf_vals),
+ .fusion_vals = (int[]){62, 500000},
+ .type = IIO_VAL_INT_PLUS_MICRO,
+};
+
+static int bno055_acc_range_vals[] = {
+ /* G: 2, 4, 8, 16 */
+ 1962, 3924, 7848, 15696
+};
+
+static struct bno055_sysfs_attr bno055_acc_range = {
+ .vals = bno055_acc_range_vals,
+ .len = ARRAY_SIZE(bno055_acc_range_vals),
+ .fusion_vals = (int[]){3924}, /* 4G */
+ .type = IIO_VAL_INT,
+};
+
+/*
+ * Theoretically the IMU should return data in a given (i.e. fixed) unit
+ * regardless of the range setting. This happens for the accelerometer, but not
+ * for the gyroscope; the gyroscope range setting affects the scale.
+ * This is probably due to this[0] bug.
+ * For this reason we map the internal range setting onto the standard IIO scale
+ * attribute for gyro.
+ * Since the bug[0] may be fixed in future, we check for the IMU FW version and
+ * possibly warn the user.
+ * Currently we just don't care about "range" attributes for gyro.
+ *
+ * [0] https://community.bosch-sensortec.com/t5/MEMS-sensors-forum/BNO055-Wrong-sensitivity-resolution-in-datasheet/td-p/10266
+ */
+
+/*
+ * dps = hwval * (dps_range/2^15)
+ * rps = hwval * (rps_range/2^15)
+ * = hwval * (dps_range/(2^15 * k))
+ * where k is rad-to-deg factor
+ */
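+/*
+ * For instance, for the 125 dps range: 2^15 * (180/pi) is roughly 1877467,
+ * hence each entry below is range/1877467 rad/s per LSB.
+ */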
+static int bno055_gyr_scale_vals[] = {
+ 125, 1877467, 250, 1877467, 500, 1877467,
+ 1000, 1877467, 2000, 1877467,
+};
+
+static struct bno055_sysfs_attr bno055_gyr_scale = {
+ .vals = bno055_gyr_scale_vals,
+ .len = ARRAY_SIZE(bno055_gyr_scale_vals),
+ .fusion_vals = (int[]){1, 900},
+ .hw_xlate = (int[]){4, 3, 2, 1, 0},
+ .type = IIO_VAL_FRACTIONAL,
+};
+
+static int bno055_gyr_lpf_vals[] = {12, 23, 32, 47, 64, 116, 230, 523};
+static struct bno055_sysfs_attr bno055_gyr_lpf = {
+ .vals = bno055_gyr_lpf_vals,
+ .len = ARRAY_SIZE(bno055_gyr_lpf_vals),
+ .fusion_vals = (int[]){32},
+ .hw_xlate = (int[]){5, 4, 7, 3, 6, 2, 1, 0},
+ .type = IIO_VAL_INT,
+};
+
+static int bno055_mag_odr_vals[] = {2, 6, 8, 10, 15, 20, 25, 30};
+static struct bno055_sysfs_attr bno055_mag_odr = {
+ .vals = bno055_mag_odr_vals,
+ .len = ARRAY_SIZE(bno055_mag_odr_vals),
+ .fusion_vals = (int[]){20},
+ .type = IIO_VAL_INT,
+};
+
+struct bno055_priv {
+ struct regmap *regmap;
+ struct device *dev;
+ struct clk *clk;
+ int operation_mode;
+ int xfer_burst_break_thr;
+ struct mutex lock;
+ u8 uid[BNO055_UID_LEN];
+ struct gpio_desc *reset_gpio;
+ bool sw_reset;
+ struct {
+ __le16 chans[BNO055_SCAN_CH_COUNT];
+ s64 timestamp __aligned(8);
+ } buf;
+ struct dentry *debugfs;
+};
+
+static bool bno055_regmap_volatile(struct device *dev, unsigned int reg)
+{
+ /* data and status registers */
+ if (reg >= BNO055_ACC_DATA_X_LSB_REG && reg <= BNO055_SYS_ERR_REG)
+ return true;
+
+ /* when in fusion mode, config is updated by chip */
+ if (reg == BNO055_MAG_CONFIG_REG ||
+ reg == BNO055_ACC_CONFIG_REG ||
+ reg == BNO055_GYR_CONFIG_REG)
+ return true;
+
+ /* calibration data may be updated by the IMU */
+ if (reg >= BNO055_CALDATA_START && reg <= BNO055_CALDATA_END)
+ return true;
+
+ return false;
+}
+
+static bool bno055_regmap_readable(struct device *dev, unsigned int reg)
+{
+ /* unnamed PG0 reserved areas */
+ if ((reg < BNO055_PG1(0) && reg > BNO055_CALDATA_END) ||
+ reg == 0x3C)
+ return false;
+
+ /* unnamed PG1 reserved areas */
+ if (reg > BNO055_PG1(BNO055_UID_HIGHER_REG) ||
+ (reg < BNO055_PG1(BNO055_UID_LOWER_REG) && reg > BNO055_PG1(BNO055_GYR_AM_SET_REG)) ||
+ reg == BNO055_PG1(0xE) ||
+ (reg < BNO055_PG1(BNO055_PAGESEL_REG) && reg >= BNO055_PG1(0x0)))
+ return false;
+ return true;
+}
+
+static bool bno055_regmap_writeable(struct device *dev, unsigned int reg)
+{
+ /*
+ * Unreadable registers are indeed reserved; there are no WO regs
+ * (except for a single bit in SYS_TRIGGER register)
+ */
+ if (!bno055_regmap_readable(dev, reg))
+ return false;
+
+ /* data and status registers */
+ if (reg >= BNO055_ACC_DATA_X_LSB_REG && reg <= BNO055_SYS_ERR_REG)
+ return false;
+
+ /* ID areas */
+ if (reg < BNO055_PAGESEL_REG ||
+ (reg <= BNO055_UID_HIGHER_REG && reg >= BNO055_UID_LOWER_REG))
+ return false;
+
+ return true;
+}
+
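+/*
+ * The chip's register map is split in two 0x80-byte pages selected via
+ * BNO055_PAGESEL_REG. The range below folds both pages into one flat
+ * virtual space, so page-1 registers are addressed as BNO055_PG1(x),
+ * i.e. x | 0x80.
+ */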
+static const struct regmap_range_cfg bno055_regmap_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = 0x7f * 2,
+ .selector_reg = BNO055_PAGESEL_REG,
+ .selector_mask = GENMASK(7, 0),
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x80,
+ },
+};
+
+const struct regmap_config bno055_regmap_config = {
+ .name = "bno055",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .ranges = bno055_regmap_ranges,
+ .num_ranges = 1,
+ .volatile_reg = bno055_regmap_volatile,
+ .max_register = 0x80 * 2,
+ .writeable_reg = bno055_regmap_writeable,
+ .readable_reg = bno055_regmap_readable,
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_NS_GPL(bno055_regmap_config, IIO_BNO055);
+
+/* must be called in configuration mode */
+static int bno055_calibration_load(struct bno055_priv *priv, const u8 *data, int len)
+{
+ if (len != BNO055_CALDATA_LEN) {
+ dev_dbg(priv->dev, "Invalid calibration file size %d (expected %d)",
+ len, BNO055_CALDATA_LEN);
+ return -EINVAL;
+ }
+
+ dev_dbg(priv->dev, "loading cal data: %*ph", BNO055_CALDATA_LEN, data);
+ return regmap_bulk_write(priv->regmap, BNO055_CALDATA_START,
+ data, BNO055_CALDATA_LEN);
+}
+
+static int bno055_operation_mode_do_set(struct bno055_priv *priv,
+ int operation_mode)
+{
+ int ret;
+
+ ret = regmap_write(priv->regmap, BNO055_OPR_MODE_REG,
+ operation_mode);
+ if (ret)
+ return ret;
+
+ /* Following datasheet specifications: the sensor takes 7 ms up to 19 ms to switch mode */
+ msleep(20);
+
+ return 0;
+}
+
+static int bno055_system_reset(struct bno055_priv *priv)
+{
+ int ret;
+
+ if (priv->reset_gpio) {
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ } else if (priv->sw_reset) {
+ ret = regmap_write(priv->regmap, BNO055_SYS_TRIGGER_REG,
+ BNO055_SYS_TRIGGER_RST_SYS);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+
+ regcache_drop_region(priv->regmap, 0x0, 0xff);
+ usleep_range(650000, 700000);
+
+ return 0;
+}
+
+static int bno055_init(struct bno055_priv *priv, const u8 *caldata, int len)
+{
+ int ret;
+
+ ret = bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, BNO055_POWER_MODE_REG,
+ BNO055_POWER_MODE_NORMAL);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, BNO055_SYS_TRIGGER_REG,
+ priv->clk ? BNO055_SYS_TRIGGER_CLK_SEL : 0);
+ if (ret)
+ return ret;
+
+ /* use standard SI units */
+ ret = regmap_write(priv->regmap, BNO055_UNIT_SEL_REG,
+ BNO055_UNIT_SEL_ANDROID | BNO055_UNIT_SEL_GYR_RPS);
+ if (ret)
+ return ret;
+
+ if (caldata) {
+ ret = bno055_calibration_load(priv, caldata, len);
+ if (ret)
+ dev_warn(priv->dev, "failed to load calibration data with error %d\n",
+ ret);
+ }
+
+ return 0;
+}
+
+static ssize_t bno055_operation_mode_set(struct bno055_priv *priv,
+ int operation_mode)
+{
+ u8 caldata[BNO055_CALDATA_LEN];
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+ if (ret)
+ goto exit_unlock;
+
+ if (operation_mode == BNO055_OPR_MODE_FUSION ||
+ operation_mode == BNO055_OPR_MODE_FUSION_FMC_OFF) {
+ /* for entering fusion mode, reset the chip to clear the algo state */
+ ret = regmap_bulk_read(priv->regmap, BNO055_CALDATA_START, caldata,
+ BNO055_CALDATA_LEN);
+ if (ret)
+ goto exit_unlock;
+
+ ret = bno055_system_reset(priv);
+ if (ret)
+ goto exit_unlock;
+
+ ret = bno055_init(priv, caldata, BNO055_CALDATA_LEN);
+ if (ret)
+ goto exit_unlock;
+ }
+
+ ret = bno055_operation_mode_do_set(priv, operation_mode);
+ if (ret)
+ goto exit_unlock;
+
+ priv->operation_mode = operation_mode;
+
+exit_unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void bno055_uninit(void *arg)
+{
+ struct bno055_priv *priv = arg;
+
+ /* stop the IMU */
+ bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+}
+
+#define BNO055_CHANNEL(_type, _axis, _index, _address, _sep, _sh, _avail) { \
+ .address = _address, \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | (_sep), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | (_sh), \
+ .info_mask_shared_by_type_available = _avail, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ .repeat = IIO_MOD_##_axis == IIO_MOD_QUATERNION ? 4 : 0, \
+ }, \
+}
+
+/* scan indexes follow DATA register order */
+enum bno055_scan_axis {
+ BNO055_SCAN_ACCEL_X,
+ BNO055_SCAN_ACCEL_Y,
+ BNO055_SCAN_ACCEL_Z,
+ BNO055_SCAN_MAGN_X,
+ BNO055_SCAN_MAGN_Y,
+ BNO055_SCAN_MAGN_Z,
+ BNO055_SCAN_GYRO_X,
+ BNO055_SCAN_GYRO_Y,
+ BNO055_SCAN_GYRO_Z,
+ BNO055_SCAN_YAW,
+ BNO055_SCAN_ROLL,
+ BNO055_SCAN_PITCH,
+ BNO055_SCAN_QUATERNION,
+ BNO055_SCAN_LIA_X,
+ BNO055_SCAN_LIA_Y,
+ BNO055_SCAN_LIA_Z,
+ BNO055_SCAN_GRAVITY_X,
+ BNO055_SCAN_GRAVITY_Y,
+ BNO055_SCAN_GRAVITY_Z,
+ BNO055_SCAN_TIMESTAMP,
+ _BNO055_SCAN_MAX
+};
+
+static const struct iio_chan_spec bno055_channels[] = {
+ /* accelerometer */
+ BNO055_CHANNEL(IIO_ACCEL, X, BNO055_SCAN_ACCEL_X,
+ BNO055_ACC_DATA_X_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)),
+ BNO055_CHANNEL(IIO_ACCEL, Y, BNO055_SCAN_ACCEL_Y,
+ BNO055_ACC_DATA_Y_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)),
+ BNO055_CHANNEL(IIO_ACCEL, Z, BNO055_SCAN_ACCEL_Z,
+ BNO055_ACC_DATA_Z_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)),
+ /* gyroscope */
+ BNO055_CHANNEL(IIO_ANGL_VEL, X, BNO055_SCAN_GYRO_X,
+ BNO055_GYR_DATA_X_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SCALE)),
+ BNO055_CHANNEL(IIO_ANGL_VEL, Y, BNO055_SCAN_GYRO_Y,
+ BNO055_GYR_DATA_Y_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SCALE)),
+ BNO055_CHANNEL(IIO_ANGL_VEL, Z, BNO055_SCAN_GYRO_Z,
+ BNO055_GYR_DATA_Z_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SCALE)),
+ /* magnetometer */
+ BNO055_CHANNEL(IIO_MAGN, X, BNO055_SCAN_MAGN_X,
+ BNO055_MAG_DATA_X_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), BIT(IIO_CHAN_INFO_SAMP_FREQ)),
+ BNO055_CHANNEL(IIO_MAGN, Y, BNO055_SCAN_MAGN_Y,
+ BNO055_MAG_DATA_Y_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), BIT(IIO_CHAN_INFO_SAMP_FREQ)),
+ BNO055_CHANNEL(IIO_MAGN, Z, BNO055_SCAN_MAGN_Z,
+ BNO055_MAG_DATA_Z_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), BIT(IIO_CHAN_INFO_SAMP_FREQ)),
+ /* euler angle */
+ BNO055_CHANNEL(IIO_ROT, YAW, BNO055_SCAN_YAW,
+ BNO055_EUL_DATA_X_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_ROT, ROLL, BNO055_SCAN_ROLL,
+ BNO055_EUL_DATA_Y_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_ROT, PITCH, BNO055_SCAN_PITCH,
+ BNO055_EUL_DATA_Z_LSB_REG, 0, 0, 0),
+ /* quaternion */
+ BNO055_CHANNEL(IIO_ROT, QUATERNION, BNO055_SCAN_QUATERNION,
+ BNO055_QUAT_DATA_W_LSB_REG, 0, 0, 0),
+
+ /* linear acceleration */
+ BNO055_CHANNEL(IIO_ACCEL, LINEAR_X, BNO055_SCAN_LIA_X,
+ BNO055_LIA_DATA_X_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_ACCEL, LINEAR_Y, BNO055_SCAN_LIA_Y,
+ BNO055_LIA_DATA_Y_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_ACCEL, LINEAR_Z, BNO055_SCAN_LIA_Z,
+ BNO055_LIA_DATA_Z_LSB_REG, 0, 0, 0),
+
+ /* gravity vector */
+ BNO055_CHANNEL(IIO_GRAVITY, X, BNO055_SCAN_GRAVITY_X,
+ BNO055_GRAVITY_DATA_X_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_GRAVITY, Y, BNO055_SCAN_GRAVITY_Y,
+ BNO055_GRAVITY_DATA_Y_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_GRAVITY, Z, BNO055_SCAN_GRAVITY_Z,
+ BNO055_GRAVITY_DATA_Z_LSB_REG, 0, 0, 0),
+
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = -1,
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(BNO055_SCAN_TIMESTAMP),
+};
+
+static int bno055_get_regmask(struct bno055_priv *priv, int *val, int *val2,
+ int reg, int mask, struct bno055_sysfs_attr *attr)
+{
+ const int shift = __ffs(mask);
+ int hwval, idx;
+ int ret;
+ int i;
+
+ ret = regmap_read(priv->regmap, reg, &hwval);
+ if (ret)
+ return ret;
+
+ idx = (hwval & mask) >> shift;
+ if (attr->hw_xlate)
+ for (i = 0; i < attr->len; i++)
+ if (attr->hw_xlate[i] == idx) {
+ idx = i;
+ break;
+ }
+ if (attr->type == IIO_VAL_INT) {
+ *val = attr->vals[idx];
+ } else { /* IIO_VAL_INT_PLUS_MICRO or IIO_VAL_FRACTIONAL */
+ *val = attr->vals[idx * 2];
+ *val2 = attr->vals[idx * 2 + 1];
+ }
+
+ return attr->type;
+}
+
+static int bno055_set_regmask(struct bno055_priv *priv, int val, int val2,
+ int reg, int mask, struct bno055_sysfs_attr *attr)
+{
+ const int shift = __ffs(mask);
+ int best_delta;
+ int req_val;
+ int tbl_val;
+ bool first;
+ int delta;
+ int hwval;
+ int ret;
+ int len;
+ int i;
+
+ /*
+ * In fusion mode the HW supports only one value, which is autoselected;
+ * don't do anything, just return OK, as the closest possible value
+ * has been (virtually) selected.
+ */
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG)
+ return 0;
+
+ len = attr->len;
+
+ /*
+ * We always get a request in INT_PLUS_MICRO, but we
+ * take care of the micro part only when we really have
+ * non-integer tables. This prevents 32-bit overflow with
+ * larger integers contained in integer tables.
+ */
+ req_val = val;
+ if (attr->type != IIO_VAL_INT) {
+ len /= 2;
+ req_val = min(val, 2147) * 1000000 + val2;
+ }
+
+ first = true;
+ for (i = 0; i < len; i++) {
+ switch (attr->type) {
+ case IIO_VAL_INT:
+ tbl_val = attr->vals[i];
+ break;
+ case IIO_VAL_INT_PLUS_MICRO:
+ WARN_ON(attr->vals[i * 2] > 2147);
+ tbl_val = attr->vals[i * 2] * 1000000 +
+ attr->vals[i * 2 + 1];
+ break;
+ case IIO_VAL_FRACTIONAL:
+ WARN_ON(attr->vals[i * 2] > 4294);
+ tbl_val = attr->vals[i * 2] * 1000000 /
+ attr->vals[i * 2 + 1];
+ break;
+ default:
+ return -EINVAL;
+ }
+ delta = abs(tbl_val - req_val);
+ if (delta < best_delta || first) {
+ best_delta = delta;
+ hwval = i;
+ first = false;
+ }
+ }
+
+ if (attr->hw_xlate)
+ hwval = attr->hw_xlate[hwval];
+
+ ret = bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, hwval << shift);
+ if (ret)
+ return ret;
+
+ return bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_AMG);
+}
+
+static int bno055_read_simple_chan(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ __le16 raw_val;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_bulk_read(priv->regmap, chan->address,
+ &raw_val, sizeof(raw_val));
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(le16_to_cpu(raw_val), 15);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG) {
+ *val = 0;
+ } else {
+ ret = regmap_bulk_read(priv->regmap,
+ chan->address +
+ BNO055_REG_OFFSET_ADDR,
+ &raw_val, sizeof(raw_val));
+ if (ret < 0)
+ return ret;
+ /*
+ * IMU reports sensor offsets; IIO wants correction
+ * offsets, thus we need the 'minus' here.
+ */
+ *val = -sign_extend32(le16_to_cpu(raw_val), 15);
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ switch (chan->type) {
+ case IIO_GRAVITY:
+ /* Table 3-35: 1 m/s^2 = 100 LSB */
+ case IIO_ACCEL:
+ /* Table 3-17: 1 m/s^2 = 100 LSB */
+ *val2 = 100;
+ break;
+ case IIO_MAGN:
+ /*
+ * Table 3-19: 1 uT = 16 LSB. But we need
+ * Gauss: 1 G = 0.1 mT.
+ */
+ *val2 = 160;
+ break;
+ case IIO_ANGL_VEL:
+ /*
+ * Table 3-22: 1 Rps = 900 LSB
+ * .. but this is not exactly true. See comment at the
+ * beginning of this file.
+ */
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG) {
+ *val = bno055_gyr_scale.fusion_vals[0];
+ *val2 = bno055_gyr_scale.fusion_vals[1];
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return bno055_get_regmask(priv, val, val2,
+ BNO055_GYR_CONFIG_REG,
+ BNO055_GYR_CONFIG_RANGE_MASK,
+ &bno055_gyr_scale);
+ break;
+ case IIO_ROT:
+ /* Table 3-28: 1 degree = 16 LSB */
+ *val2 = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (chan->type != IIO_MAGN)
+ return -EINVAL;
+
+ return bno055_get_regmask(priv, val, val2,
+ BNO055_MAG_CONFIG_REG,
+ BNO055_MAG_CONFIG_ODR_MASK,
+ &bno055_mag_odr);
+
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ return bno055_get_regmask(priv, val, val2,
+ BNO055_GYR_CONFIG_REG,
+ BNO055_GYR_CONFIG_LPF_MASK,
+ &bno055_gyr_lpf);
+ case IIO_ACCEL:
+ return bno055_get_regmask(priv, val, val2,
+ BNO055_ACC_CONFIG_REG,
+ BNO055_ACC_CONFIG_LPF_MASK,
+ &bno055_acc_lpf);
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bno055_sysfs_attr_avail(struct bno055_priv *priv, struct bno055_sysfs_attr *attr,
+ const int **vals, int *length)
+{
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG) {
+ /* locked when fusion enabled */
+ *vals = attr->fusion_vals;
+ if (attr->type == IIO_VAL_INT)
+ *length = 1;
+ else
+ *length = 2; /* IIO_VAL_INT_PLUS_MICRO or IIO_VAL_FRACTIONAL */
+ } else {
+ *vals = attr->vals;
+ *length = attr->len;
+ }
+
+ return attr->type;
+}
+
+static int bno055_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *type = bno055_sysfs_attr_avail(priv, &bno055_gyr_scale,
+ vals, length);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *type = bno055_sysfs_attr_avail(priv, &bno055_gyr_lpf,
+ vals, length);
+ return IIO_AVAIL_LIST;
+ case IIO_ACCEL:
+ *type = bno055_sysfs_attr_avail(priv, &bno055_acc_lpf,
+ vals, length);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_MAGN:
+ *type = bno055_sysfs_attr_avail(priv, &bno055_mag_odr,
+ vals, length);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bno055_read_temp_chan(struct iio_dev *indio_dev, int *val)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ unsigned int raw_val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, BNO055_TEMP_REG, &raw_val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Tables 3-36 and 3-37: one byte of data, signed, 1 LSB = 1C.
+ * ABI wants milliC.
+ */
+ *val = raw_val * 1000;
+
+ return IIO_VAL_INT;
+}
+
+static int bno055_read_quaternion(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int size, int *vals, int *val_len,
+ long mask)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ __le16 raw_vals[4];
+ int i, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (size < 4)
+ return -EINVAL;
+ ret = regmap_bulk_read(priv->regmap,
+ BNO055_QUAT_DATA_W_LSB_REG,
+ raw_vals, sizeof(raw_vals));
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < 4; i++)
+ vals[i] = sign_extend32(le16_to_cpu(raw_vals[i]), 15);
+ *val_len = 4;
+ return IIO_VAL_INT_MULTIPLE;
+ case IIO_CHAN_INFO_SCALE:
+ /* Table 3-31: 1 quaternion = 2^14 LSB */
+ if (size < 2)
+ return -EINVAL;
+ vals[0] = 1;
+ vals[1] = 14;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool bno055_is_chan_readable(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG)
+ return true;
+
+ switch (chan->type) {
+ case IIO_GRAVITY:
+ case IIO_ROT:
+ return false;
+ case IIO_ACCEL:
+ if (chan->channel2 == IIO_MOD_LINEAR_X ||
+ chan->channel2 == IIO_MOD_LINEAR_Y ||
+ chan->channel2 == IIO_MOD_LINEAR_Z)
+ return false;
+ return true;
+ default:
+ return true;
+ }
+}
+
+static int _bno055_read_raw_multi(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int size, int *vals, int *val_len,
+ long mask)
+{
+ if (!bno055_is_chan_readable(indio_dev, chan))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_MAGN:
+ case IIO_ACCEL:
+ case IIO_ANGL_VEL:
+ case IIO_GRAVITY:
+ if (size < 2)
+ return -EINVAL;
+ *val_len = 2;
+ return bno055_read_simple_chan(indio_dev, chan,
+ &vals[0], &vals[1],
+ mask);
+ case IIO_TEMP:
+ *val_len = 1;
+ return bno055_read_temp_chan(indio_dev, &vals[0]);
+ case IIO_ROT:
+ /*
+ * Rotation is exposed as either a quaternion or three
+ * Euler angles.
+ */
+ if (chan->channel2 == IIO_MOD_QUATERNION)
+ return bno055_read_quaternion(indio_dev, chan,
+ size, vals,
+ val_len, mask);
+ if (size < 2)
+ return -EINVAL;
+ *val_len = 2;
+ return bno055_read_simple_chan(indio_dev, chan,
+ &vals[0], &vals[1],
+ mask);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bno055_read_raw_multi(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int size, int *vals, int *val_len,
+ long mask)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = _bno055_read_raw_multi(indio_dev, chan, size,
+ vals, val_len, mask);
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int _bno055_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bno055_priv *priv = iio_priv(iio_dev);
+
+ switch (chan->type) {
+ case IIO_MAGN:
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return bno055_set_regmask(priv, val, val2,
+ BNO055_MAG_CONFIG_REG,
+ BNO055_MAG_CONFIG_ODR_MASK,
+ &bno055_mag_odr);
+ default:
+ return -EINVAL;
+ }
+ case IIO_ACCEL:
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return bno055_set_regmask(priv, val, val2,
+ BNO055_ACC_CONFIG_REG,
+ BNO055_ACC_CONFIG_LPF_MASK,
+ &bno055_acc_lpf);
+
+ default:
+ return -EINVAL;
+ }
+ case IIO_ANGL_VEL:
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return bno055_set_regmask(priv, val, val2,
+ BNO055_GYR_CONFIG_REG,
+ BNO055_GYR_CONFIG_LPF_MASK,
+ &bno055_gyr_lpf);
+ case IIO_CHAN_INFO_SCALE:
+ return bno055_set_regmask(priv, val, val2,
+ BNO055_GYR_CONFIG_REG,
+ BNO055_GYR_CONFIG_RANGE_MASK,
+ &bno055_gyr_scale);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bno055_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bno055_priv *priv = iio_priv(iio_dev);
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = _bno055_write_raw(iio_dev, chan, val, val2, mask);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static ssize_t in_accel_range_raw_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ int len = 0;
+ int i;
+
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG)
+ return sysfs_emit(buf, "%d\n", bno055_acc_range.fusion_vals[0]);
+
+ for (i = 0; i < bno055_acc_range.len; i++)
+ len += sysfs_emit_at(buf, len, "%d ", bno055_acc_range.vals[i]);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t fusion_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+
+ return sysfs_emit(buf, "%d\n",
+ priv->operation_mode != BNO055_OPR_MODE_AMG);
+}
+
+static ssize_t fusion_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ bool en;
+ int ret;
+
+ if (indio_dev->active_scan_mask &&
+ !bitmap_empty(indio_dev->active_scan_mask, _BNO055_SCAN_MAX))
+ return -EBUSY;
+
+ ret = kstrtobool(buf, &en);
+ if (ret)
+ return -EINVAL;
+
+ if (!en)
+ return bno055_operation_mode_set(priv, BNO055_OPR_MODE_AMG) ?: len;
+
+ /*
+ * Coming from AMG means the FMC was off; just switch to fusion, but
+ * don't change anything that doesn't belong to us (i.e. let the FMC stay off).
+ * Coming from any other fusion mode means we don't need to do anything.
+ */
+ if (priv->operation_mode == BNO055_OPR_MODE_AMG)
+ return bno055_operation_mode_set(priv, BNO055_OPR_MODE_FUSION_FMC_OFF) ?: len;
+
+ return len;
+}
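+
+/*
+ * Illustrative usage (editor's sketch, not part of the driver): writing 0 to
+ * the fusion_enable sysfs attribute (under the IIO device directory, e.g.
+ * /sys/bus/iio/devices/iio:deviceN/) switches the IMU to raw AMG mode, while
+ * writing 1 from AMG re-enters fusion mode with the FMC left off.
+ */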
+
+static ssize_t in_magn_calibration_fast_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+
+ return sysfs_emit(buf, "%d\n",
+ priv->operation_mode == BNO055_OPR_MODE_FUSION);
+}
+
+static ssize_t in_magn_calibration_fast_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ if (indio_dev->active_scan_mask &&
+ !bitmap_empty(indio_dev->active_scan_mask, _BNO055_SCAN_MAX))
+ return -EBUSY;
+
+ if (sysfs_streq(buf, "0")) {
+ if (priv->operation_mode == BNO055_OPR_MODE_FUSION) {
+ ret = bno055_operation_mode_set(priv, BNO055_OPR_MODE_FUSION_FMC_OFF);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (priv->operation_mode == BNO055_OPR_MODE_AMG)
+ return -EINVAL;
+
+ if (priv->operation_mode != BNO055_OPR_MODE_FUSION) {
+ ret = bno055_operation_mode_set(priv, BNO055_OPR_MODE_FUSION);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t in_accel_range_raw_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ int val;
+ int ret;
+
+ ret = bno055_get_regmask(priv, &val, NULL,
+ BNO055_ACC_CONFIG_REG,
+ BNO055_ACC_CONFIG_RANGE_MASK,
+ &bno055_acc_range);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t in_accel_range_raw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->lock);
+ ret = bno055_set_regmask(priv, val, 0,
+ BNO055_ACC_CONFIG_REG,
+ BNO055_ACC_CONFIG_RANGE_MASK,
+ &bno055_acc_range);
+ mutex_unlock(&priv->lock);
+
+ return ret ?: len;
+}
+
+static ssize_t bno055_get_calib_status(struct device *dev, char *buf, int which)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ int calib;
+ int ret;
+ int val;
+
+ if (priv->operation_mode == BNO055_OPR_MODE_AMG ||
+ (priv->operation_mode == BNO055_OPR_MODE_FUSION_FMC_OFF &&
+ which == BNO055_CALIB_STAT_MAGN_SHIFT)) {
+ calib = 0;
+ } else {
+ mutex_lock(&priv->lock);
+ ret = regmap_read(priv->regmap, BNO055_CALIB_STAT_REG, &val);
+ mutex_unlock(&priv->lock);
+
+ if (ret)
+ return -EIO;
+
+ calib = ((val >> which) & GENMASK(1, 0)) + 1;
+ }
+
+ return sysfs_emit(buf, "%d\n", calib);
+}
+
+static ssize_t serialnumber_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+
+ return sysfs_emit(buf, "%*ph\n", BNO055_UID_LEN, priv->uid);
+}
+
+static ssize_t calibration_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(kobj_to_dev(kobj)));
+ u8 data[BNO055_CALDATA_LEN];
+ int ret;
+
+ /*
+ * Calibration data is volatile; reading it in chunks may result in
+ * inconsistent data. We require the user to read the whole blob in a
+ * single chunk.
+ */
+ if (count < BNO055_CALDATA_LEN || pos)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+ ret = bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+ if (ret)
+ goto exit_unlock;
+
+ ret = regmap_bulk_read(priv->regmap, BNO055_CALDATA_START, data,
+ BNO055_CALDATA_LEN);
+ if (ret)
+ goto exit_unlock;
+
+ ret = bno055_operation_mode_do_set(priv, priv->operation_mode);
+ if (ret)
+ goto exit_unlock;
+
+ memcpy(buf, data, BNO055_CALDATA_LEN);
+
+ ret = BNO055_CALDATA_LEN;
+exit_unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
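+
+/*
+ * Illustrative userspace usage (editor's sketch, not part of the driver):
+ * since the blob must be read in one go, issue a single read of at least
+ * BNO055_CALDATA_LEN bytes at offset 0, e.g. pread(fd, buf, sizeof(buf), 0)
+ * on the calibration_data attribute in the device's sysfs directory.
+ */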
+
+static ssize_t sys_calibration_auto_status_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return bno055_get_calib_status(dev, buf, BNO055_CALIB_STAT_SYS_SHIFT);
+}
+
+static ssize_t in_accel_calibration_auto_status_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return bno055_get_calib_status(dev, buf, BNO055_CALIB_STAT_ACCEL_SHIFT);
+}
+
+static ssize_t in_gyro_calibration_auto_status_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return bno055_get_calib_status(dev, buf, BNO055_CALIB_STAT_GYRO_SHIFT);
+}
+
+static ssize_t in_magn_calibration_auto_status_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return bno055_get_calib_status(dev, buf, BNO055_CALIB_STAT_MAGN_SHIFT);
+}
+
+static int bno055_debugfs_reg_access(struct iio_dev *iio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct bno055_priv *priv = iio_priv(iio_dev);
+
+ if (readval)
+ return regmap_read(priv->regmap, reg, readval);
+ else
+ return regmap_write(priv->regmap, reg, writeval);
+}
+
+static ssize_t bno055_show_fw_version(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct bno055_priv *priv = file->private_data;
+ int rev, ver;
+ char *buf;
+ int ret;
+
+ ret = regmap_read(priv->regmap, BNO055_SW_REV_LSB_REG, &rev);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(priv->regmap, BNO055_SW_REV_MSB_REG, &ver);
+ if (ret)
+ return ret;
+
+ buf = kasprintf(GFP_KERNEL, "ver: 0x%x, rev: 0x%x\n", ver, rev);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations bno055_fw_version_ops = {
+ .open = simple_open,
+ .read = bno055_show_fw_version,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static void bno055_debugfs_remove(void *_priv)
+{
+ struct bno055_priv *priv = _priv;
+
+ debugfs_remove(priv->debugfs);
+ priv->debugfs = NULL;
+}
+
+static void bno055_debugfs_init(struct iio_dev *iio_dev)
+{
+ struct bno055_priv *priv = iio_priv(iio_dev);
+
+ priv->debugfs = debugfs_create_file("firmware_version", 0400,
+ iio_get_debugfs_dentry(iio_dev),
+ priv, &bno055_fw_version_ops);
+ if (!IS_ERR(priv->debugfs))
+ devm_add_action_or_reset(priv->dev, bno055_debugfs_remove,
+ priv);
+ if (IS_ERR_OR_NULL(priv->debugfs))
+ dev_warn(priv->dev, "failed to setup debugfs");
+}
+
+static IIO_DEVICE_ATTR_RW(fusion_enable, 0);
+static IIO_DEVICE_ATTR_RW(in_magn_calibration_fast_enable, 0);
+static IIO_DEVICE_ATTR_RW(in_accel_range_raw, 0);
+
+static IIO_DEVICE_ATTR_RO(in_accel_range_raw_available, 0);
+static IIO_DEVICE_ATTR_RO(sys_calibration_auto_status, 0);
+static IIO_DEVICE_ATTR_RO(in_accel_calibration_auto_status, 0);
+static IIO_DEVICE_ATTR_RO(in_gyro_calibration_auto_status, 0);
+static IIO_DEVICE_ATTR_RO(in_magn_calibration_auto_status, 0);
+static IIO_DEVICE_ATTR_RO(serialnumber, 0);
+
+static struct attribute *bno055_attrs[] = {
+ &iio_dev_attr_in_accel_range_raw_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_range_raw.dev_attr.attr,
+ &iio_dev_attr_fusion_enable.dev_attr.attr,
+ &iio_dev_attr_in_magn_calibration_fast_enable.dev_attr.attr,
+ &iio_dev_attr_sys_calibration_auto_status.dev_attr.attr,
+ &iio_dev_attr_in_accel_calibration_auto_status.dev_attr.attr,
+ &iio_dev_attr_in_gyro_calibration_auto_status.dev_attr.attr,
+ &iio_dev_attr_in_magn_calibration_auto_status.dev_attr.attr,
+ &iio_dev_attr_serialnumber.dev_attr.attr,
+ NULL
+};
+
+static BIN_ATTR_RO(calibration_data, BNO055_CALDATA_LEN);
+
+static struct bin_attribute *bno055_bin_attrs[] = {
+ &bin_attr_calibration_data,
+ NULL
+};
+
+static const struct attribute_group bno055_attrs_group = {
+ .attrs = bno055_attrs,
+ .bin_attrs = bno055_bin_attrs,
+};
+
+static const struct iio_info bno055_info = {
+ .read_raw_multi = bno055_read_raw_multi,
+ .read_avail = bno055_read_avail,
+ .write_raw = bno055_write_raw,
+ .attrs = &bno055_attrs_group,
+ .debugfs_reg_access = bno055_debugfs_reg_access,
+};
+
+/*
+ * Reads len samples from the HW, stores them in buf starting from buf_idx,
+ * and applies mask to cull (skip) unneeded samples.
+ * Updates buf_idx, incrementing it by the number of stored samples.
+ * Samples from the HW are transferred into buf, then an in-place copy on buf
+ * is performed in order to cull the samples that need to be skipped.
+ * This avoids copying the first samples until we hit the 1st sample to skip,
+ * and also avoids having an extra bounce buffer.
+ * buf must be able to hold len elements regardless of how many samples we
+ * are going to cull.
+ */
+static int bno055_scan_xfer(struct bno055_priv *priv,
+ int start_ch, int len, unsigned long mask,
+ __le16 *buf, int *buf_idx)
+{
+ const int base = BNO055_ACC_DATA_X_LSB_REG;
+ bool quat_in_read = false;
+ int buf_base = *buf_idx;
+ __le16 *dst, *src;
+ int offs_fixup = 0;
+ int xfer_len = len;
+ int ret;
+ int i, n;
+
+ if (!mask)
+ return 0;
+
+ /*
+ * All channels are made up of one 16-bit sample, except for the
+ * quaternion, which is made up of four 16-bit values.
+ * For us the quaternion CH is just like 4 regular CHs.
+ * If our read starts past the quaternion, make sure to adjust the
+ * starting offset; if the quaternion is contained in our scan, then
+ * make sure to adjust the read len.
+ */
+ if (start_ch > BNO055_SCAN_QUATERNION) {
+ start_ch += 3;
+ } else if ((start_ch <= BNO055_SCAN_QUATERNION) &&
+ ((start_ch + len) > BNO055_SCAN_QUATERNION)) {
+ quat_in_read = true;
+ xfer_len += 3;
+ }
+
+ ret = regmap_bulk_read(priv->regmap,
+ base + start_ch * sizeof(__le16),
+ buf + buf_base,
+ xfer_len * sizeof(__le16));
+ if (ret)
+ return ret;
+
+ for_each_set_bit(i, &mask, len) {
+ if (quat_in_read && ((start_ch + i) > BNO055_SCAN_QUATERNION))
+ offs_fixup = 3;
+
+ dst = buf + *buf_idx;
+ src = buf + buf_base + offs_fixup + i;
+
+ n = (start_ch + i == BNO055_SCAN_QUATERNION) ? 4 : 1;
+
+ if (dst != src)
+ memcpy(dst, src, n * sizeof(__le16));
+
+ *buf_idx += n;
+ }
+ return 0;
+}
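+
+/*
+ * Worked example (editor's illustration, values are hypothetical): with
+ * start_ch = 0, len = 6 and mask = 0b101001, all 6 samples are read in one
+ * burst, then samples 3 and 5 are copied down next to sample 0, so buf ends
+ * up holding 3 samples and *buf_idx advances by 3 (more if the quaternion
+ * channel is among the ones kept).
+ */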
+
+static irqreturn_t bno055_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio_dev = pf->indio_dev;
+ struct bno055_priv *priv = iio_priv(iio_dev);
+ int xfer_start, start, end, prev_end;
+ unsigned long mask;
+ int quat_extra_len;
+ bool first = true;
+ int buf_idx = 0;
+ bool thr_hit;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /*
+ * Walk the bitmap, possibly performing several transfers.
+ * Bitmap ones-fields that are separated by gaps <= xfer_burst_break_thr
+ * will be included in the same transfer.
+ * Every time the bitmap contains a gap wider than xfer_burst_break_thr
+ * we split the transfer, skipping the gap.
+ */
+ for_each_set_bitrange(start, end, iio_dev->active_scan_mask,
+ iio_dev->masklength) {
+ /*
+ * The first transfer will start from the beginning of the first
+ * ones-field in the bitmap.
+ */
+ if (first) {
+ xfer_start = start;
+ } else {
+ /*
+ * We found the next ones-field; check whether to
+ * include it in the current transfer or not (i.e.
+ * whether to perform the current transfer now and
+ * prepare for another one).
+ */
+
+ /*
+ * In case the zeros-gap contains the quaternion bit,
+ * then its length is actually 4 words instead of 1
+ * (i.e. +3 wrt other channels).
+ */
+ quat_extra_len = ((start > BNO055_SCAN_QUATERNION) &&
+ (prev_end <= BNO055_SCAN_QUATERNION)) ? 3 : 0;
+
+ /* If the gap is wider than xfer_burst_break_thr then.. */
+ thr_hit = (start - prev_end + quat_extra_len) >
+ priv->xfer_burst_break_thr;
+
+ /*
+ * .. transfer all the data up to the gap. Then set the
+ * next transfer start index to right after the gap
+ * (i.e. at the start of this ones-field).
+ */
+ if (thr_hit) {
+ mask = *iio_dev->active_scan_mask >> xfer_start;
+ ret = bno055_scan_xfer(priv, xfer_start,
+ prev_end - xfer_start,
+ mask, priv->buf.chans, &buf_idx);
+ if (ret)
+ goto done;
+ xfer_start = start;
+ }
+ }
+ first = false;
+ prev_end = end;
+ }
+
+ /*
+ * We finished walking the bitmap; no more gaps to check for. Just
+ * perform the current transfer.
+ */
+ mask = *iio_dev->active_scan_mask >> xfer_start;
+ ret = bno055_scan_xfer(priv, xfer_start,
+ prev_end - xfer_start,
+ mask, priv->buf.chans, &buf_idx);
+
+ if (!ret)
+ iio_push_to_buffers_with_timestamp(iio_dev,
+ &priv->buf, pf->timestamp);
+done:
+ mutex_unlock(&priv->lock);
+ iio_trigger_notify_done(iio_dev->trig);
+ return IRQ_HANDLED;
+}
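+
+/*
+ * Illustrative example (editor's sketch, numbers are hypothetical): with
+ * xfer_burst_break_thr = 3 and an active_scan_mask selecting channels
+ * {0, 1} and {9, 10}, the zeros-gap between the two ranges is 7 (> 3), so
+ * the handler performs two bulk reads, one per range, instead of a single
+ * burst spanning the unused registers in between.
+ */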
+
+static int bno055_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ const unsigned long fusion_mask =
+ BIT(BNO055_SCAN_YAW) |
+ BIT(BNO055_SCAN_ROLL) |
+ BIT(BNO055_SCAN_PITCH) |
+ BIT(BNO055_SCAN_QUATERNION) |
+ BIT(BNO055_SCAN_LIA_X) |
+ BIT(BNO055_SCAN_LIA_Y) |
+ BIT(BNO055_SCAN_LIA_Z) |
+ BIT(BNO055_SCAN_GRAVITY_X) |
+ BIT(BNO055_SCAN_GRAVITY_Y) |
+ BIT(BNO055_SCAN_GRAVITY_Z);
+
+ if (priv->operation_mode == BNO055_OPR_MODE_AMG &&
+ bitmap_intersects(indio_dev->active_scan_mask, &fusion_mask,
+ _BNO055_SCAN_MAX))
+ return -EBUSY;
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops bno055_buffer_setup_ops = {
+ .preenable = bno055_buffer_preenable,
+};
+
+int bno055_probe(struct device *dev, struct regmap *regmap,
+ int xfer_burst_break_thr, bool sw_reset)
+{
+ const struct firmware *caldata = NULL;
+ struct bno055_priv *priv;
+ struct iio_dev *iio_dev;
+ char *fw_name_buf;
+ unsigned int val;
+ int rev, ver;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ iio_dev->name = "bno055";
+ priv = iio_priv(iio_dev);
+ mutex_init(&priv->lock);
+ priv->regmap = regmap;
+ priv->dev = dev;
+ priv->xfer_burst_break_thr = xfer_burst_break_thr;
+ priv->sw_reset = sw_reset;
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO\n");
+
+ priv->clk = devm_clk_get_optional_enabled(dev, "clk");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get CLK\n");
+
+ if (priv->reset_gpio) {
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(650000, 750000);
+ } else if (!sw_reset) {
+ dev_warn(dev, "No usable reset method; IMU may be unreliable\n");
+ }
+
+ ret = regmap_read(priv->regmap, BNO055_CHIP_ID_REG, &val);
+ if (ret)
+ return ret;
+
+ if (val != BNO055_CHIP_ID_MAGIC)
+ dev_warn(dev, "Unrecognized chip ID 0x%x\n", val);
+
+ /*
+ * In case we don't have a HW reset pin, we can still reset the chip via
+ * a register write. This would be pointless if we can't even
+ * communicate with the chip or the chip isn't the one we expect (i.e.
+ * we don't write to unknown chips), so we perform the SW reset only
+ * after the chip magic ID check.
+ */
+ if (!priv->reset_gpio) {
+ ret = bno055_system_reset(priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(priv->regmap, BNO055_SW_REV_LSB_REG, &rev);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(priv->regmap, BNO055_SW_REV_MSB_REG, &ver);
+ if (ret)
+ return ret;
+
+ /*
+ * The stock FW version contains a bug (see comment at the beginning of
+ * this file) that causes the anglvel scale to be changed depending on
+ * the chip range setting. We work around this, but we don't know what
+ * other FW versions might do.
+ */
+ if (ver != 0x3 || rev != 0x11)
+ dev_warn(dev, "Untested firmware version. Anglvel scale may not work as expected\n");
+
+ ret = regmap_bulk_read(priv->regmap, BNO055_UID_LOWER_REG,
+ priv->uid, BNO055_UID_LEN);
+ if (ret)
+ return ret;
+
+ /* Sensor calibration data */
+ fw_name_buf = kasprintf(GFP_KERNEL, BNO055_FW_UID_FMT,
+ BNO055_UID_LEN, priv->uid);
+ if (!fw_name_buf)
+ return -ENOMEM;
+
+ ret = request_firmware(&caldata, fw_name_buf, dev);
+ kfree(fw_name_buf);
+ if (ret)
+ ret = request_firmware(&caldata, BNO055_FW_GENERIC_NAME, dev);
+ if (ret) {
+ dev_notice(dev, "Calibration file load failed. See instruction in kernel Documentation/iio/bno055.rst\n");
+ ret = bno055_init(priv, NULL, 0);
+ } else {
+ ret = bno055_init(priv, caldata->data, caldata->size);
+ release_firmware(caldata);
+ }
+ if (ret)
+ return ret;
+
+ priv->operation_mode = BNO055_OPR_MODE_FUSION;
+ ret = bno055_operation_mode_do_set(priv, priv->operation_mode);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, bno055_uninit, priv);
+ if (ret)
+ return ret;
+
+ iio_dev->channels = bno055_channels;
+ iio_dev->num_channels = ARRAY_SIZE(bno055_channels);
+ iio_dev->info = &bno055_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_triggered_buffer_setup(dev, iio_dev,
+ iio_pollfunc_store_time,
+ bno055_trigger_handler,
+ &bno055_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, iio_dev);
+ if (ret)
+ return ret;
+
+ bno055_debugfs_init(iio_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(bno055_probe, IIO_BNO055);
+
+MODULE_AUTHOR("Andrea Merello <andrea.merello@iit.it>");
+MODULE_DESCRIPTION("Bosch BNO055 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bno055/bno055.h b/drivers/iio/imu/bno055/bno055.h
new file mode 100644
index 000000000000..64f9fc95cebc
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __BNO055_H__
+#define __BNO055_H__
+
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct device;
+int bno055_probe(struct device *dev, struct regmap *regmap,
+ int xfer_burst_break_thr, bool sw_reset);
+extern const struct regmap_config bno055_regmap_config;
+
+#endif
diff --git a/drivers/iio/imu/bno055/bno055_i2c.c b/drivers/iio/imu/bno055/bno055_i2c.c
new file mode 100644
index 000000000000..c1bbc0fe34f9
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055_i2c.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for I2C-interfaced Bosch BNO055 IMU.
+ *
+ * Copyright (C) 2021-2022 Istituto Italiano di Tecnologia
+ * Electronic Design Laboratory
+ * Written by Andrea Merello <andrea.merello@iit.it>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bno055.h"
+
+#define BNO055_I2C_XFER_BURST_BREAK_THRESHOLD 3
+
+static int bno055_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &bno055_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap),
+ "Unable to init register map");
+
+ return bno055_probe(&client->dev, regmap,
+ BNO055_I2C_XFER_BURST_BREAK_THRESHOLD, true);
+}
+
+static const struct i2c_device_id bno055_i2c_id[] = {
+ {"bno055", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bno055_i2c_id);
+
+static const struct of_device_id __maybe_unused bno055_i2c_of_match[] = {
+ { .compatible = "bosch,bno055" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bno055_i2c_of_match);
+
+static struct i2c_driver bno055_driver = {
+ .driver = {
+ .name = "bno055-i2c",
+ .of_match_table = bno055_i2c_of_match,
+ },
+ .probe_new = bno055_i2c_probe,
+ .id_table = bno055_i2c_id,
+};
+module_i2c_driver(bno055_driver);
+
+MODULE_AUTHOR("Andrea Merello");
+MODULE_DESCRIPTION("Bosch BNO055 I2C interface");
+MODULE_IMPORT_NS(IIO_BNO055);
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bno055/bno055_ser_core.c b/drivers/iio/imu/bno055/bno055_ser_core.c
new file mode 100644
index 000000000000..57728a568471
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055_ser_core.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Serial line interface for Bosch BNO055 IMU (via serdev).
+ * This file implements serial communication up to the register read/write
+ * level.
+ *
+ * Copyright (C) 2021-2022 Istituto Italiano di Tecnologia
+ * Electronic Design Laboratory
+ * Written by Andrea Merello <andrea.merello@iit.it>
+ *
+ * This driver is based on
+ * Plantower PMS7003 particulate matter sensor driver
+ * Which is
+ * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/serdev.h>
+
+#include "bno055_ser_trace.h"
+#include "bno055.h"
+
+/*
+ * Register write commands have the following format
+ * +------+------+-----+-----+----- ... ----+
+ * | 0xAA | 0x00 | REG | LEN | payload[LEN] |
+ * +------+------+-----+-----+----- ... ----+
+ *
+ * Register write responses have the following format
+ * +------+----------+
+ * | 0xEE | ERRCODE  |
+ * +------+----------+
+ *
+ * .. except when writing the SYS_RST bit (i.e. triggering a system reset): if
+ * the IMU accepts the command, then it resets without responding. We don't
+ * handle this (yet) here (so we inform the common bno055 code not to perform
+ * SW resets - the bno055 on a serial bus basically requires the HW reset pin).
+ *
+ * Register read commands have the following format
+ * +------+------+-----+-----+
+ * | 0xAA | 0x01 | REG | LEN |
+ * +------+------+-----+-----+
+ *
+ * Successful register read responses have the following format
+ * +------+-----+----- ... ----+
+ * | 0xBB | LEN | payload[LEN] |
+ * +------+-----+----- ... ----+
+ *
+ * Failed register read responses have the following format
+ * +------+--------+
+ * | 0xEE | ERRCODE| (ERRCODE always > 1)
+ * +------+--------+
+ *
+ * Error codes are
+ * 01: OK
+ * 02: read/write FAIL
+ * 04: invalid address
+ * 05: write on RO
+ * 06: wrong start byte
+ * 07: bus overrun
+ * 08: len too high
+ * 09: len too low
+ * 10: bus RX byte timeout (timeout is 30mS)
+ *
+ *
+ * **WORKAROUND ALERT**
+ *
+ * Serial communication seems very fragile: the BNO055 buffer seems to overflow
+ * very easily; the BNO055 seems able to sink a few bytes, then it needs a brief
+ * pause. On the other hand, it is also picky about timeouts: if there is a
+ * pause > 30mS between two bytes then the transaction fails (the IMU internal
+ * RX FSM resets).
+ *
+ * The BNO055 has also been seen failing to process commands when we send them
+ * too close to each other (or if it is somehow busy?)
+ *
+ * In particular I saw these scenarios:
+ * 1) If we send 2 bytes at a time, then the IMU never(?) overflows.
+ * 2) If we send 4 bytes at a time (i.e. the full header), then the IMU could
+ * overflow, but it seems to sink all 4 bytes, then it returns an error.
+ * 3) If we send more than 4 bytes, the IMU could overflow, and I saw it sending
+ * an error after 4 bytes were sent; we have trouble synchronizing again,
+ * because we are still sending data, and the IMU interprets it as the 1st
+ * byte of a new command.
+ *
+ * While we must avoid case 3, we could send 4 bytes at a time and simply
+ * retry in case of failure; this seemed convenient for reads (which require
+ * TXing exactly 4 bytes), however it has been seen that, depending on the IMU
+ * settings (e.g. LPF), failures become more or less frequent; in certain IMU
+ * configurations they are very rare, but in certain others we keep failing
+ * even after some 30 retries.
+ *
+ * So, we just split TXes into [2-bytes + delay] steps, and still keep an eye on
+ * the IMU response; in case it overflows (which is now unlikely), we retry.
+ */
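+
+/*
+ * Example transaction (editor's illustration; register and length are
+ * arbitrary): to read 6 bytes starting at register 0x08 the driver sends
+ *   AA 01 08 06
+ * and, on success, receives
+ *   BB 06 <6 payload bytes>
+ * while a failure would look like
+ *   EE 07  (bus overrun)
+ */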
+
+/*
+ * Read operation overhead:
+ * 4-byte req + 2-byte resp hdr.
+ * 6 bytes = 60 bits (considering 1 start + 1 stop bit per byte).
+ * 60/115200 = ~520uS + about 2500uS delay -> ~3mS
+ * In 3mS we could read back about 34 bytes, which means 17 samples; this means
+ * that in case of scattered reads in which the gap is 17 samples or less it is
+ * still convenient to go for a burst.
+ * We also have to take into account the IMU response time - the IMU often
+ * seems reasonably quick to respond, but sometimes it seems to be in some
+ * "critical section" in which it delays handling of the serial protocol.
+ * Because of this we round up to 22, which is the max number of samples, so
+ * we always burst indeed.
+ */
+#define BNO055_SER_XFER_BURST_BREAK_THRESHOLD 22
+
+struct bno055_ser_priv {
+ enum {
+ CMD_NONE,
+ CMD_READ,
+ CMD_WRITE,
+ } expect_response;
+ int expected_data_len;
+ u8 *response_buf;
+
+ /**
+ * enum cmd_status - represents the status of a command sent to the HW.
+ * @STATUS_CRIT: The command failed: the serial communication failed.
+ * @STATUS_OK: The command executed successfully.
+ * @STATUS_FAIL: The command failed: HW responded with an error.
+ */
+ enum {
+ STATUS_CRIT = -1,
+ STATUS_OK = 0,
+ STATUS_FAIL = 1,
+ } cmd_status;
+
+ /*
+ * Protects all the above fields, which are accessed on behalf of both
+ * the serdev RX callback and the regmap side.
+ */
+ struct mutex lock;
+
+ /* Only accessed in serdev RX callback context */
+ struct {
+ enum {
+ RX_IDLE,
+ RX_START,
+ RX_DATA,
+ } state;
+ int databuf_count;
+ int expected_len;
+ int type;
+ } rx;
+
+ /* Never accessed on behalf of serdev RX callback context */
+ bool cmd_stale;
+
+ struct completion cmd_complete;
+ struct serdev_device *serdev;
+};
+
+static int bno055_ser_send_chunk(struct bno055_ser_priv *priv, const u8 *data, int len)
+{
+ int ret;
+
+ trace_send_chunk(len, data);
+ ret = serdev_device_write(priv->serdev, data, len, msecs_to_jiffies(25));
+ if (ret < 0)
+ return ret;
+
+ if (ret < len)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Send a read or write command.
+ * 'data' can be NULL (as in the read case). The 'len' parameter is always
+ * valid; in case 'data' is non-NULL, 'len' must match the 'data' size.
+ */
+static int bno055_ser_do_send_cmd(struct bno055_ser_priv *priv,
+ bool read, int addr, int len, const u8 *data)
+{
+ u8 hdr[] = {0xAA, read, addr, len};
+ int chunk_len;
+ int ret;
+
+ ret = bno055_ser_send_chunk(priv, hdr, 2);
+ if (ret)
+ goto fail;
+ usleep_range(2000, 3000);
+ ret = bno055_ser_send_chunk(priv, hdr + 2, 2);
+ if (ret)
+ goto fail;
+
+ if (read)
+ return 0;
+
+ while (len) {
+ chunk_len = min(len, 2);
+ usleep_range(2000, 3000);
+ ret = bno055_ser_send_chunk(priv, data, chunk_len);
+ if (ret)
+ goto fail;
+ data += chunk_len;
+ len -= chunk_len;
+ }
+
+ return 0;
+fail:
+ /* waiting more than 30mS should clear the BNO055 internal state */
+ usleep_range(40000, 50000);
+ return ret;
+}
+
+static int bno055_ser_send_cmd(struct bno055_ser_priv *priv,
+ bool read, int addr, int len, const u8 *data)
+{
+ const int retry_max = 5;
+ int retry = retry_max;
+ int ret = 0;
+
+ /*
+ * In case the previous command was interrupted, we still need to wait for
+ * it to complete before we can issue new commands.
+ */
+ if (priv->cmd_stale) {
+ ret = wait_for_completion_interruptible_timeout(&priv->cmd_complete,
+ msecs_to_jiffies(100));
+ if (ret == -ERESTARTSYS)
+ return -ERESTARTSYS;
+
+ priv->cmd_stale = false;
+ /* if serial protocol broke, bail out */
+ if (priv->cmd_status == STATUS_CRIT)
+ return -EIO;
+ }
+
+ /*
+ * Try to convince the IMU to cooperate... As explained in the comments
+ * at the top of this file, the IMU could also refuse the command (i.e.
+ * it is not ready yet); retry in this case.
+ */
+ do {
+ mutex_lock(&priv->lock);
+ priv->expect_response = read ? CMD_READ : CMD_WRITE;
+ reinit_completion(&priv->cmd_complete);
+ mutex_unlock(&priv->lock);
+
+ if (retry != retry_max)
+ trace_cmd_retry(read, addr, retry_max - retry);
+ ret = bno055_ser_do_send_cmd(priv, read, addr, len, data);
+ if (ret)
+ continue;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->cmd_complete,
+ msecs_to_jiffies(100));
+ if (ret == -ERESTARTSYS) {
+ priv->cmd_stale = true;
+ return -ERESTARTSYS;
+ }
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ if (priv->cmd_status == STATUS_OK)
+ return 0;
+ if (priv->cmd_status == STATUS_CRIT)
+ return -EIO;
+
+ /* loop in case priv->cmd_status == STATUS_FAIL */
+ } while (--retry);
+
+ if (ret < 0)
+ return ret;
+ if (priv->cmd_status == STATUS_FAIL)
+ return -EINVAL;
+ return 0;
+}
+
+static int bno055_ser_write_reg(void *context, const void *_data, size_t count)
+{
+ const u8 *data = _data;
+ struct bno055_ser_priv *priv = context;
+
+ if (count < 2) {
+ dev_err(&priv->serdev->dev, "Invalid write count %zu", count);
+ return -EINVAL;
+ }
+
+ trace_write_reg(data[0], data[1]);
+ return bno055_ser_send_cmd(priv, 0, data[0], count - 1, data + 1);
+}
+
+static int bno055_ser_read_reg(void *context,
+ const void *_reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int ret;
+ int reg_addr;
+ const u8 *reg = _reg;
+ struct bno055_ser_priv *priv = context;
+
+ if (val_size > 128) {
+ dev_err(&priv->serdev->dev, "Invalid read valsize %zu", val_size);
+ return -EINVAL;
+ }
+
+ reg_addr = *reg;
+ trace_read_reg(reg_addr, val_size);
+ mutex_lock(&priv->lock);
+ priv->expected_data_len = val_size;
+ priv->response_buf = val;
+ mutex_unlock(&priv->lock);
+
+ ret = bno055_ser_send_cmd(priv, 1, reg_addr, val_size, NULL);
+
+ mutex_lock(&priv->lock);
+ priv->response_buf = NULL;
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/*
+ * Handler for received data; this is called from the receiver callback whenever
+ * it gets some packet from the serial bus. The status tells us whether the
+ * packet is valid (i.e. header ok && received payload len consistent wrt the
+ * header). It's now our responsibility to check whether this is what we
+ * expected, or whether we got some unexpected, yet valid, packet.
+ */
+static void bno055_ser_handle_rx(struct bno055_ser_priv *priv, int status)
+{
+ mutex_lock(&priv->lock);
+ switch (priv->expect_response) {
+ case CMD_NONE:
+ dev_warn(&priv->serdev->dev, "received unexpected, yet valid, data from sensor");
+ mutex_unlock(&priv->lock);
+ return;
+
+ case CMD_READ:
+ priv->cmd_status = status;
+ if (status == STATUS_OK &&
+ priv->rx.databuf_count != priv->expected_data_len) {
+ /*
+ * If we got here, then the lower layer serial protocol
+ * seems consistent with itself; if we got an unexpected
+ * amount of data then signal it as a non-critical error.
+ */
+ priv->cmd_status = STATUS_FAIL;
+ dev_warn(&priv->serdev->dev,
+ "received an unexpected amount of, yet valid, data from sensor");
+ }
+ break;
+
+ case CMD_WRITE:
+ priv->cmd_status = status;
+ break;
+ }
+
+ priv->expect_response = CMD_NONE;
+ mutex_unlock(&priv->lock);
+ complete(&priv->cmd_complete);
+}
+
+/*
+ * Serdev receiver FSM. This tracks the serial communication and parses the
+ * header. It pushes packets to bno055_ser_handle_rx(), also communicating
+ * failures (i.e. malformed packets).
+ * Ideally it doesn't know anything about the upper layer (i.e. whether this is
+ * the packet we were really expecting), but since we copy the payload into the
+ * receiver buffer (which is not valid when e.g. we don't expect data), we
+ * snoop a bit in the upper layer...
+ * Also, we assume we RX one packet at a time (i.e. the HW doesn't send anything
+ * unless we ask for it AND we don't queue more than one request at a time).
+ */
+static int bno055_ser_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ int status;
+ struct bno055_ser_priv *priv = serdev_device_get_drvdata(serdev);
+ int remaining = size;
+
+ if (size == 0)
+ return 0;
+
+ trace_recv(size, buf);
+ switch (priv->rx.state) {
+ case RX_IDLE:
+ /*
+ * New packet.
+ * Check its 1st byte, which identifies the pkt type.
+ */
+ if (buf[0] != 0xEE && buf[0] != 0xBB) {
+ dev_err(&priv->serdev->dev,
+ "Invalid packet start %x", buf[0]);
+ bno055_ser_handle_rx(priv, STATUS_CRIT);
+ break;
+ }
+ priv->rx.type = buf[0];
+ priv->rx.state = RX_START;
+ remaining--;
+ buf++;
+ priv->rx.databuf_count = 0;
+ fallthrough;
+
+ case RX_START:
+ /*
+ * Packet RX in progress; we expect either a 1-byte len or a 1-byte
+ * status, depending on the packet type.
+ */
+ if (remaining == 0)
+ break;
+
+ if (priv->rx.type == 0xEE) {
+ if (remaining > 1) {
+ dev_err(&priv->serdev->dev, "EE pkt. Extra data received");
+ status = STATUS_CRIT;
+ } else {
+ status = (buf[0] == 1) ? STATUS_OK : STATUS_FAIL;
+ }
+ bno055_ser_handle_rx(priv, status);
+ priv->rx.state = RX_IDLE;
+ break;
+
+ } else {
+ /* priv->rx.type == 0xBB */
+ priv->rx.state = RX_DATA;
+ priv->rx.expected_len = buf[0];
+ remaining--;
+ buf++;
+ }
+ fallthrough;
+
+ case RX_DATA:
+ /* Header parsed; now receiving packet data payload */
+ if (remaining == 0)
+ break;
+
+ if (priv->rx.databuf_count + remaining > priv->rx.expected_len) {
+ /*
+ * This is an inconsistency in the serial protocol; we lost
+ * sync and we don't know how to handle further data.
+ */
+ dev_err(&priv->serdev->dev, "BB pkt. Extra data received");
+ bno055_ser_handle_rx(priv, STATUS_CRIT);
+ priv->rx.state = RX_IDLE;
+ break;
+ }
+
+ mutex_lock(&priv->lock);
+ /*
+ * NULL e.g. when read cmd is stale or when no read cmd is
+ * actually pending.
+ */
+ if (priv->response_buf &&
+ /*
+ * Snoop on the upper layer protocol stuff to make sure not
+ * to write to invalid memory. Apart from this, let the
+ * upper layer manage any inconsistency wrt the expected data
+ * len (as long as the serial protocol is consistent wrt
+ * itself, i.e. the response header is consistent with the
+ * received response len).
+ */
+ (priv->rx.databuf_count + remaining <= priv->expected_data_len))
+ memcpy(priv->response_buf + priv->rx.databuf_count,
+ buf, remaining);
+ mutex_unlock(&priv->lock);
+
+ priv->rx.databuf_count += remaining;
+
+ /*
+ * Reached expected len advertised by the IMU for the current
+ * packet. Pass it to the upper layer (for us it is just valid).
+ */
+ if (priv->rx.databuf_count == priv->rx.expected_len) {
+ bno055_ser_handle_rx(priv, STATUS_OK);
+ priv->rx.state = RX_IDLE;
+ }
+ break;
+ }
+
+ return size;
+}
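+
+/*
+ * Reception example (editor's sketch): a successful 2-byte read may arrive
+ * split across callbacks, e.g. first "BB 02" (header parsed, state moves
+ * RX_IDLE -> RX_START -> RX_DATA) and then the 2 payload bytes, at which
+ * point databuf_count reaches expected_len and the packet is passed to
+ * bno055_ser_handle_rx() with STATUS_OK.
+ */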
+
+static const struct serdev_device_ops bno055_ser_serdev_ops = {
+ .receive_buf = bno055_ser_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static struct regmap_bus bno055_ser_regmap_bus = {
+ .write = bno055_ser_write_reg,
+ .read = bno055_ser_read_reg,
+};
+
+static int bno055_ser_probe(struct serdev_device *serdev)
+{
+ struct bno055_ser_priv *priv;
+ struct regmap *regmap;
+ int ret;
+
+ priv = devm_kzalloc(&serdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ serdev_device_set_drvdata(serdev, priv);
+ priv->serdev = serdev;
+ mutex_init(&priv->lock);
+ init_completion(&priv->cmd_complete);
+
+ serdev_device_set_client_ops(serdev, &bno055_ser_serdev_ops);
+ ret = devm_serdev_device_open(&serdev->dev, serdev);
+ if (ret)
+ return ret;
+
+ if (serdev_device_set_baudrate(serdev, 115200) != 115200) {
+ dev_err(&serdev->dev, "Cannot set required baud rate");
+ return -EIO;
+ }
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret) {
+ dev_err(&serdev->dev, "Cannot set required parity setting");
+ return ret;
+ }
+ serdev_device_set_flow_control(serdev, false);
+
+ regmap = devm_regmap_init(&serdev->dev, &bno055_ser_regmap_bus,
+ priv, &bno055_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&serdev->dev, PTR_ERR(regmap),
+ "Unable to init register map");
+
+ return bno055_probe(&serdev->dev, regmap,
+ BNO055_SER_XFER_BURST_BREAK_THRESHOLD, false);
+}
+
+static const struct of_device_id bno055_ser_of_match[] = {
+ { .compatible = "bosch,bno055" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bno055_ser_of_match);
+
+static struct serdev_device_driver bno055_ser_driver = {
+ .driver = {
+ .name = "bno055-ser",
+ .of_match_table = bno055_ser_of_match,
+ },
+ .probe = bno055_ser_probe,
+};
+module_serdev_device_driver(bno055_ser_driver);
+
+MODULE_AUTHOR("Andrea Merello <andrea.merello@iit.it>");
+MODULE_DESCRIPTION("Bosch BNO055 serdev interface");
+MODULE_IMPORT_NS(IIO_BNO055);
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bno055/bno055_ser_trace.c b/drivers/iio/imu/bno055/bno055_ser_trace.c
new file mode 100644
index 000000000000..48397b66daef
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055_ser_trace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * bno055_ser Trace Support
+ * Copyright (C) 2022 Istituto Italiano di Tecnologia
+ * Electronic Design Laboratory
+ *
+ * Based on:
+ * Device core Trace Support
+ * Copyright (C) 2021, Intel Corporation
+ */
+
+#define CREATE_TRACE_POINTS
+#include "bno055_ser_trace.h"
diff --git a/drivers/iio/imu/bno055/bno055_ser_trace.h b/drivers/iio/imu/bno055/bno055_ser_trace.h
new file mode 100644
index 000000000000..7d9eae166eec
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055_ser_trace.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(__BNO055_SERDEV_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __BNO055_SERDEV_TRACE_H__
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bno055_ser
+
+TRACE_EVENT(send_chunk,
+ TP_PROTO(int len, const u8 *data),
+ TP_ARGS(len, data),
+ TP_STRUCT__entry(
+ __field(int, len)
+ __dynamic_array(u8, chunk, len)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ memcpy(__get_dynamic_array(chunk),
+ data, __entry->len);
+ ),
+ TP_printk("len: %d, data: = %*ph",
+ __entry->len, __entry->len, __get_dynamic_array(chunk)
+ )
+);
+
+TRACE_EVENT(cmd_retry,
+ TP_PROTO(bool read, int addr, int retry),
+ TP_ARGS(read, addr, retry),
+ TP_STRUCT__entry(
+ __field(bool, read)
+ __field(int, addr)
+ __field(int, retry)
+ ),
+ TP_fast_assign(
+ __entry->read = read;
+ __entry->addr = addr;
+ __entry->retry = retry;
+ ),
+ TP_printk("%s addr 0x%x retry #%d",
+ __entry->read ? "read" : "write",
+ __entry->addr, __entry->retry
+ )
+);
+
+TRACE_EVENT(write_reg,
+ TP_PROTO(u8 addr, u8 value),
+ TP_ARGS(addr, value),
+ TP_STRUCT__entry(
+ __field(u8, addr)
+ __field(u8, value)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->value = value;
+ ),
+ TP_printk("reg 0x%x = 0x%x",
+ __entry->addr, __entry->value
+ )
+);
+
+TRACE_EVENT(read_reg,
+ TP_PROTO(int addr, size_t len),
+ TP_ARGS(addr, len),
+ TP_STRUCT__entry(
+ __field(int, addr)
+ __field(size_t, len)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->len = len;
+ ),
+ TP_printk("reg 0x%x (len %zu)",
+ __entry->addr, __entry->len
+ )
+);
+
+TRACE_EVENT(recv,
+ TP_PROTO(size_t len, const unsigned char *buf),
+ TP_ARGS(len, buf),
+ TP_STRUCT__entry(
+ __field(size_t, len)
+ __dynamic_array(unsigned char, buf, len)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf),
+ buf, __entry->len);
+ ),
+ TP_printk("len: %zu, data: = %*ph",
+ __entry->len, (int)__entry->len, __get_dynamic_array(buf)
+ )
+);
+
+#endif /* __BNO055_SERDEV_TRACE_H__ || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE bno055_ser_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 9b4298095d3f..f7bce428d9eb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -65,7 +65,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
sub_elem = &elem->package.elements[j];
if (sub_elem->type == ACPI_TYPE_STRING)
- strlcpy(info->type, sub_elem->string.pointer,
+ strscpy(info->type, sub_elem->string.pointer,
sizeof(info->type));
else if (sub_elem->type == ACPI_TYPE_INTEGER) {
if (sub_elem->integer.value != client->addr) {
@@ -158,7 +158,7 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
char *name;
info.addr = secondary;
- strlcpy(info.type, dev_name(&adev->dev),
+ strscpy(info.type, dev_name(&adev->dev),
sizeof(info.type));
name = strchr(info.type, ':');
if (name)
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index fefd0b939100..2ed2b3f40c0b 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -12,7 +12,7 @@ config IIO_ST_LSM6DSX
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr,
- lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop,
+ lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, lsm6dstx,
the accelerometer/gyroscope of lsm9ds1 and lsm6dst.
To compile this driver as a module, choose M here: the module
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index a86dd29a4738..6b57d47be69e 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -32,6 +32,7 @@
#define ST_LSM6DST_DEV_NAME "lsm6dst"
#define ST_LSM6DSOP_DEV_NAME "lsm6dsop"
#define ST_ASM330LHHX_DEV_NAME "asm330lhhx"
+#define ST_LSM6DSTX_DEV_NAME "lsm6dstx"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -51,6 +52,7 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DST_ID,
ST_LSM6DSOP_ID,
ST_ASM330LHHX_ID,
+ ST_LSM6DSTX_ID,
ST_LSM6DSX_MAX_ID,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index c7d3730ab1c5..e49f2d120ed3 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -15,7 +15,7 @@
* value of the decimation factor and ODR set for each FIFO data set.
*
* LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/
- * LSM6DST/LSM6DSOP:
+ * LSM6DST/LSM6DSOP/LSM6DSTX:
* The FIFO buffer can be configured to store data from gyroscope and
* accelerometer. Each sample is queued with a tag (1B) indicating data
* source (gyroscope, accelerometer, hw timer).
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b5e4a4113652..f8bbb005718e 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -26,7 +26,8 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
- * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
+ * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP/
+ * LSM6DSTX:
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
* 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
@@ -791,6 +792,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.hw_id = ST_ASM330LHHX_ID,
.name = ST_ASM330LHHX_DEV_NAME,
.wai = 0x6b,
+ }, {
+ .hw_id = ST_LSM6DSTX_ID,
+ .name = ST_LSM6DSTX_DEV_NAME,
+ .wai = 0x6d,
},
},
.channels = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 2ea34c0d3a8c..307c8c436862 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -105,6 +105,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,asm330lhhx",
.data = (void *)ST_ASM330LHHX_ID,
},
+ {
+ .compatible = "st,lsm6dstx",
+ .data = (void *)ST_LSM6DSTX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -127,6 +131,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
{ ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
{ ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
+ { ST_LSM6DSTX_DEV_NAME, ST_LSM6DSTX_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 6a8883f022a8..6a4eecf4bb05 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -105,6 +105,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,asm330lhhx",
.data = (void *)ST_ASM330LHHX_ID,
},
+ {
+ .compatible = "st,lsm6dstx",
+ .data = (void *)ST_LSM6DSTX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -127,6 +131,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
{ ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
{ ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
+ { ST_LSM6DSTX_DEV_NAME, ST_LSM6DSTX_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index acc2b6c05d57..228598b82a2f 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -843,8 +843,8 @@ static int iio_verify_update(struct iio_dev *indio_dev,
* to verify.
*/
if (remove_buffer && !insert_buffer &&
- list_is_singular(&iio_dev_opaque->buffer_list))
- return 0;
+ list_is_singular(&iio_dev_opaque->buffer_list))
+ return 0;
modes = indio_dev->modes;
@@ -940,6 +940,7 @@ struct iio_demux_table {
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
struct iio_demux_table *p, *q;
+
list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
list_del(&p->l);
kfree(p);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 0f4dbda3b9d3..151ff3993354 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -134,6 +134,12 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_ETHANOL] = "ethanol",
[IIO_MOD_H2] = "h2",
[IIO_MOD_O2] = "o2",
+ [IIO_MOD_LINEAR_X] = "linear_x",
+ [IIO_MOD_LINEAR_Y] = "linear_y",
+ [IIO_MOD_LINEAR_Z] = "linear_z",
+ [IIO_MOD_PITCH] = "pitch",
+ [IIO_MOD_YAW] = "yaw",
+ [IIO_MOD_ROLL] = "roll",
};
/* relies on pairs of these shared then separate */
@@ -168,6 +174,7 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
+ [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
};
/**
* iio_device_id() - query the unique ID for the device
@@ -236,6 +243,7 @@ static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
@@ -447,6 +455,7 @@ static const struct file_operations iio_debugfs_reg_fops = {
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}
@@ -1021,6 +1030,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
int ret = 0;
char *name = NULL;
char *full_postfix;
+
sysfs_attr_init(&dev_attr->attr);
/* Build up postfix of <extend_name>_<modifier>_postfix */
@@ -1299,8 +1309,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
IIO_SEPARATE,
- &chan->
- info_mask_separate_available);
+ &chan->info_mask_separate_available);
if (ret < 0)
return ret;
attrcount += ret;
@@ -1314,8 +1323,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
IIO_SHARED_BY_TYPE,
- &chan->
- info_mask_shared_by_type_available);
+ &chan->info_mask_shared_by_type_available);
if (ret < 0)
return ret;
attrcount += ret;
@@ -1355,6 +1363,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
if (chan->ext_info) {
unsigned int i = 0;
+
for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
ret = __iio_add_chan_devattr(ext_info->name,
chan,
@@ -1403,6 +1412,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+
return sysfs_emit(buf, "%s\n", indio_dev->name);
}
@@ -1412,6 +1422,7 @@ static ssize_t label_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+
return sysfs_emit(buf, "%s\n", indio_dev->label);
}
@@ -1565,7 +1576,7 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_clear_attrs;
}
- /* Copy across original attributes */
+ /* Copy across original attributes, and point to original binary attributes */
if (indio_dev->info->attrs) {
memcpy(iio_dev_opaque->chan_attr_group.attrs,
indio_dev->info->attrs->attrs,
@@ -1573,6 +1584,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
*attrcount_orig);
iio_dev_opaque->chan_attr_group.is_visible =
indio_dev->info->attrs->is_visible;
+ iio_dev_opaque->chan_attr_group.bin_attrs =
+ indio_dev->info->attrs->bin_attrs;
}
attrn = attrcount_orig;
/* Add all elements from the list. */
@@ -1621,6 +1634,8 @@ static void iio_dev_release(struct device *device)
iio_device_detach_buffers(indio_dev);
+ lockdep_unregister_key(&iio_dev_opaque->mlock_key);
+
ida_free(&iio_ida, iio_dev_opaque->id);
kfree(iio_dev_opaque);
}
@@ -1680,6 +1695,9 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
+ lockdep_register_key(&iio_dev_opaque->mlock_key);
+ lockdep_set_class(&indio_dev->mlock, &iio_dev_opaque->mlock_key);
+
return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);
@@ -1777,6 +1795,7 @@ static int iio_chrdev_release(struct inode *inode, struct file *filp)
struct iio_dev_opaque *iio_dev_opaque =
container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
+
kfree(ib);
clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
iio_device_put(indio_dev);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index b5e059e15b0a..3d78da2531a9 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -231,12 +231,15 @@ static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
[IIO_EV_TYPE_CHANGE] = "change",
[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
+ [IIO_EV_TYPE_GESTURE] = "gesture",
};
static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_EITHER] = "either",
[IIO_EV_DIR_RISING] = "rising",
- [IIO_EV_DIR_FALLING] = "falling"
+ [IIO_EV_DIR_FALLING] = "falling",
+ [IIO_EV_DIR_SINGLETAP] = "singletap",
+ [IIO_EV_DIR_DOUBLETAP] = "doubletap",
};
static const char * const iio_ev_info_text[] = {
@@ -247,6 +250,8 @@ static const char * const iio_ev_info_text[] = {
[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
[IIO_EV_INFO_TIMEOUT] = "timeout",
+ [IIO_EV_INFO_RESET_TIMEOUT] = "reset_timeout",
+ [IIO_EV_INFO_TAP2_MIN_DELAY] = "tap2_min_delay",
};
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
@@ -354,9 +359,10 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
enum iio_shared_by shared_by, const unsigned long *mask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- ssize_t (*show)(struct device *, struct device_attribute *, char *);
- ssize_t (*store)(struct device *, struct device_attribute *,
- const char *, size_t);
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len);
unsigned int attrcount = 0;
unsigned int i;
char *postfix;
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index b78814d869b7..6885a186fe27 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -50,6 +50,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
+
return sysfs_emit(buf, "%s\n", trig->name);
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index df74765d33dc..872fd5c24147 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -5,9 +5,9 @@
*/
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
@@ -45,13 +45,13 @@ int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
int i = 0, ret = 0;
struct iio_map_internal *mapi;
- if (maps == NULL)
+ if (!maps)
return 0;
mutex_lock(&iio_map_list_lock);
- while (maps[i].consumer_dev_name != NULL) {
+ while (maps[i].consumer_dev_name) {
mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
- if (mapi == NULL) {
+ if (!mapi) {
ret = -ENOMEM;
goto error_ret;
}
@@ -69,7 +69,6 @@ error_ret:
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
-
/*
* Remove all map entries associated with the given iio device
*/
@@ -117,15 +116,8 @@ static const struct iio_chan_spec
return chan;
}
-#ifdef CONFIG_OF
-
-static int iio_dev_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data && dev->type == &iio_device_type;
-}
-
/**
- * __of_iio_simple_xlate - translate iiospec to the IIO channel index
+ * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
* @indio_dev: pointer to the iio_dev structure
* @iiospec: IIO specifier as found in the device tree
*
@@ -134,14 +126,14 @@ static int iio_dev_node_match(struct device *dev, const void *data)
* whether IIO index is less than num_channels (that is specified in the
* iio_dev).
*/
-static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
- if (!iiospec->args_count)
+ if (!iiospec->nargs)
return 0;
if (iiospec->args[0] >= indio_dev->num_channels) {
- dev_err(&indio_dev->dev, "invalid channel index %u\n",
+ dev_err(&indio_dev->dev, "invalid channel index %llu\n",
iiospec->args[0]);
return -EINVAL;
}
@@ -149,32 +141,33 @@ static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
return iiospec->args[0];
}
-static int __of_iio_channel_get(struct iio_channel *channel,
- struct device_node *np, int index)
+static int __fwnode_iio_channel_get(struct iio_channel *channel,
+ struct fwnode_handle *fwnode, int index)
{
+ struct fwnode_reference_args iiospec;
struct device *idev;
struct iio_dev *indio_dev;
int err;
- struct of_phandle_args iiospec;
- err = of_parse_phandle_with_args(np, "io-channels",
- "#io-channel-cells",
- index, &iiospec);
+ err = fwnode_property_get_reference_args(fwnode, "io-channels",
+ "#io-channel-cells", 0,
+ index, &iiospec);
if (err)
return err;
- idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
- iio_dev_node_match);
- of_node_put(iiospec.np);
- if (idev == NULL)
+ idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
+ if (!idev) {
+ fwnode_handle_put(iiospec.fwnode);
return -EPROBE_DEFER;
+ }
indio_dev = dev_to_iio_dev(idev);
channel->indio_dev = indio_dev;
- if (indio_dev->info->of_xlate)
- index = indio_dev->info->of_xlate(indio_dev, &iiospec);
+ if (indio_dev->info->fwnode_xlate)
+ index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
else
- index = __of_iio_simple_xlate(indio_dev, &iiospec);
+ index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
+ fwnode_handle_put(iiospec.fwnode);
if (index < 0)
goto err_put;
channel->channel = &indio_dev->channels[index];
@@ -186,7 +179,8 @@ err_put:
return index;
}
-static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
+static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
+ int index)
{
struct iio_channel *channel;
int err;
@@ -195,10 +189,10 @@ static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
return ERR_PTR(-EINVAL);
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (channel == NULL)
+ if (!channel)
return ERR_PTR(-ENOMEM);
- err = __of_iio_channel_get(channel, np, index);
+ err = __fwnode_iio_channel_get(channel, fwnode, index);
if (err)
goto err_free_channel;
@@ -209,74 +203,116 @@ err_free_channel:
return ERR_PTR(err);
}
-struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
- const char *name)
+static struct iio_channel *
+__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
- struct iio_channel *chan = NULL;
-
- /* Walk up the tree of devices looking for a matching iio channel */
- while (np) {
- int index = 0;
-
+ struct iio_channel *chan;
+ int index = 0;
+
+ /*
+ * For named iio channels, first look up the name in the
+ * "io-channel-names" property. If it cannot be found, the
+ * index will be an error code, and fwnode_iio_channel_get()
+ * will fail.
+ */
+ if (name)
+ index = fwnode_property_match_string(fwnode, "io-channel-names",
+ name);
+
+ chan = fwnode_iio_channel_get(fwnode, index);
+ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+ if (name) {
+ if (index >= 0) {
+ pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
+ fwnode, name, index);
+ /*
+			 * In this case, we found 'name' in 'io-channel-names',
+			 * but the lookup still failed, so no other lookup should
+			 * be attempted. Hence, explicitly return -EINVAL (maybe
+			 * not the best error code) so that the caller won't fall
+			 * back to a system-wide lookup.
+ */
+ return ERR_PTR(-EINVAL);
+ }
/*
- * For named iio channels, first look up the name in the
- * "io-channel-names" property. If it cannot be found, the
- * index will be an error code, and of_iio_channel_get()
- * will fail.
+		 * If index < 0, fwnode_property_get_reference_args() fails
+		 * with -EINVAL or -ENOENT (in the ACPI case), which is
+		 * expected. Do not proceed on any other error.
*/
- if (name)
- index = of_property_match_string(np, "io-channel-names",
- name);
- chan = of_iio_channel_get(np, index);
- if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
- break;
- else if (name && index >= 0) {
- pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
- np, name ? name : "", index);
- return NULL;
- }
-
+ if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
+ return chan;
+ } else if (PTR_ERR(chan) != -ENOENT) {
/*
- * No matching IIO channel found on this node.
- * If the parent node has a "io-channel-ranges" property,
- * then we can try one of its channels.
+		 * If !name, the lookup should only continue if
+		 * fwnode_property_get_reference_args() returned -ENOENT.
*/
- np = np->parent;
- if (np && !of_get_property(np, "io-channel-ranges", NULL))
- return NULL;
+ return chan;
}
- return chan;
+	/* Return -ENODEV so that the caller can continue the lookup */
+ return ERR_PTR(-ENODEV);
}
-EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);
-static struct iio_channel *of_iio_channel_get_all(struct device *dev)
+struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ const char *name)
{
+ struct fwnode_handle *parent;
+ struct iio_channel *chan;
+
+ /* Walk up the tree of devices looking for a matching iio channel */
+ chan = __fwnode_iio_channel_get_by_name(fwnode, name);
+ if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
+ return chan;
+
+ /*
+ * No matching IIO channel found on this node.
+ * If the parent node has a "io-channel-ranges" property,
+ * then we can try one of its channels.
+ */
+ fwnode_for_each_parent_node(fwnode, parent) {
+ if (!fwnode_property_present(parent, "io-channel-ranges")) {
+ fwnode_handle_put(parent);
+ return ERR_PTR(-ENODEV);
+ }
+
+		chan = __fwnode_iio_channel_get_by_name(parent, name);
+ if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
+ fwnode_handle_put(parent);
+ return chan;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
+
+static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct iio_channel *chans;
int i, mapind, nummaps = 0;
int ret;
do {
- ret = of_parse_phandle_with_args(dev->of_node,
- "io-channels",
- "#io-channel-cells",
- nummaps, NULL);
+ ret = fwnode_property_get_reference_args(fwnode, "io-channels",
+ "#io-channel-cells", 0,
+ nummaps, NULL);
if (ret < 0)
break;
} while (++nummaps);
- if (nummaps == 0) /* no error, return NULL to search map table */
- return NULL;
+ if (nummaps == 0)
+ return ERR_PTR(-ENODEV);
/* NULL terminated array to save passing size */
chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
- if (chans == NULL)
+ if (!chans)
return ERR_PTR(-ENOMEM);
- /* Search for OF matches */
+ /* Search for FW matches */
for (mapind = 0; mapind < nummaps; mapind++) {
- ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
- mapind);
+ ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
if (ret)
goto error_free_chans;
}
@@ -289,15 +325,6 @@ error_free_chans:
return ERR_PTR(ret);
}
-#else /* CONFIG_OF */
-
-static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
-{
- return NULL;
-}
-
-#endif /* CONFIG_OF */
-
static struct iio_channel *iio_channel_get_sys(const char *name,
const char *channel_name)
{
@@ -305,7 +332,7 @@ static struct iio_channel *iio_channel_get_sys(const char *name,
struct iio_channel *channel;
int err;
- if (name == NULL && channel_name == NULL)
+ if (!(name || channel_name))
return ERR_PTR(-ENODEV);
/* first find matching entry the channel map */
@@ -320,11 +347,11 @@ static struct iio_channel *iio_channel_get_sys(const char *name,
break;
}
mutex_unlock(&iio_map_list_lock);
- if (c == NULL)
+ if (!c)
return ERR_PTR(-ENODEV);
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (channel == NULL) {
+ if (!channel) {
err = -ENOMEM;
goto error_no_mem;
}
@@ -336,7 +363,7 @@ static struct iio_channel *iio_channel_get_sys(const char *name,
iio_chan_spec_from_name(channel->indio_dev,
c->map->adc_channel_label);
- if (channel->channel == NULL) {
+ if (!channel->channel) {
err = -EINVAL;
goto error_no_chan;
}
@@ -358,9 +385,9 @@ struct iio_channel *iio_channel_get(struct device *dev,
struct iio_channel *channel;
if (dev) {
- channel = of_iio_channel_get_by_name(dev->of_node,
- channel_name);
- if (channel != NULL)
+ channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
+ channel_name);
+ if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
return channel;
}
@@ -400,14 +427,14 @@ struct iio_channel *devm_iio_channel_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
-struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
- struct device_node *np,
- const char *channel_name)
+struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *channel_name)
{
struct iio_channel *channel;
int ret;
- channel = of_iio_channel_get_by_name(np, channel_name);
+ channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
if (IS_ERR(channel))
return channel;
@@ -417,7 +444,7 @@ struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
return channel;
}
-EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);
+EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
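
A consumer-side sketch of the new devm helper (editorial illustration, not part of the patch; the "vbat" channel name and foo_probe() are hypothetical, while the helper signature and iio_read_channel_processed() are as in the kernel):

static int foo_probe(struct platform_device *pdev)
{
	struct iio_channel *chan;
	int val, ret;

	chan = devm_fwnode_iio_channel_get_by_name(&pdev->dev,
						   dev_fwnode(&pdev->dev),
						   "vbat");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_processed(chan, &val);
	if (ret < 0)
		return ret;

	dev_info(&pdev->dev, "battery voltage: %d\n", val);
	return 0;
}
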
struct iio_channel *iio_channel_get_all(struct device *dev)
{
@@ -428,11 +455,15 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
int mapind = 0;
int i, ret;
- if (dev == NULL)
+ if (!dev)
return ERR_PTR(-EINVAL);
- chans = of_iio_channel_get_all(dev);
- if (chans)
+ chans = fwnode_iio_channel_get_all(dev);
+ /*
+ * We only want to carry on if the error is -ENODEV. Anything else
+ * should be reported up the stack.
+ */
+ if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
return chans;
name = dev_name(dev);
@@ -452,7 +483,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
/* NULL terminated array to save passing size */
chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
- if (chans == NULL) {
+ if (!chans) {
ret = -ENOMEM;
goto error_ret;
}
@@ -466,7 +497,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
chans[mapind].channel =
iio_chan_spec_from_name(chans[mapind].indio_dev,
c->map->adc_channel_label);
- if (chans[mapind].channel == NULL) {
+ if (!chans[mapind].channel) {
ret = -EINVAL;
goto error_free_chans;
}
@@ -528,14 +559,14 @@ struct iio_channel *devm_iio_channel_get_all(struct device *dev)
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
- enum iio_chan_info_enum info)
+ enum iio_chan_info_enum info)
{
int unused;
int vals[INDIO_MAX_RAW_ELEMENTS];
int ret;
int val_len = 2;
- if (val2 == NULL)
+ if (!val2)
val2 = &unused;
if (!iio_channel_has_info(chan->channel, info))
@@ -547,9 +578,10 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
vals, &val_len, info);
*val = vals[0];
*val2 = vals[1];
- } else
+ } else {
ret = chan->indio_dev->info->read_raw(chan->indio_dev,
chan->channel, val, val2, info);
+ }
return ret;
}
@@ -560,7 +592,7 @@ int iio_read_channel_raw(struct iio_channel *chan, int *val)
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -579,7 +611,7 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -593,7 +625,8 @@ err_unlock:
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
- int raw, int *processed, unsigned int scale)
+ int raw, int *processed,
+ unsigned int scale)
{
int scale_type, scale_val, scale_val2;
int offset_type, offset_val, offset_val2;
@@ -626,7 +659,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
}
scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
- IIO_CHAN_INFO_SCALE);
+ IIO_CHAN_INFO_SCALE);
if (scale_type < 0) {
/*
* If no channel scaling is available apply consumer scale to
@@ -671,19 +704,19 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
}
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
- int *processed, unsigned int scale)
+ int *processed, unsigned int scale)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
- scale);
+ scale);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
@@ -698,7 +731,7 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -724,7 +757,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -802,7 +835,7 @@ int iio_read_avail_channel_raw(struct iio_channel *chan,
int type;
ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
- IIO_CHAN_INFO_RAW);
+ IIO_CHAN_INFO_RAW);
if (ret >= 0 && type != IIO_VAL_INT)
/* raw values are assumed to be IIO_VAL_INT */
@@ -886,7 +919,7 @@ int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
/* Need to verify underlying driver has not gone away */
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -913,7 +946,7 @@ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -947,9 +980,8 @@ unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
-static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
- const struct iio_channel *chan,
- const char *attr)
+static const struct iio_chan_spec_ext_info *
+iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
const struct iio_chan_spec_ext_info *ext_info;
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 8537e88f02e3..7cf6e8490123 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -331,6 +331,17 @@ config LTR501
This driver can also be built as a module. If so, the module
will be called ltr501.
+config LTRF216A
+ tristate "Liteon LTRF216A Light Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say Y or M here, you get support for Liteon LTRF216A
+ Ambient Light Sensor.
+
+ If built as a dynamically linked module, it will be called
+ ltrf216a.
+
config LV0104CS
tristate "LV0104CS Ambient Light Sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index d10912faf964..6f23817fae6f 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_ISL29125) += isl29125.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_LTR501) += ltr501.o
+obj-$(CONFIG_LTRF216A) += ltrf216a.o
obj-$(CONFIG_LV0104CS) += lv0104cs.o
obj-$(CONFIG_MAX44000) += max44000.o
obj-$(CONFIG_MAX44009) += max44009.o
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
new file mode 100644
index 000000000000..4b8ef36b6912
--- /dev/null
+++ b/drivers/iio/light/ltrf216a.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LTRF216A Ambient Light Sensor
+ *
+ * Copyright (C) 2022 Collabora, Ltd.
+ * Author: Shreeya Patel <shreeya.patel@collabora.com>
+ *
+ * Copyright (C) 2021 Lite-On Technology Corp (Singapore)
+ * Author: Shi Zhigang <Zhigang.Shi@liteon.com>
+ *
+ * IIO driver for LTRF216A (7-bit I2C slave address 0x53).
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+
+#include <asm/unaligned.h>
+
+#define LTRF216A_ALS_RESET_MASK BIT(4)
+#define LTRF216A_ALS_DATA_STATUS BIT(3)
+#define LTRF216A_ALS_ENABLE_MASK BIT(1)
+#define LTRF216A_MAIN_CTRL 0x00
+#define LTRF216A_ALS_MEAS_RES 0x04
+#define LTRF216A_ALS_GAIN 0x05
+#define LTRF216A_PART_ID 0x06
+#define LTRF216A_MAIN_STATUS 0x07
+#define LTRF216A_ALS_CLEAR_DATA_0 0x0a
+#define LTRF216A_ALS_CLEAR_DATA_1 0x0b
+#define LTRF216A_ALS_CLEAR_DATA_2 0x0c
+#define LTRF216A_ALS_DATA_0 0x0d
+#define LTRF216A_ALS_DATA_1 0x0e
+#define LTRF216A_ALS_DATA_2 0x0f
+#define LTRF216A_INT_CFG 0x19
+#define LTRF216A_INT_PST 0x1a
+#define LTRF216A_ALS_THRES_UP_0 0x21
+#define LTRF216A_ALS_THRES_UP_1 0x22
+#define LTRF216A_ALS_THRES_UP_2 0x23
+#define LTRF216A_ALS_THRES_LOW_0 0x24
+#define LTRF216A_ALS_THRES_LOW_1 0x25
+#define LTRF216A_ALS_THRES_LOW_2 0x26
+#define LTRF216A_ALS_READ_DATA_DELAY_US 20000
+
+static const int ltrf216a_int_time_available[][2] = {
+ { 0, 400000 },
+ { 0, 200000 },
+ { 0, 100000 },
+ { 0, 50000 },
+ { 0, 25000 },
+};
+
+static const int ltrf216a_int_time_reg[][2] = {
+ { 400, 0x03 },
+ { 200, 0x13 },
+ { 100, 0x22 },
+ { 50, 0x31 },
+ { 25, 0x40 },
+};
+
+/*
+ * The window factor is needed when the device sits behind window glass
+ * coated with tinted ink. It compensates for the light loss caused by the
+ * lower transmission rate of the window glass and is used when
+ * calculating lux.
+ */
+#define LTRF216A_WIN_FAC 1
+
+struct ltrf216a_data {
+ struct regmap *regmap;
+ struct i2c_client *client;
+ u32 int_time;
+ u16 int_time_fac;
+ u8 als_gain_fac;
+ /*
+ * Protects regmap accesses and makes sure integration time
+ * remains constant during the measurement of lux.
+ */
+ struct mutex lock;
+};
+
+static const struct iio_chan_spec ltrf216a_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+};
+
+static void ltrf216a_reset(struct iio_dev *indio_dev)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+
+ /* reset sensor, chip fails to respond to this, so ignore any errors */
+ regmap_write(data->regmap, LTRF216A_MAIN_CTRL, LTRF216A_ALS_RESET_MASK);
+
+ /* reset time */
+ usleep_range(1000, 2000);
+}
+
+static int ltrf216a_enable(struct iio_dev *indio_dev)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ /* enable sensor */
+ ret = regmap_set_bits(data->regmap,
+ LTRF216A_MAIN_CTRL, LTRF216A_ALS_ENABLE_MASK);
+ if (ret) {
+ dev_err(dev, "failed to enable sensor: %d\n", ret);
+ return ret;
+ }
+
+ /* sleep for one integration cycle after enabling the device */
+ msleep(ltrf216a_int_time_reg[0][0]);
+
+ return 0;
+}
+
+static int ltrf216a_disable(struct iio_dev *indio_dev)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ ret = regmap_write(data->regmap, LTRF216A_MAIN_CTRL, 0);
+ if (ret)
+ dev_err(dev, "failed to disable sensor: %d\n", ret);
+
+ return ret;
+}
+
+static void ltrf216a_cleanup(void *data)
+{
+ struct iio_dev *indio_dev = data;
+
+ ltrf216a_disable(indio_dev);
+}
+
+static int ltrf216a_set_int_time(struct ltrf216a_data *data, int itime)
+{
+ struct device *dev = &data->client->dev;
+ unsigned int i;
+ u8 reg_val;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ltrf216a_int_time_available); i++) {
+ if (ltrf216a_int_time_available[i][1] == itime)
+ break;
+ }
+ if (i == ARRAY_SIZE(ltrf216a_int_time_available))
+ return -EINVAL;
+
+ reg_val = ltrf216a_int_time_reg[i][1];
+
+ ret = regmap_write(data->regmap, LTRF216A_ALS_MEAS_RES, reg_val);
+ if (ret) {
+ dev_err(dev, "failed to set integration time: %d\n", ret);
+ return ret;
+ }
+
+ data->int_time_fac = ltrf216a_int_time_reg[i][0];
+ data->int_time = itime;
+
+ return 0;
+}
+
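For illustration (editorial note, not part of the patch): a user-space write of "0.100000" to the integration time attribute reaches ltrf216a_write_raw() with val = 0 and val2 = 100000; the tables above then map that request to register value 0x22 in LTRF216A_ALS_MEAS_RES and an int_time_fac of 100, which later scales the lux calculation.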
+static int ltrf216a_get_int_time(struct ltrf216a_data *data,
+ int *val, int *val2)
+{
+ *val = 0;
+ *val2 = data->int_time;
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ltrf216a_set_power_state(struct ltrf216a_data *data, bool on)
+{
+ struct device *dev = &data->client->dev;
+ int ret = 0;
+
+ if (on) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "failed to resume runtime PM: %d\n", ret);
+ return ret;
+ }
+ } else {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
+}
+
+static int ltrf216a_read_data(struct ltrf216a_data *data, u8 addr)
+{
+ struct device *dev = &data->client->dev;
+ int ret, val;
+ u8 buf[3];
+
+ ret = regmap_read_poll_timeout(data->regmap, LTRF216A_MAIN_STATUS,
+ val, val & LTRF216A_ALS_DATA_STATUS,
+ LTRF216A_ALS_READ_DATA_DELAY_US,
+ LTRF216A_ALS_READ_DATA_DELAY_US * 50);
+ if (ret) {
+ dev_err(dev, "failed to wait for measurement data: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_bulk_read(data->regmap, addr, buf, sizeof(buf));
+ if (ret) {
+ dev_err(dev, "failed to read measurement data: %d\n", ret);
+ return ret;
+ }
+
+ return get_unaligned_le24(&buf[0]);
+}
+
+static int ltrf216a_get_lux(struct ltrf216a_data *data)
+{
+ int ret, greendata;
+ u64 lux, div;
+
+ ret = ltrf216a_set_power_state(data, true);
+ if (ret)
+ return ret;
+
+ greendata = ltrf216a_read_data(data, LTRF216A_ALS_DATA_0);
+ if (greendata < 0)
+ return greendata;
+
+ ltrf216a_set_power_state(data, false);
+
+ lux = greendata * 45 * LTRF216A_WIN_FAC * 100;
+ div = data->als_gain_fac * data->int_time_fac * 100;
+
+ return div_u64(lux, div);
+}
+
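A worked example of the formula above (editorial note; the green count of 2000 is made up): with the defaults set in probe (als_gain_fac = 3, int_time_fac = 100) and LTRF216A_WIN_FAC = 1, lux = 2000 * 45 * 1 * 100 / (3 * 100 * 100) = 9000000 / 30000 = 300 lux, i.e. roughly 0.15 lux per count at gain 3 and 100 ms integration time.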
+static int ltrf216a_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ltrf216a_set_power_state(data, true);
+ if (ret)
+ return ret;
+ mutex_lock(&data->lock);
+ ret = ltrf216a_read_data(data, LTRF216A_ALS_DATA_0);
+ mutex_unlock(&data->lock);
+ ltrf216a_set_power_state(data, false);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&data->lock);
+ ret = ltrf216a_get_lux(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ mutex_lock(&data->lock);
+ ret = ltrf216a_get_int_time(data, val, val2);
+ mutex_unlock(&data->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltrf216a_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0)
+ return -EINVAL;
+ mutex_lock(&data->lock);
+ ret = ltrf216a_set_int_time(data, val2);
+ mutex_unlock(&data->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltrf216a_read_available(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(ltrf216a_int_time_available) * 2;
+ *vals = (const int *)ltrf216a_int_time_available;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ltrf216a_info = {
+ .read_raw = ltrf216a_read_raw,
+ .write_raw = ltrf216a_write_raw,
+ .read_avail = ltrf216a_read_available,
+};
+
+static bool ltrf216a_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTRF216A_MAIN_CTRL:
+ case LTRF216A_ALS_MEAS_RES:
+ case LTRF216A_ALS_GAIN:
+ case LTRF216A_PART_ID:
+ case LTRF216A_MAIN_STATUS:
+ case LTRF216A_ALS_CLEAR_DATA_0:
+ case LTRF216A_ALS_CLEAR_DATA_1:
+ case LTRF216A_ALS_CLEAR_DATA_2:
+ case LTRF216A_ALS_DATA_0:
+ case LTRF216A_ALS_DATA_1:
+ case LTRF216A_ALS_DATA_2:
+ case LTRF216A_INT_CFG:
+ case LTRF216A_INT_PST:
+ case LTRF216A_ALS_THRES_UP_0:
+ case LTRF216A_ALS_THRES_UP_1:
+ case LTRF216A_ALS_THRES_UP_2:
+ case LTRF216A_ALS_THRES_LOW_0:
+ case LTRF216A_ALS_THRES_LOW_1:
+ case LTRF216A_ALS_THRES_LOW_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ltrf216a_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTRF216A_MAIN_CTRL:
+ case LTRF216A_ALS_MEAS_RES:
+ case LTRF216A_ALS_GAIN:
+ case LTRF216A_INT_CFG:
+ case LTRF216A_INT_PST:
+ case LTRF216A_ALS_THRES_UP_0:
+ case LTRF216A_ALS_THRES_UP_1:
+ case LTRF216A_ALS_THRES_UP_2:
+ case LTRF216A_ALS_THRES_LOW_0:
+ case LTRF216A_ALS_THRES_LOW_1:
+ case LTRF216A_ALS_THRES_LOW_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ltrf216a_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTRF216A_MAIN_STATUS:
+ case LTRF216A_ALS_CLEAR_DATA_0:
+ case LTRF216A_ALS_CLEAR_DATA_1:
+ case LTRF216A_ALS_CLEAR_DATA_2:
+ case LTRF216A_ALS_DATA_0:
+ case LTRF216A_ALS_DATA_1:
+ case LTRF216A_ALS_DATA_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ltrf216a_precious_reg(struct device *dev, unsigned int reg)
+{
+ return reg == LTRF216A_MAIN_STATUS;
+}
+
+static const struct regmap_config ltrf216a_regmap_config = {
+ .name = "ltrf216a",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = LTRF216A_ALS_THRES_LOW_2,
+ .readable_reg = ltrf216a_readable_reg,
+ .writeable_reg = ltrf216a_writable_reg,
+ .volatile_reg = ltrf216a_volatile_reg,
+ .precious_reg = ltrf216a_precious_reg,
+ .disable_locking = true,
+};
+
+static int ltrf216a_probe(struct i2c_client *client)
+{
+ struct ltrf216a_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &ltrf216a_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(data->regmap),
+ "regmap initialization failed\n");
+
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ mutex_init(&data->lock);
+
+ indio_dev->info = &ltrf216a_info;
+ indio_dev->name = "ltrf216a";
+ indio_dev->channels = ltrf216a_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ltrf216a_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret)
+ return ret;
+
+ /* reset sensor, chip fails to respond to this, so ignore any errors */
+ ltrf216a_reset(indio_dev);
+
+ ret = regmap_reinit_cache(data->regmap, &ltrf216a_regmap_config);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to reinit regmap cache\n");
+
+ ret = ltrf216a_enable(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev, ltrf216a_cleanup,
+ indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(&client->dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to enable runtime PM\n");
+
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ data->int_time = 100000;
+ data->int_time_fac = 100;
+ data->als_gain_fac = 3;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int ltrf216a_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = ltrf216a_disable(indio_dev);
+ if (ret)
+ return ret;
+
+ regcache_cache_only(data->regmap, true);
+
+ return 0;
+}
+
+static int ltrf216a_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ regcache_cache_only(data->regmap, false);
+ regcache_mark_dirty(data->regmap);
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ goto cache_only;
+
+ ret = ltrf216a_enable(indio_dev);
+ if (ret)
+ goto cache_only;
+
+ return 0;
+
+cache_only:
+ regcache_cache_only(data->regmap, true);
+
+ return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ltrf216a_pm_ops, ltrf216a_runtime_suspend,
+ ltrf216a_runtime_resume, NULL);
+
+static const struct i2c_device_id ltrf216a_id[] = {
+ { "ltrf216a" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltrf216a_id);
+
+static const struct of_device_id ltrf216a_of_match[] = {
+ { .compatible = "liteon,ltrf216a" },
+ { .compatible = "ltr,ltrf216a" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ltrf216a_of_match);
+
+static struct i2c_driver ltrf216a_driver = {
+ .driver = {
+ .name = "ltrf216a",
+ .pm = pm_ptr(&ltrf216a_pm_ops),
+ .of_match_table = ltrf216a_of_match,
+ },
+ .probe_new = ltrf216a_probe,
+ .id_table = ltrf216a_id,
+};
+module_i2c_driver(ltrf216a_driver);
+
+MODULE_AUTHOR("Shreeya Patel <shreeya.patel@collabora.com>");
+MODULE_AUTHOR("Shi Zhigang <Zhigang.Shi@liteon.com>");
+MODULE_DESCRIPTION("LTRF216A ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
index 3d4cc1180b6a..c737d3e193ae 100644
--- a/drivers/iio/light/st_uvis25_core.c
+++ b/drivers/iio/light/st_uvis25_core.c
@@ -325,7 +325,7 @@ int st_uvis25_probe(struct device *dev, int irq, struct regmap *regmap)
}
EXPORT_SYMBOL_NS(st_uvis25_probe, IIO_UVIS25);
-static int __maybe_unused st_uvis25_suspend(struct device *dev)
+static int st_uvis25_suspend(struct device *dev)
{
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct st_uvis25_hw *hw = iio_priv(iio_dev);
@@ -334,7 +334,7 @@ static int __maybe_unused st_uvis25_suspend(struct device *dev)
ST_UVIS25_REG_ODR_MASK, 0);
}
-static int __maybe_unused st_uvis25_resume(struct device *dev)
+static int st_uvis25_resume(struct device *dev)
{
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct st_uvis25_hw *hw = iio_priv(iio_dev);
@@ -346,10 +346,7 @@ static int __maybe_unused st_uvis25_resume(struct device *dev)
return 0;
}
-const struct dev_pm_ops st_uvis25_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(st_uvis25_suspend, st_uvis25_resume)
-};
-EXPORT_SYMBOL_NS(st_uvis25_pm_ops, IIO_UVIS25);
+EXPORT_NS_SIMPLE_DEV_PM_OPS(st_uvis25_pm_ops, st_uvis25_suspend, st_uvis25_resume, IIO_UVIS25);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_DESCRIPTION("STMicroelectronics uvis25 sensor driver");
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index b06d09af28a3..c982b0b255cf 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(i2c, st_uvis25_i2c_id_table);
static struct i2c_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_i2c",
- .pm = &st_uvis25_pm_ops,
+ .pm = pm_sleep_ptr(&st_uvis25_pm_ops),
.of_match_table = st_uvis25_i2c_of_match,
},
.probe = st_uvis25_i2c_probe,
diff --git a/drivers/iio/light/st_uvis25_spi.c b/drivers/iio/light/st_uvis25_spi.c
index 3a4dc6d7180c..86a232320d7d 100644
--- a/drivers/iio/light/st_uvis25_spi.c
+++ b/drivers/iio/light/st_uvis25_spi.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(spi, st_uvis25_spi_id_table);
static struct spi_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_spi",
- .pm = &st_uvis25_pm_ops,
+ .pm = pm_sleep_ptr(&st_uvis25_pm_ops),
.of_match_table = st_uvis25_spi_of_match,
},
.probe = st_uvis25_spi_probe,
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 07eb619bcfe8..b91fc5e6a26e 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -216,8 +216,8 @@ config YAMAHA_YAS530
select IIO_TRIGGERED_BUFFER
help
Say Y here to add support for the Yamaha YAS530 series of
- 3-Axis Magnetometers. Right now YAS530, YAS532 and YAS533 are
- fully supported.
+ 3-Axis Magnetometers. YAS530, YAS532, YAS533 and YAS537 are
+ supported.
This driver can also be compiled as a module.
To compile this driver as a module, choose M here: the module
diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index 9120c8bbf3dd..60fbb5431c88 100644
--- a/drivers/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -52,16 +52,5 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
enum hmc5843_ids id, const char *name);
void hmc5843_common_remove(struct device *dev);
-int hmc5843_common_suspend(struct device *dev);
-int hmc5843_common_resume(struct device *dev);
-
-#ifdef CONFIG_PM_SLEEP
-static __maybe_unused SIMPLE_DEV_PM_OPS(hmc5843_pm_ops,
- hmc5843_common_suspend,
- hmc5843_common_resume);
-#define HMC5843_PM_OPS (&hmc5843_pm_ops)
-#else
-#define HMC5843_PM_OPS NULL
-#endif
-
+extern const struct dev_pm_ops hmc5843_pm_ops;
#endif /* HMC5843_CORE_H */
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 4a63b2da9df0..c5521d61da29 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -603,19 +603,19 @@ static const struct iio_info hmc5843_info = {
static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
-int hmc5843_common_suspend(struct device *dev)
+static int hmc5843_common_suspend(struct device *dev)
{
return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
HMC5843_MODE_SLEEP);
}
-EXPORT_SYMBOL_NS(hmc5843_common_suspend, IIO_HMC5843);
-int hmc5843_common_resume(struct device *dev)
+static int hmc5843_common_resume(struct device *dev)
{
return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
HMC5843_MODE_CONVERSION_CONTINUOUS);
}
-EXPORT_SYMBOL_NS(hmc5843_common_resume, IIO_HMC5843);
+EXPORT_NS_SIMPLE_DEV_PM_OPS(hmc5843_pm_ops, hmc5843_common_suspend,
+ hmc5843_common_resume, IIO_HMC5843);
int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
enum hmc5843_ids id, const char *name)
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index fe5e8415b2f2..18a13dd51296 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(of, hmc5843_of_match);
static struct i2c_driver hmc5843_driver = {
.driver = {
.name = "hmc5843",
- .pm = HMC5843_PM_OPS,
+ .pm = pm_sleep_ptr(&hmc5843_pm_ops),
.of_match_table = hmc5843_of_match,
},
.id_table = hmc5843_id,
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 8403f09aba39..c42d2e2a6a6c 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -86,13 +86,13 @@ static const struct spi_device_id hmc5843_id[] = {
MODULE_DEVICE_TABLE(spi, hmc5843_id);
static struct spi_driver hmc5843_driver = {
- .driver = {
- .name = "hmc5843",
- .pm = HMC5843_PM_OPS,
- },
- .id_table = hmc5843_id,
- .probe = hmc5843_spi_probe,
- .remove = hmc5843_spi_remove,
+ .driver = {
+ .name = "hmc5843",
+ .pm = pm_sleep_ptr(&hmc5843_pm_ops),
+ },
+ .id_table = hmc5843_id,
+ .probe = hmc5843_spi_probe,
+ .remove = hmc5843_spi_remove,
};
module_spi_driver(hmc5843_driver);
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index c3a10942654e..801c760feb4d 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -10,13 +10,16 @@
* (YAS534 is a magnetic switch, not handled)
* YAS535 MS-6C
* YAS536 MS-3W
- * YAS537 MS-3T (2015 Samsung Galaxy S6, Note 5, Xiaomi)
+ * YAS537 MS-3T (2015 Samsung Galaxy S6, Note 5, Galaxy S7)
* YAS539 MS-3S (2018 Samsung Galaxy A7 SM-A750FN)
*
* Code functions found in the MPU3050 YAS530 and YAS532 drivers
* named "inv_compass" in the Tegra Android kernel tree.
* Copyright (C) 2012 InvenSense Corporation
*
+ * Code functions for YAS537 based on Yamaha Android kernel driver.
+ * Copyright (c) 2014 Yamaha Corporation
+ *
* Author: Linus Walleij <linus.walleij@linaro.org>
*/
#include <linux/bitfield.h>
@@ -29,9 +32,11 @@
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/random.h>
+#include <linux/units.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
@@ -40,20 +45,39 @@
#include <asm/unaligned.h>
-/* This register map covers YAS530 and YAS532 but differs in YAS 537 and YAS539 */
+/* Commonly used registers */
#define YAS5XX_DEVICE_ID 0x80
-#define YAS5XX_ACTUATE_INIT_COIL 0x81
-#define YAS5XX_MEASURE 0x82
-#define YAS5XX_CONFIG 0x83
-#define YAS5XX_MEASURE_INTERVAL 0x84
-#define YAS5XX_OFFSET_X 0x85 /* [-31 .. 31] */
-#define YAS5XX_OFFSET_Y1 0x86 /* [-31 .. 31] */
-#define YAS5XX_OFFSET_Y2 0x87 /* [-31 .. 31] */
-#define YAS5XX_TEST1 0x88
-#define YAS5XX_TEST2 0x89
-#define YAS5XX_CAL 0x90
#define YAS5XX_MEASURE_DATA 0xB0
+/* These registers are used by YAS530, YAS532 and YAS533 */
+#define YAS530_ACTUATE_INIT_COIL 0x81
+#define YAS530_MEASURE 0x82
+#define YAS530_CONFIG 0x83
+#define YAS530_MEASURE_INTERVAL 0x84
+#define YAS530_OFFSET_X 0x85 /* [-31 .. 31] */
+#define YAS530_OFFSET_Y1 0x86 /* [-31 .. 31] */
+#define YAS530_OFFSET_Y2 0x87 /* [-31 .. 31] */
+#define YAS530_TEST1 0x88
+#define YAS530_TEST2 0x89
+#define YAS530_CAL 0x90
+
+/* Registers used by YAS537 */
+#define YAS537_MEASURE 0x81 /* Originally YAS537_REG_CMDR */
+#define YAS537_CONFIG 0x82 /* Originally YAS537_REG_CONFR */
+#define YAS537_MEASURE_INTERVAL 0x83 /* Originally YAS537_REG_INTRVLR */
+#define YAS537_OFFSET_X 0x84 /* Originally YAS537_REG_OXR */
+#define YAS537_OFFSET_Y1 0x85 /* Originally YAS537_REG_OY1R */
+#define YAS537_OFFSET_Y2 0x86 /* Originally YAS537_REG_OY2R */
+#define YAS537_AVR 0x87
+#define YAS537_HCK 0x88
+#define YAS537_LCK 0x89
+#define YAS537_SRST 0x90
+#define YAS537_ADCCAL 0x91
+#define YAS537_MTC 0x93
+#define YAS537_OC 0x9E
+#define YAS537_TRM 0x9F
+#define YAS537_CAL 0xC0
+
/* Bits in the YAS5xx config register */
#define YAS5XX_CONFIG_INTON BIT(0) /* Interrupt on? */
#define YAS5XX_CONFIG_INTHACT BIT(1) /* Interrupt active high? */
@@ -65,6 +89,7 @@
#define YAS5XX_MEASURE_LDTC BIT(1)
#define YAS5XX_MEASURE_FORS BIT(2)
#define YAS5XX_MEASURE_DLYMES BIT(4)
+#define YAS5XX_MEASURE_CONT BIT(5)
/* Bits in the measure data register */
#define YAS5XX_MEASURE_DATA_BUSY BIT(7)
@@ -88,33 +113,101 @@
#define YAS532_DATA_BITS 13
#define YAS532_DATA_CENTER BIT(YAS532_DATA_BITS - 1)
#define YAS532_DATA_OVERFLOW (BIT(YAS532_DATA_BITS) - 1)
-#define YAS532_20DEGREES 390 /* Looks like Kelvin */
-/* These variant IDs are known from code dumps */
#define YAS537_DEVICE_ID 0x07 /* YAS537 (MS-3T) */
-#define YAS539_DEVICE_ID 0x08 /* YAS539 (MS-3S) */
+#define YAS537_VERSION_0 0 /* Version naming unknown */
+#define YAS537_VERSION_1 1 /* Version naming unknown */
+#define YAS537_MAG_AVERAGE_32_MASK GENMASK(6, 4)
+#define YAS537_MEASURE_TIME_WORST_US 1500
+#define YAS537_DEFAULT_SENSOR_DELAY_MS 50
+#define YAS537_MAG_RCOIL_TIME_US 65
+#define YAS537_MTC3_MASK_PREP GENMASK(7, 0)
+#define YAS537_MTC3_MASK_GET GENMASK(7, 5)
+#define YAS537_MTC3_ADD_BIT BIT(4)
+#define YAS537_HCK_MASK_PREP GENMASK(4, 0)
+#define YAS537_HCK_MASK_GET GENMASK(7, 4)
+#define YAS537_LCK_MASK_PREP GENMASK(4, 0)
+#define YAS537_LCK_MASK_GET GENMASK(3, 0)
+#define YAS537_OC_MASK_GET GENMASK(5, 0)
/* Turn off device regulators etc after 5 seconds of inactivity */
#define YAS5XX_AUTOSUSPEND_DELAY_MS 5000
+enum chip_ids {
+ yas530,
+ yas532,
+ yas533,
+ yas537,
+};
+
+static const int yas530_volatile_reg[] = {
+ YAS530_ACTUATE_INIT_COIL,
+ YAS530_MEASURE,
+};
+
+static const int yas537_volatile_reg[] = {
+ YAS537_MEASURE,
+};
+
struct yas5xx_calibration {
/* Linearization calibration x, y1, y2 */
s32 r[3];
u32 f[3];
/* Temperature compensation calibration */
- s32 Cx, Cy1, Cy2;
+ s16 Cx, Cy1, Cy2;
/* Misc calibration coefficients */
- s32 a2, a3, a4, a5, a6, a7, a8, a9, k;
+ s8 a2, a3, a4, a6, a7, a8;
+ s16 a5, a9;
+ u8 k;
/* clock divider */
u8 dck;
};
+struct yas5xx;
+
+/**
+ * struct yas5xx_chip_info - device-specific data and function pointers
+ * @devid: device ID number
+ * @product_name: product name of the YAS variant
+ * @version_names: version letters or names
+ * @volatile_reg: device-specific volatile registers
+ * @volatile_reg_qty: quantity of device-specific volatile registers
+ * @scaling_val2: scaling value for IIO_CHAN_INFO_SCALE
+ * @t_ref: number of counts at reference temperature 20 °C
+ * @min_temp_x10: starting point of temperature counting, in tenths of a degree Celsius
+ * @get_measure: function pointer to get a measurement
+ * @get_calibration_data: function pointer to get calibration data
+ * @dump_calibration: function pointer to dump calibration for debugging
+ * @measure_offsets: function pointer to measure the offsets
+ * @power_on: function pointer to power-on procedure
+ *
+ * The "t_ref" value for YAS532/533 is known from the Android driver.
+ * For YAS530 and YAS537 it was approximately measured.
+ *
+ * The temperatures "min_temp_x10" are derived from the temperature resolutions
+ * given in the data sheets.
+ */
+struct yas5xx_chip_info {
+ unsigned int devid;
+ const char *product_name;
+ const char *version_names[2];
+ const int *volatile_reg;
+ int volatile_reg_qty;
+ u32 scaling_val2;
+ u16 t_ref;
+ s16 min_temp_x10;
+ int (*get_measure)(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo);
+ int (*get_calibration_data)(struct yas5xx *yas5xx);
+ void (*dump_calibration)(struct yas5xx *yas5xx);
+ int (*measure_offsets)(struct yas5xx *yas5xx);
+ int (*power_on)(struct yas5xx *yas5xx);
+};
+
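The actual per-variant table lives further down in the file, outside this excerpt. Purely for orientation, an entry built from the fields documented above would look roughly like the sketch below; every numeric value, the product/version strings and the measure_offsets/power_on helper names are placeholders, not the driver's real ones:

static const struct yas5xx_chip_info yas5xx_chip_info_tbl[] = {
	[yas530] = {
		.devid = YAS530_DEVICE_ID,
		.product_name = "YAS530 MS-3E",			/* placeholder */
		.version_names = { "A", "B" },			/* placeholder */
		.volatile_reg = yas530_volatile_reg,
		.volatile_reg_qty = ARRAY_SIZE(yas530_volatile_reg),
		.scaling_val2 = 100000000,			/* 10^8, as in the removed code */
		.t_ref = 182,					/* placeholder */
		.min_temp_x10 = -620,				/* placeholder */
		.get_measure = yas530_get_measure,
		.get_calibration_data = yas530_get_calibration_data,
		.dump_calibration = yas530_dump_calibration,
		.measure_offsets = yas530_measure_offsets,	/* placeholder name */
		.power_on = yas530_power_on,			/* placeholder name */
	},
	/* ... one entry per supported variant ... */
};
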
/**
* struct yas5xx - state container for the YAS5xx driver
* @dev: parent device pointer
- * @devid: device ID number
+ * @chip_info: device-specific data and function pointers
* @version: device version
- * @name: device name
* @calibration: calibration settings from the OTP storage
* @hard_offsets: offsets for each axis measured with initcoil actuated
* @orientation: mounting matrix, flipped axis etc
@@ -128,11 +221,10 @@ struct yas5xx_calibration {
*/
struct yas5xx {
struct device *dev;
- unsigned int devid;
+ const struct yas5xx_chip_info *chip_info;
unsigned int version;
- char name[16];
struct yas5xx_calibration calibration;
- u8 hard_offsets[3];
+ s8 hard_offsets[3];
struct iio_mount_matrix orientation;
struct regmap *map;
struct regulator_bulk_data regs[2];
@@ -179,23 +271,26 @@ static u16 yas532_extract_axis(u8 *data)
}
/**
- * yas5xx_measure() - Make a measure from the hardware
+ * yas530_measure() - Make a measure from the hardware
* @yas5xx: The device state
* @t: the raw temperature measurement
* @x: the raw x axis measurement
* @y1: the y1 axis measurement
* @y2: the y2 axis measurement
* @return: 0 on success or error code
+ *
+ * Used by YAS530, YAS532 and YAS533.
*/
-static int yas5xx_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y2)
+static int yas530_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y2)
{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
unsigned int busy;
u8 data[8];
int ret;
u16 val;
mutex_lock(&yas5xx->lock);
- ret = regmap_write(yas5xx->map, YAS5XX_MEASURE, YAS5XX_MEASURE_START);
+ ret = regmap_write(yas5xx->map, YAS530_MEASURE, YAS5XX_MEASURE_START);
if (ret < 0)
goto out_unlock;
@@ -219,7 +314,7 @@ static int yas5xx_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
mutex_unlock(&yas5xx->lock);
- switch (yas5xx->devid) {
+ switch (ci->devid) {
case YAS530_DEVICE_ID:
/*
* The t value is 9 bits in big endian format
@@ -261,8 +356,81 @@ out_unlock:
return ret;
}
-static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
+/**
+ * yas537_measure() - Make a measure from the hardware
+ * @yas5xx: The device state
+ * @t: the raw temperature measurement
+ * @x: the raw x axis measurement
+ * @y1: the y1 axis measurement
+ * @y2: the y2 axis measurement
+ * @return: 0 on success or error code
+ */
+static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y2)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ unsigned int busy;
+ u8 data[8];
+ u16 xy1y2[3];
+ s32 h[3], s[3];
+ int i, ret;
+
+ mutex_lock(&yas5xx->lock);
+
+	/* Unlike YAS530/532, a "cont" bit is also set here; its meaning is unknown */
+ ret = regmap_write(yas5xx->map, YAS537_MEASURE, YAS5XX_MEASURE_START |
+ YAS5XX_MEASURE_CONT);
+ if (ret < 0)
+ goto out_unlock;
+
+	/* Use the same timeout as YAS530/532, but the busy bit is in data row 2 */
+ ret = regmap_read_poll_timeout(yas5xx->map, YAS5XX_MEASURE_DATA + 2, busy,
+ !(busy & YAS5XX_MEASURE_DATA_BUSY),
+ 500, 20000);
+ if (ret) {
+ dev_err(yas5xx->dev, "timeout waiting for measurement\n");
+ goto out_unlock;
+ }
+
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_MEASURE_DATA,
+ data, sizeof(data));
+ if (ret)
+ goto out_unlock;
+
+ mutex_unlock(&yas5xx->lock);
+
+ *t = get_unaligned_be16(&data[0]);
+ xy1y2[0] = FIELD_GET(GENMASK(13, 0), get_unaligned_be16(&data[2]));
+ xy1y2[1] = get_unaligned_be16(&data[4]);
+ xy1y2[2] = get_unaligned_be16(&data[6]);
+
+ /* The second version of YAS537 needs to include calibration coefficients */
+ if (yas5xx->version == YAS537_VERSION_1) {
+ for (i = 0; i < 3; i++)
+ s[i] = xy1y2[i] - BIT(13);
+ h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13);
+ h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13);
+ h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13);
+ for (i = 0; i < 3; i++) {
+			h[i] = clamp_val(h[i], -BIT(13), BIT(13) - 1);
+ xy1y2[i] = h[i] + BIT(13);
+ }
+ }
+
+ *x = xy1y2[0];
+ *y1 = xy1y2[1];
+ *y2 = xy1y2[2];
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&yas5xx->lock);
+ return ret;
+}
+
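Restated editorially (not text from the patch): for version 1 parts each raw channel is first centred, s_i = raw_i - 8192, then passed through a 3x3 correction matrix scaled by k/8192,

	h = (k / 8192) * [ 128 a2 a3 ; a4 a5 a6 ; a7 a8 a9 ] * s

and finally clamped to [-8192, 8191] and shifted back by 8192, so the corrected values stay within the 14-bit range of the measurement registers.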
+/* Used by YAS530, YAS532 and YAS533 */
+static s32 yas530_linearize(struct yas5xx *yas5xx, u16 val, int axis)
{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
struct yas5xx_calibration *c = &yas5xx->calibration;
static const s32 yas532ac_coef[] = {
YAS532_VERSION_AC_COEF_X,
@@ -272,7 +440,7 @@ static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
s32 coef;
/* Select coefficients */
- switch (yas5xx->devid) {
+ switch (ci->devid) {
case YAS530_DEVICE_ID:
if (yas5xx->version == YAS530_VERSION_A)
coef = YAS530_VERSION_A_COEF;
@@ -302,8 +470,24 @@ static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
(yas5xx->hard_offsets[axis] - c->r[axis]) * coef;
}
+static s32 yas5xx_calc_temperature(struct yas5xx *yas5xx, u16 t)
+{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
+ s32 to;
+ u16 t_ref;
+ s16 min_temp_x10;
+ int ref_temp_x10;
+
+ t_ref = ci->t_ref;
+ min_temp_x10 = ci->min_temp_x10;
+ ref_temp_x10 = 200;
+
+ to = (min_temp_x10 + ((ref_temp_x10 - min_temp_x10) * t / t_ref)) * 100;
+ return to;
+}
+
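A worked example of the conversion above, using made-up numbers (the real t_ref and min_temp_x10 sit in the per-variant chip_info table, which is not part of this excerpt): with t_ref = 390 counts and min_temp_x10 = -500 (i.e. -50.0 degrees C), a raw reading t equal to t_ref yields to = (-500 + (200 - (-500)) * 390 / 390) * 100 = 200 * 100 = 20000 millidegrees C, i.e. exactly the 20 degrees C reference point that t_ref is defined against.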
/**
- * yas5xx_get_measure() - Measure a sample of all axis and process
+ * yas530_get_measure() - Measure a sample of all axis and process
* @yas5xx: The device state
* @to: Temperature out
* @xo: X axis out
@@ -311,36 +495,50 @@ static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
* @zo: Z axis out
* @return: 0 on success or error code
*
- * Returned values are in nanotesla according to some code.
+ * Used by YAS530, YAS532 and YAS533.
*/
-static int yas5xx_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo)
+static int yas530_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo)
{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
struct yas5xx_calibration *c = &yas5xx->calibration;
- u16 t, x, y1, y2;
- /* These are "signed x, signed y1 etc */
+ u16 t_ref, t_comp, t, x, y1, y2;
+ /* These are signed x, signed y1 etc */
s32 sx, sy1, sy2, sy, sz;
int ret;
/* We first get raw data that needs to be translated to [x,y,z] */
- ret = yas5xx_measure(yas5xx, &t, &x, &y1, &y2);
+ ret = yas530_measure(yas5xx, &t, &x, &y1, &y2);
if (ret)
return ret;
/* Do some linearization if available */
- sx = yas5xx_linearize(yas5xx, x, 0);
- sy1 = yas5xx_linearize(yas5xx, y1, 1);
- sy2 = yas5xx_linearize(yas5xx, y2, 2);
+ sx = yas530_linearize(yas5xx, x, 0);
+ sy1 = yas530_linearize(yas5xx, y1, 1);
+ sy2 = yas530_linearize(yas5xx, y2, 2);
+
+ /*
+ * Set the temperature for compensation (unit: counts):
+ * YAS532/YAS533 version AC uses the temperature deviation as a
+ * multiplier. YAS530 and YAS532 version AB use solely the t value.
+ */
+ t_ref = ci->t_ref;
+ if (ci->devid == YAS532_DEVICE_ID &&
+ yas5xx->version == YAS532_VERSION_AC) {
+ t_comp = t - t_ref;
+ } else {
+ t_comp = t;
+ }
/*
* Temperature compensation for x, y1, y2 respectively:
*
- * Cx * t
- * x' = x - ------
- * 100
+ * Cx * t_comp
+ * x' = x - -----------
+ * 100
*/
- sx = sx - (c->Cx * t) / 100;
- sy1 = sy1 - (c->Cy1 * t) / 100;
- sy2 = sy2 - (c->Cy2 * t) / 100;
+ sx = sx - (c->Cx * t_comp) / 100;
+ sy1 = sy1 - (c->Cy1 * t_comp) / 100;
+ sy2 = sy2 - (c->Cy2 * t_comp) / 100;
/*
* Break y1 and y2 into y and z, y1 and y2 are apparently encoding
@@ -349,11 +547,9 @@ static int yas5xx_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo,
sy = sy1 - sy2;
sz = -sy1 - sy2;
- /*
- * FIXME: convert to Celsius? Just guessing this is given
- * as 1/10:s of degrees so multiply by 100 to get millicentigrades.
- */
- *to = t * 100;
+ /* Calculate temperature readout */
+ *to = yas5xx_calc_temperature(yas5xx, t);
+
/*
* Calibrate [x,y,z] with some formulas like this:
*
@@ -376,19 +572,56 @@ static int yas5xx_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo,
return 0;
}
+/**
+ * yas537_get_measure() - Measure a sample of all axis and process
+ * @yas5xx: The device state
+ * @to: Temperature out
+ * @xo: X axis out
+ * @yo: Y axis out
+ * @zo: Z axis out
+ * @return: 0 on success or error code
+ */
+static int yas537_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo)
+{
+ u16 t, x, y1, y2;
+ int ret;
+
+ /* We first get raw data that needs to be translated to [x,y,z] */
+ ret = yas537_measure(yas5xx, &t, &x, &y1, &y2);
+ if (ret)
+ return ret;
+
+ /* Calculate temperature readout */
+ *to = yas5xx_calc_temperature(yas5xx, t);
+
+ /*
+ * Unfortunately, no linearization or temperature compensation formulas
+ * are known for YAS537.
+ */
+
+ /* Calculate x, y, z from x, y1, y2 */
+ *xo = (x - BIT(13)) * 300;
+ *yo = (y1 - y2) * 1732 / 10;
+ *zo = (-y1 - y2 + BIT(14)) * 300;
+
+ return 0;
+}
+
static int yas5xx_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
struct yas5xx *yas5xx = iio_priv(indio_dev);
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
s32 t, x, y, z;
int ret;
switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW:
pm_runtime_get_sync(yas5xx->dev);
- ret = yas5xx_get_measure(yas5xx, &t, &x, &y, &z);
+ ret = ci->get_measure(yas5xx, &t, &x, &y, &z);
pm_runtime_mark_last_busy(yas5xx->dev);
pm_runtime_put_autosuspend(yas5xx->dev);
if (ret)
@@ -412,19 +645,8 @@ static int yas5xx_read_raw(struct iio_dev *indio_dev,
}
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- if (chan->address == 0) {
- /* Temperature is unscaled */
- *val = 1;
- return IIO_VAL_INT;
- }
- /*
- * The axis values are in nanotesla according to the vendor
- * drivers, but is clearly in microtesla according to
- * experiments. Since 1 uT = 0.01 Gauss, we need to divide
- * by 100000000 (10^8) to get to Gauss from the raw value.
- */
*val = 1;
- *val2 = 100000000;
+ *val2 = ci->scaling_val2;
return IIO_VAL_FRACTIONAL;
default:
/* Unknown request */
@@ -435,11 +657,12 @@ static int yas5xx_read_raw(struct iio_dev *indio_dev,
static void yas5xx_fill_buffer(struct iio_dev *indio_dev)
{
struct yas5xx *yas5xx = iio_priv(indio_dev);
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
s32 t, x, y, z;
int ret;
pm_runtime_get_sync(yas5xx->dev);
- ret = yas5xx_get_measure(yas5xx, &t, &x, &y, &z);
+ ret = ci->get_measure(yas5xx, &t, &x, &y, &z);
pm_runtime_mark_last_busy(yas5xx->dev);
pm_runtime_put_autosuspend(yas5xx->dev);
if (ret) {
@@ -505,7 +728,7 @@ static const struct iio_chan_spec yas5xx_channels[] = {
.address = 0,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 32,
.storagebits = 32,
.endianness = IIO_CPU,
@@ -525,9 +748,26 @@ static const struct iio_info yas5xx_info = {
static bool yas5xx_volatile_reg(struct device *dev, unsigned int reg)
{
- return reg == YAS5XX_ACTUATE_INIT_COIL ||
- reg == YAS5XX_MEASURE ||
- (reg >= YAS5XX_MEASURE_DATA && reg <= YAS5XX_MEASURE_DATA + 8);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
+ int reg_qty;
+ int i;
+
+ if (reg >= YAS5XX_MEASURE_DATA && reg < YAS5XX_MEASURE_DATA + 8)
+ return true;
+
+ /*
+	 * The YAS variants place different registers at the same addresses,
+	 * so the volatile registers have to be checked per variant.
+ */
+ reg_qty = ci->volatile_reg_qty;
+ for (i = 0; i < reg_qty; i++) {
+ if (reg == ci->volatile_reg[i])
+ return true;
+ }
+
+ return false;
}
/* TODO: enable regmap cache, using mark dirty and sync at runtime resume */
@@ -539,11 +779,13 @@ static const struct regmap_config yas5xx_regmap_config = {
};
/**
- * yas53x_extract_calibration() - extracts the a2-a9 and k calibration
+ * yas530_extract_calibration() - extracts the a2-a9 and k calibration
* @data: the bitfield to use
* @c: the calibration to populate
+ *
+ * Used by YAS530, YAS532 and YAS533.
*/
-static void yas53x_extract_calibration(u8 *data, struct yas5xx_calibration *c)
+static void yas530_extract_calibration(u8 *data, struct yas5xx_calibration *c)
{
u64 val = get_unaligned_be64(data);
@@ -581,24 +823,27 @@ static int yas530_get_calibration_data(struct yas5xx *yas5xx)
int ret;
/* Dummy read, first read is ALWAYS wrong */
- ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
if (ret)
return ret;
/* Actual calibration readout */
- ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
if (ret)
return ret;
- dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
+ dev_dbg(yas5xx->dev, "calibration data: %16ph\n", data);
+ /* Contribute calibration data to the input pool for kernel entropy */
add_device_randomness(data, sizeof(data));
+
+ /* Extract version */
yas5xx->version = data[15] & GENMASK(1, 0);
/* Extract the calibration from the bitfield */
c->Cx = data[0] * 6 - 768;
c->Cy1 = data[1] * 6 - 768;
c->Cy2 = data[2] * 6 - 768;
- yas53x_extract_calibration(&data[3], c);
+ yas530_extract_calibration(&data[3], c);
/*
* Extract linearization:
@@ -618,6 +863,7 @@ static int yas530_get_calibration_data(struct yas5xx *yas5xx)
c->r[0] = sign_extend32(FIELD_GET(GENMASK(28, 23), val), 5);
c->r[1] = sign_extend32(FIELD_GET(GENMASK(20, 15), val), 5);
c->r[2] = sign_extend32(FIELD_GET(GENMASK(12, 7), val), 5);
+
return 0;
}
@@ -629,22 +875,22 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
int ret;
/* Dummy read, first read is ALWAYS wrong */
- ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
if (ret)
return ret;
/* Actual calibration readout */
- ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
if (ret)
return ret;
- dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
+ dev_dbg(yas5xx->dev, "calibration data: %14ph\n", data);
/* Sanity check, is this all zeroes? */
- if (memchr_inv(data, 0x00, 13) == NULL) {
- if (!(data[13] & BIT(7)))
- dev_warn(yas5xx->dev, "calibration is blank!\n");
- }
+ if (!memchr_inv(data, 0x00, 13) && !(data[13] & BIT(7)))
+ dev_warn(yas5xx->dev, "calibration is blank!\n");
+ /* Contribute calibration data to the input pool for kernel entropy */
add_device_randomness(data, sizeof(data));
+
/* Only one bit of version info reserved here as far as we know */
yas5xx->version = data[13] & BIT(0);
@@ -652,7 +898,8 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
c->Cx = data[0] * 10 - 1280;
c->Cy1 = data[1] * 10 - 1280;
c->Cy2 = data[2] * 10 - 1280;
- yas53x_extract_calibration(&data[3], c);
+ yas530_extract_calibration(&data[3], c);
+
/*
* Extract linearization:
* Linearization layout in the 32 bits at byte 10:
@@ -675,7 +922,204 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
return 0;
}
-static void yas5xx_dump_calibration(struct yas5xx *yas5xx)
+static int yas537_get_calibration_data(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ u8 data[17];
+ u32 val1, val2, val3, val4;
+ int i, ret;
+
+ /* Writing SRST register */
+ ret = regmap_write(yas5xx->map, YAS537_SRST, BIT(1));
+ if (ret)
+ return ret;
+
+ /* Calibration readout, YAS537 needs one readout only */
+ ret = regmap_bulk_read(yas5xx->map, YAS537_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+ dev_dbg(yas5xx->dev, "calibration data: %17ph\n", data);
+
+ /* Sanity check, is this all zeroes? */
+ if (!memchr_inv(data, 0x00, 16) && !FIELD_GET(GENMASK(5, 0), data[16]))
+ dev_warn(yas5xx->dev, "calibration is blank!\n");
+
+ /* Contribute calibration data to the input pool for kernel entropy */
+ add_device_randomness(data, sizeof(data));
+
+ /* Extract version information */
+ yas5xx->version = FIELD_GET(GENMASK(7, 6), data[16]);
+
+ /* There are two versions of YAS537 behaving differently */
+ switch (yas5xx->version) {
+ case YAS537_VERSION_0:
+ /*
+ * The first version simply writes data back into registers:
+ *
+ * data[0] YAS537_MTC 0x93
+ * data[1] 0x94
+ * data[2] 0x95
+ * data[3] 0x96
+ * data[4] 0x97
+ * data[5] 0x98
+ * data[6] 0x99
+ * data[7] 0x9a
+ * data[8] 0x9b
+ * data[9] 0x9c
+ * data[10] 0x9d
+ * data[11] YAS537_OC 0x9e
+ *
+ * data[12] YAS537_OFFSET_X 0x84
+ * data[13] YAS537_OFFSET_Y1 0x85
+ * data[14] YAS537_OFFSET_Y2 0x86
+ *
+ * data[15] YAS537_HCK 0x88
+ * data[16] YAS537_LCK 0x89
+ */
+ for (i = 0; i < 12; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_MTC + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_OFFSET_X + i,
+ data[i + 12]);
+ if (ret)
+ return ret;
+ yas5xx->hard_offsets[i] = data[i + 12];
+ }
+ for (i = 0; i < 2; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_HCK + i,
+ data[i + 15]);
+ if (ret)
+ return ret;
+ }
+ break;
+ case YAS537_VERSION_1:
+ /*
+ * The second version writes some data into registers but also
+ * extracts calibration coefficients.
+ *
+ * Registers being written:
+ *
+ * data[0] YAS537_MTC 0x93
+ * data[1] YAS537_MTC+1 0x94
+ * data[2] YAS537_MTC+2 0x95
+ * data[3] YAS537_MTC+3 (partially) 0x96
+ *
+ * data[12] YAS537_OFFSET_X 0x84
+ * data[13] YAS537_OFFSET_Y1 0x85
+ * data[14] YAS537_OFFSET_Y2 0x86
+ *
+ * data[15] YAS537_HCK (partially) 0x88
+ * YAS537_LCK (partially) 0x89
+ * data[16] YAS537_OC (partially) 0x9e
+ */
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_MTC + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_OFFSET_X + i,
+ data[i + 12]);
+ if (ret)
+ return ret;
+ yas5xx->hard_offsets[i] = data[i + 12];
+ }
+ /*
+ * Visualization of partially taken data:
+ *
+ * data[3] n 7 6 5 4 3 2 1 0
+ * YAS537_MTC+3 x x x 1 0 0 0 0
+ *
+ * data[15] n 7 6 5 4 3 2 1 0
+ * YAS537_HCK x x x x 0
+ *
+ * data[15] n 7 6 5 4 3 2 1 0
+ * YAS537_LCK x x x x 0
+ *
+ * data[16] n 7 6 5 4 3 2 1 0
+ * YAS537_OC x x x x x x
+ */
+ ret = regmap_write(yas5xx->map, YAS537_MTC + 3,
+ FIELD_PREP(YAS537_MTC3_MASK_PREP,
+ FIELD_GET(YAS537_MTC3_MASK_GET, data[3])) |
+ YAS537_MTC3_ADD_BIT);
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS537_HCK,
+ FIELD_PREP(YAS537_HCK_MASK_PREP,
+ FIELD_GET(YAS537_HCK_MASK_GET, data[15])));
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS537_LCK,
+ FIELD_PREP(YAS537_LCK_MASK_PREP,
+ FIELD_GET(YAS537_LCK_MASK_GET, data[15])));
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS537_OC,
+ FIELD_GET(YAS537_OC_MASK_GET, data[16]));
+ if (ret)
+ return ret;
+ /*
+ * For data extraction, build some blocks. Four 32-bit blocks
+ * look appropriate.
+ *
+ * n 7 6 5 4 3 2 1 0
+ * data[0] 0 [ Cx Cx Cx Cx Cx Cx Cx Cx ] bits 31 .. 24
+ * data[1] 1 [ Cx C1 C1 C1 C1 C1 C1 C1 ] bits 23 .. 16
+ * data[2] 2 [ C1 C1 C2 C2 C2 C2 C2 C2 ] bits 15 .. 8
+ * data[3] 3 [ C2 C2 C2 ] bits 7 .. 0
+ *
+ * n 7 6 5 4 3 2 1 0
+ * data[3] 0 [ a2 a2 a2 a2 a2 ] bits 31 .. 24
+ * data[4] 1 [ a2 a2 a3 a3 a3 a3 a3 a3 ] bits 23 .. 16
+ * data[5] 2 [ a3 a4 a4 a4 a4 a4 a4 a4 ] bits 15 .. 8
+ * data[6] 3 [ a4 ] bits 7 .. 0
+ *
+ * n 7 6 5 4 3 2 1 0
+ * data[6] 0 [ a5 a5 a5 a5 a5 a5 a5 ] bits 31 .. 24
+ * data[7] 1 [ a5 a5 a6 a6 a6 a6 a6 a6 ] bits 23 .. 16
+ * data[8] 2 [ a6 a7 a7 a7 a7 a7 a7 a7 ] bits 15 .. 8
+ * data[9] 3 [ a7 ] bits 7 .. 0
+ *
+ * n 7 6 5 4 3 2 1 0
+ * data[9] 0 [ a8 a8 a8 a8 a8 a8 a8 ] bits 31 .. 24
+ * data[10] 1 [ a9 a9 a9 a9 a9 a9 a9 a9 ] bits 23 .. 16
+ * data[11] 2 [ a9 k k k k k k k ] bits 15 .. 8
+ * data[12] 3 [ ] bits 7 .. 0
+ */
+ val1 = get_unaligned_be32(&data[0]);
+ val2 = get_unaligned_be32(&data[3]);
+ val3 = get_unaligned_be32(&data[6]);
+ val4 = get_unaligned_be32(&data[9]);
+ /* Extract calibration coefficients and modify */
+ c->Cx = FIELD_GET(GENMASK(31, 23), val1) - 256;
+ c->Cy1 = FIELD_GET(GENMASK(22, 14), val1) - 256;
+ c->Cy2 = FIELD_GET(GENMASK(13, 5), val1) - 256;
+ c->a2 = FIELD_GET(GENMASK(28, 22), val2) - 64;
+ c->a3 = FIELD_GET(GENMASK(21, 15), val2) - 64;
+ c->a4 = FIELD_GET(GENMASK(14, 7), val2) - 128;
+ c->a5 = FIELD_GET(GENMASK(30, 22), val3) - 112;
+ c->a6 = FIELD_GET(GENMASK(21, 15), val3) - 64;
+ c->a7 = FIELD_GET(GENMASK(14, 7), val3) - 128;
+ c->a8 = FIELD_GET(GENMASK(30, 24), val4) - 64;
+ c->a9 = FIELD_GET(GENMASK(23, 15), val4) - 112;
+ c->k = FIELD_GET(GENMASK(14, 8), val4);
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown version of YAS537\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Used by YAS530, YAS532 and YAS533 */
+static void yas530_dump_calibration(struct yas5xx *yas5xx)
{
struct yas5xx_calibration *c = &yas5xx->calibration;
@@ -698,20 +1142,42 @@ static void yas5xx_dump_calibration(struct yas5xx *yas5xx)
dev_dbg(yas5xx->dev, "dck = %d\n", c->dck);
}
-static int yas5xx_set_offsets(struct yas5xx *yas5xx, s8 ox, s8 oy1, s8 oy2)
+static void yas537_dump_calibration(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+
+ if (yas5xx->version == YAS537_VERSION_1) {
+ dev_dbg(yas5xx->dev, "Cx = %d\n", c->Cx);
+ dev_dbg(yas5xx->dev, "Cy1 = %d\n", c->Cy1);
+ dev_dbg(yas5xx->dev, "Cy2 = %d\n", c->Cy2);
+ dev_dbg(yas5xx->dev, "a2 = %d\n", c->a2);
+ dev_dbg(yas5xx->dev, "a3 = %d\n", c->a3);
+ dev_dbg(yas5xx->dev, "a4 = %d\n", c->a4);
+ dev_dbg(yas5xx->dev, "a5 = %d\n", c->a5);
+ dev_dbg(yas5xx->dev, "a6 = %d\n", c->a6);
+ dev_dbg(yas5xx->dev, "a7 = %d\n", c->a7);
+ dev_dbg(yas5xx->dev, "a8 = %d\n", c->a8);
+ dev_dbg(yas5xx->dev, "a9 = %d\n", c->a9);
+ dev_dbg(yas5xx->dev, "k = %d\n", c->k);
+ }
+}
+
+/* Used by YAS530, YAS532 and YAS533 */
+static int yas530_set_offsets(struct yas5xx *yas5xx, s8 ox, s8 oy1, s8 oy2)
{
int ret;
- ret = regmap_write(yas5xx->map, YAS5XX_OFFSET_X, ox);
+ ret = regmap_write(yas5xx->map, YAS530_OFFSET_X, ox);
if (ret)
return ret;
- ret = regmap_write(yas5xx->map, YAS5XX_OFFSET_Y1, oy1);
+ ret = regmap_write(yas5xx->map, YAS530_OFFSET_Y1, oy1);
if (ret)
return ret;
- return regmap_write(yas5xx->map, YAS5XX_OFFSET_Y2, oy2);
+ return regmap_write(yas5xx->map, YAS530_OFFSET_Y2, oy2);
}
-static s8 yas5xx_adjust_offset(s8 old, int bit, u16 center, u16 measure)
+/* Used by YAS530, YAS532 and YAS533 */
+static s8 yas530_adjust_offset(s8 old, int bit, u16 center, u16 measure)
{
if (measure > center)
return old + BIT(bit);
@@ -720,8 +1186,10 @@ static s8 yas5xx_adjust_offset(s8 old, int bit, u16 center, u16 measure)
return old;
}
-static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
+/* Used by YAS530, YAS532 and YAS533 */
+static int yas530_measure_offsets(struct yas5xx *yas5xx)
{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
int ret;
u16 center;
u16 t, x, y1, y2;
@@ -729,12 +1197,12 @@ static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
int i;
/* Actuate the init coil and measure offsets */
- ret = regmap_write(yas5xx->map, YAS5XX_ACTUATE_INIT_COIL, 0);
+ ret = regmap_write(yas5xx->map, YAS530_ACTUATE_INIT_COIL, 0);
if (ret)
return ret;
/* When the initcoil is active this should be around the center */
- switch (yas5xx->devid) {
+ switch (ci->devid) {
case YAS530_DEVICE_ID:
center = YAS530_DATA_CENTER;
break;
@@ -763,26 +1231,26 @@ static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
oy2 = 0;
for (i = 4; i >= 0; i--) {
- ret = yas5xx_set_offsets(yas5xx, ox, oy1, oy2);
+ ret = yas530_set_offsets(yas5xx, ox, oy1, oy2);
if (ret)
return ret;
- ret = yas5xx_measure(yas5xx, &t, &x, &y1, &y2);
+ ret = yas530_measure(yas5xx, &t, &x, &y1, &y2);
if (ret)
return ret;
dev_dbg(yas5xx->dev, "measurement %d: x=%d, y1=%d, y2=%d\n",
5-i, x, y1, y2);
- ox = yas5xx_adjust_offset(ox, i, center, x);
- oy1 = yas5xx_adjust_offset(oy1, i, center, y1);
- oy2 = yas5xx_adjust_offset(oy2, i, center, y2);
+ ox = yas530_adjust_offset(ox, i, center, x);
+ oy1 = yas530_adjust_offset(oy1, i, center, y1);
+ oy2 = yas530_adjust_offset(oy2, i, center, y2);
}
/* Needed for calibration algorithm */
yas5xx->hard_offsets[0] = ox;
yas5xx->hard_offsets[1] = oy1;
yas5xx->hard_offsets[2] = oy2;
- ret = yas5xx_set_offsets(yas5xx, ox, oy1, oy2);
+ ret = yas530_set_offsets(yas5xx, ox, oy1, oy2);
if (ret)
return ret;
@@ -791,35 +1259,139 @@ static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
return 0;
}
-static int yas5xx_power_on(struct yas5xx *yas5xx)
+/* Used by YAS530, YAS532 and YAS533 */
+static int yas530_power_on(struct yas5xx *yas5xx)
{
unsigned int val;
int ret;
/* Zero the test registers */
- ret = regmap_write(yas5xx->map, YAS5XX_TEST1, 0);
+ ret = regmap_write(yas5xx->map, YAS530_TEST1, 0);
if (ret)
return ret;
- ret = regmap_write(yas5xx->map, YAS5XX_TEST2, 0);
+ ret = regmap_write(yas5xx->map, YAS530_TEST2, 0);
if (ret)
return ret;
/* Set up for no interrupts, calibrated clock divider */
val = FIELD_PREP(YAS5XX_CONFIG_CCK_MASK, yas5xx->calibration.dck);
- ret = regmap_write(yas5xx->map, YAS5XX_CONFIG, val);
+ ret = regmap_write(yas5xx->map, YAS530_CONFIG, val);
if (ret)
return ret;
/* Measure interval 0 (back-to-back?) */
- return regmap_write(yas5xx->map, YAS5XX_MEASURE_INTERVAL, 0);
+ return regmap_write(yas5xx->map, YAS530_MEASURE_INTERVAL, 0);
}
+static int yas537_power_on(struct yas5xx *yas5xx)
+{
+ __be16 buf;
+ int ret;
+ u8 intrvl;
+
+ /* Writing ADCCAL and TRM registers */
+ buf = cpu_to_be16(GENMASK(9, 3));
+ ret = regmap_bulk_write(yas5xx->map, YAS537_ADCCAL, &buf, sizeof(buf));
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS537_TRM, GENMASK(7, 0));
+ if (ret)
+ return ret;
+
+ /* The interval value is static in regular operation */
+ intrvl = (YAS537_DEFAULT_SENSOR_DELAY_MS * MILLI
+ - YAS537_MEASURE_TIME_WORST_US) / 4100;
+ ret = regmap_write(yas5xx->map, YAS537_MEASURE_INTERVAL, intrvl);
+ if (ret)
+ return ret;
+
+ /* The average value is also static in regular operation */
+ ret = regmap_write(yas5xx->map, YAS537_AVR, YAS537_MAG_AVERAGE_32_MASK);
+ if (ret)
+ return ret;
+
+ /* Perform the "rcoil" part but skip the "last_after_rcoil" read */
+ ret = regmap_write(yas5xx->map, YAS537_CONFIG, BIT(3));
+ if (ret)
+ return ret;
+
+ /* Wait until the coil has ramped up */
+ usleep_range(YAS537_MAG_RCOIL_TIME_US, YAS537_MAG_RCOIL_TIME_US + 100);
+
+ return 0;
+}
+
+static const struct yas5xx_chip_info yas5xx_chip_info_tbl[] = {
+ [yas530] = {
+ .devid = YAS530_DEVICE_ID,
+ .product_name = "YAS530 MS-3E",
+ .version_names = { "A", "B" },
+ .volatile_reg = yas530_volatile_reg,
+ .volatile_reg_qty = ARRAY_SIZE(yas530_volatile_reg),
+ .scaling_val2 = 100000000, /* picotesla to Gauss */
+ .t_ref = 182, /* counts */
+ .min_temp_x10 = -620, /* 1/10:s degrees Celsius */
+ .get_measure = yas530_get_measure,
+ .get_calibration_data = yas530_get_calibration_data,
+ .dump_calibration = yas530_dump_calibration,
+ .measure_offsets = yas530_measure_offsets,
+ .power_on = yas530_power_on,
+ },
+ [yas532] = {
+ .devid = YAS532_DEVICE_ID,
+ .product_name = "YAS532 MS-3R",
+ .version_names = { "AB", "AC" },
+ .volatile_reg = yas530_volatile_reg,
+ .volatile_reg_qty = ARRAY_SIZE(yas530_volatile_reg),
+ .scaling_val2 = 100000, /* nanotesla to Gauss */
+ .t_ref = 390, /* counts */
+ .min_temp_x10 = -500, /* 1/10:s degrees Celsius */
+ .get_measure = yas530_get_measure,
+ .get_calibration_data = yas532_get_calibration_data,
+ .dump_calibration = yas530_dump_calibration,
+ .measure_offsets = yas530_measure_offsets,
+ .power_on = yas530_power_on,
+ },
+ [yas533] = {
+ .devid = YAS532_DEVICE_ID,
+ .product_name = "YAS533 MS-3F",
+ .version_names = { "AB", "AC" },
+ .volatile_reg = yas530_volatile_reg,
+ .volatile_reg_qty = ARRAY_SIZE(yas530_volatile_reg),
+ .scaling_val2 = 100000, /* nanotesla to Gauss */
+ .t_ref = 390, /* counts */
+ .min_temp_x10 = -500, /* 1/10:s degrees Celsius */
+ .get_measure = yas530_get_measure,
+ .get_calibration_data = yas532_get_calibration_data,
+ .dump_calibration = yas530_dump_calibration,
+ .measure_offsets = yas530_measure_offsets,
+ .power_on = yas530_power_on,
+ },
+ [yas537] = {
+ .devid = YAS537_DEVICE_ID,
+ .product_name = "YAS537 MS-3T",
+ .version_names = { "v0", "v1" }, /* version naming unknown */
+ .volatile_reg = yas537_volatile_reg,
+ .volatile_reg_qty = ARRAY_SIZE(yas537_volatile_reg),
+ .scaling_val2 = 100000, /* nanotesla to Gauss */
+ .t_ref = 8120, /* counts */
+ .min_temp_x10 = -3860, /* 1/10:s degrees Celsius */
+ .get_measure = yas537_get_measure,
+ .get_calibration_data = yas537_get_calibration_data,
+ .dump_calibration = yas537_dump_calibration,
+ /* .measure_offsets is not needed for yas537 */
+ .power_on = yas537_power_on,
+ },
+};
+
static int yas5xx_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct iio_dev *indio_dev;
struct device *dev = &i2c->dev;
struct yas5xx *yas5xx;
+ const struct yas5xx_chip_info *ci;
+ int id_check;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*yas5xx));
@@ -843,10 +1415,8 @@ static int yas5xx_probe(struct i2c_client *i2c,
return dev_err_probe(dev, ret, "cannot get regulators\n");
ret = regulator_bulk_enable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
- if (ret) {
- dev_err(dev, "cannot enable regulators\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot enable regulators\n");
/* See comment in runtime resume callback */
usleep_range(31000, 40000);
@@ -854,57 +1424,55 @@ static int yas5xx_probe(struct i2c_client *i2c,
/* This will take the device out of reset if need be */
yas5xx->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(yas5xx->reset)) {
- ret = dev_err_probe(dev, PTR_ERR(yas5xx->reset),
- "failed to get reset line\n");
+ ret = dev_err_probe(dev, PTR_ERR(yas5xx->reset), "failed to get reset line\n");
goto reg_off;
}
yas5xx->map = devm_regmap_init_i2c(i2c, &yas5xx_regmap_config);
if (IS_ERR(yas5xx->map)) {
- dev_err(dev, "failed to allocate register map\n");
- ret = PTR_ERR(yas5xx->map);
+ ret = dev_err_probe(dev, PTR_ERR(yas5xx->map), "failed to allocate register map\n");
goto assert_reset;
}
- ret = regmap_read(yas5xx->map, YAS5XX_DEVICE_ID, &yas5xx->devid);
+ ci = device_get_match_data(dev);
+ if (!ci)
+ ci = (const struct yas5xx_chip_info *)id->driver_data;
+ yas5xx->chip_info = ci;
+
+ ret = regmap_read(yas5xx->map, YAS5XX_DEVICE_ID, &id_check);
if (ret)
goto assert_reset;
- switch (yas5xx->devid) {
- case YAS530_DEVICE_ID:
- ret = yas530_get_calibration_data(yas5xx);
- if (ret)
- goto assert_reset;
- dev_info(dev, "detected YAS530 MS-3E %s",
- yas5xx->version ? "B" : "A");
- strncpy(yas5xx->name, "yas530", sizeof(yas5xx->name));
- break;
- case YAS532_DEVICE_ID:
- ret = yas532_get_calibration_data(yas5xx);
- if (ret)
- goto assert_reset;
- dev_info(dev, "detected YAS532/YAS533 MS-3R/F %s",
- yas5xx->version ? "AC" : "AB");
- strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
- break;
- default:
- ret = -ENODEV;
- dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
+ if (id_check != ci->devid) {
+ ret = dev_err_probe(dev, -ENODEV,
+ "device ID %02x doesn't match %s\n",
+ id_check, id->name);
goto assert_reset;
}
- yas5xx_dump_calibration(yas5xx);
- ret = yas5xx_power_on(yas5xx);
+ ret = ci->get_calibration_data(yas5xx);
if (ret)
goto assert_reset;
- ret = yas5xx_meaure_offsets(yas5xx);
+
+ dev_info(dev, "detected %s %s\n", ci->product_name,
+ ci->version_names[yas5xx->version]);
+
+ ci->dump_calibration(yas5xx);
+
+ ret = ci->power_on(yas5xx);
if (ret)
goto assert_reset;
+ if (ci->measure_offsets) {
+ ret = ci->measure_offsets(yas5xx);
+ if (ret)
+ goto assert_reset;
+ }
+
indio_dev->info = &yas5xx_info;
indio_dev->available_scan_masks = yas5xx_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = yas5xx->name;
+ indio_dev->name = id->name;
indio_dev->channels = yas5xx_channels;
indio_dev->num_channels = ARRAY_SIZE(yas5xx_channels);
@@ -912,13 +1480,13 @@ static int yas5xx_probe(struct i2c_client *i2c,
yas5xx_handle_trigger,
NULL);
if (ret) {
- dev_err(dev, "triggered buffer setup failed\n");
+ dev_err_probe(dev, ret, "triggered buffer setup failed\n");
goto assert_reset;
}
ret = iio_device_register(indio_dev);
if (ret) {
- dev_err(dev, "device register failed\n");
+ dev_err_probe(dev, ret, "device register failed\n");
goto cleanup_buffer;
}
@@ -978,6 +1546,7 @@ static int yas5xx_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct yas5xx *yas5xx = iio_priv(indio_dev);
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
@@ -994,7 +1563,7 @@ static int yas5xx_runtime_resume(struct device *dev)
usleep_range(31000, 40000);
gpiod_set_value_cansleep(yas5xx->reset, 0);
- ret = yas5xx_power_on(yas5xx);
+ ret = ci->power_on(yas5xx);
if (ret) {
dev_err(dev, "cannot power on\n");
goto out_reset;
@@ -1013,17 +1582,19 @@ static DEFINE_RUNTIME_DEV_PM_OPS(yas5xx_dev_pm_ops, yas5xx_runtime_suspend,
yas5xx_runtime_resume, NULL);
static const struct i2c_device_id yas5xx_id[] = {
- {"yas530", },
- {"yas532", },
- {"yas533", },
+ {"yas530", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas530] },
+ {"yas532", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas532] },
+ {"yas533", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas533] },
+ {"yas537", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas537] },
{}
};
MODULE_DEVICE_TABLE(i2c, yas5xx_id);
static const struct of_device_id yas5xx_of_match[] = {
- { .compatible = "yamaha,yas530", },
- { .compatible = "yamaha,yas532", },
- { .compatible = "yamaha,yas533", },
+ { .compatible = "yamaha,yas530", &yas5xx_chip_info_tbl[yas530] },
+ { .compatible = "yamaha,yas532", &yas5xx_chip_info_tbl[yas532] },
+ { .compatible = "yamaha,yas533", &yas5xx_chip_info_tbl[yas533] },
+ { .compatible = "yamaha,yas537", &yas5xx_chip_info_tbl[yas537] },
{}
};
MODULE_DEVICE_TABLE(of, yas5xx_of_match);
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 0ff756cea63a..c9453389e4f7 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -17,14 +17,14 @@ config ABP060MG
will be called abp060mg.
config BMP280
- tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
+ tristate "Bosch Sensortec BMP180/BMP280/BMP380 pressure sensor I2C driver"
depends on (I2C || SPI_MASTER)
select REGMAP
select BMP280_I2C if (I2C)
select BMP280_SPI if (SPI_MASTER)
help
- Say yes here to build support for Bosch Sensortec BMP180 and BMP280
- pressure and temperature sensors. Also supports the BME280 with
+ Say yes here to build support for Bosch Sensortec BMP180, BMP280 and
+ BMP380 pressure and temperature sensors. Also supports the BME280 with
an additional humidity sensor channel.
To compile this driver as a module, choose M here: the core module
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index fe7aa81e7cc9..c0aff78489b4 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -9,13 +9,22 @@
* Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor.
*
* Datasheet:
- * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf
- * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf
- * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME280_DS001-11.pdf
+ * https://cdn-shop.adafruit.com/datasheets/BST-BMP180-DS000-09.pdf
+ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp280-ds001.pdf
+ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf
+ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp388-ds001.pdf
+ *
+ * Notice:
+ * The link to the bmp180 datasheet points to an outdated version missing these changes:
+ * - Changed document referral from ANP015 to BST-MPS-AN004-00 on page 26
+ * - Updated equation for B3 param on section 3.5 to ((((long)AC1 * 4 + X3) << oss) + 2) / 4
+ * - Updated RoHS directive to 2011/65/EU effective 8 June 2011 on page 26
*/
#define pr_fmt(fmt) "bmp280: " fmt
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -30,6 +39,8 @@
#include <linux/pm_runtime.h>
#include <linux/random.h>
+#include <asm/unaligned.h>
+
#include "bmp280.h"
/*
@@ -74,12 +85,51 @@ struct bmp280_calib {
s8 H6;
};
+/* See datasheet Section 3.11.1. */
+struct bmp380_calib {
+ u16 T1;
+ u16 T2;
+ s8 T3;
+ s16 P1;
+ s16 P2;
+ s8 P3;
+ s8 P4;
+ u16 P5;
+ u16 P6;
+ s8 P7;
+ s8 P8;
+ s16 P9;
+ s8 P10;
+ s8 P11;
+};
+
static const char *const bmp280_supply_names[] = {
"vddd", "vdda"
};
#define BMP280_NUM_SUPPLIES ARRAY_SIZE(bmp280_supply_names)
+enum bmp380_odr {
+ BMP380_ODR_200HZ,
+ BMP380_ODR_100HZ,
+ BMP380_ODR_50HZ,
+ BMP380_ODR_25HZ,
+ BMP380_ODR_12_5HZ,
+ BMP380_ODR_6_25HZ,
+ BMP380_ODR_3_125HZ,
+ BMP380_ODR_1_5625HZ,
+ BMP380_ODR_0_78HZ,
+ BMP380_ODR_0_39HZ,
+ BMP380_ODR_0_2HZ,
+ BMP380_ODR_0_1HZ,
+ BMP380_ODR_0_05HZ,
+ BMP380_ODR_0_02HZ,
+ BMP380_ODR_0_01HZ,
+ BMP380_ODR_0_006HZ,
+ BMP380_ODR_0_003HZ,
+ BMP380_ODR_0_0015HZ,
+};
+
struct bmp280_data {
struct device *dev;
struct mutex lock;
@@ -90,6 +140,7 @@ struct bmp280_data {
union {
struct bmp180_calib bmp180;
struct bmp280_calib bmp280;
+ struct bmp380_calib bmp380;
} calib;
struct regulator_bulk_data supplies[BMP280_NUM_SUPPLIES];
unsigned int start_up_time; /* in microseconds */
@@ -98,36 +149,99 @@ struct bmp280_data {
u8 oversampling_press;
u8 oversampling_temp;
u8 oversampling_humid;
+ u8 iir_filter_coeff;
+
+ /*
+ * BMP380 devices introduce sampling frequency configuration. See
+ * datasheet sections 3.3.3. and 4.3.19 for more details.
+ *
+ * BMx280 devices allowed indirect configuration of the sampling frequency
+ * by changing the t_standby duration between measurements, as detailed in
+ * section 3.6.3 of the datasheet.
+ */
+ int sampling_freq;
/*
* Carryover value from temperature conversion, used in pressure
* calculation.
*/
s32 t_fine;
+
+ /*
+ * DMA (thus cache coherency maintenance) may require the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ /* Sensor data buffer */
+ u8 buf[3];
+ /* Calibration data buffers */
+ __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
+ __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
+ u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT];
+ /* Miscellaneous, endianness-aware data buffers */
+ __le16 le16;
+ __be16 be16;
+ } __aligned(IIO_DMA_MINALIGN);
};
struct bmp280_chip_info {
+ unsigned int id_reg;
+
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ unsigned int start_up_time;
+
const int *oversampling_temp_avail;
int num_oversampling_temp_avail;
+ int oversampling_temp_default;
const int *oversampling_press_avail;
int num_oversampling_press_avail;
+ int oversampling_press_default;
const int *oversampling_humid_avail;
int num_oversampling_humid_avail;
+ int oversampling_humid_default;
+
+ const int *iir_filter_coeffs_avail;
+ int num_iir_filter_coeffs_avail;
+ int iir_filter_coeff_default;
+
+ const int (*sampling_freq_avail)[2];
+ int num_sampling_freq_avail;
+ int sampling_freq_default;
int (*chip_config)(struct bmp280_data *);
int (*read_temp)(struct bmp280_data *, int *);
int (*read_press)(struct bmp280_data *, int *, int *);
int (*read_humid)(struct bmp280_data *, int *, int *);
+ int (*read_calib)(struct bmp280_data *);
};
/*
* These enums are used for indexing into the array of compensation
* parameters for BMP280.
*/
-enum { T1, T2, T3 };
-enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
+enum { T1, T2, T3, P1, P2, P3, P4, P5, P6, P7, P8, P9 };
+
+enum {
+ /* Temperature calib indexes */
+ BMP380_T1 = 0,
+ BMP380_T2 = 2,
+ BMP380_T3 = 4,
+ /* Pressure calib indexes */
+ BMP380_P1 = 5,
+ BMP380_P2 = 7,
+ BMP380_P3 = 9,
+ BMP380_P4 = 10,
+ BMP380_P5 = 11,
+ BMP380_P6 = 13,
+ BMP380_P7 = 15,
+ BMP380_P8 = 16,
+ BMP380_P9 = 17,
+ BMP380_P10 = 19,
+ BMP380_P11 = 20,
+};
static const struct iio_chan_spec bmp280_channels[] = {
{
@@ -147,56 +261,81 @@ static const struct iio_chan_spec bmp280_channels[] = {
},
};
-static int bmp280_read_calib(struct bmp280_data *data,
- struct bmp280_calib *calib,
- unsigned int chip)
+static const struct iio_chan_spec bmp380_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ },
+};
+
+static int bmp280_read_calib(struct bmp280_data *data)
{
+ struct bmp280_calib *calib = &data->calib.bmp280;
int ret;
- unsigned int tmp;
- __le16 l16;
- __be16 b16;
- struct device *dev = data->dev;
- __le16 t_buf[BMP280_COMP_TEMP_REG_COUNT / 2];
- __le16 p_buf[BMP280_COMP_PRESS_REG_COUNT / 2];
- /* Read temperature calibration values. */
+
+ /* Read temperature and pressure calibration values. */
ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
- t_buf, BMP280_COMP_TEMP_REG_COUNT);
+ data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
if (ret < 0) {
dev_err(data->dev,
- "failed to read temperature calibration parameters\n");
+ "failed to read temperature and pressure calibration parameters\n");
return ret;
}
- /* Toss the temperature calibration data into the entropy pool */
- add_device_randomness(t_buf, sizeof(t_buf));
+ /* Toss the temperature and pressure calibration data into the entropy pool */
+ add_device_randomness(data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
+
+ /* Parse temperature calibration values. */
+ calib->T1 = le16_to_cpu(data->bmp280_cal_buf[T1]);
+ calib->T2 = le16_to_cpu(data->bmp280_cal_buf[T2]);
+ calib->T3 = le16_to_cpu(data->bmp280_cal_buf[T3]);
+
+ /* Parse pressure calibration values. */
+ calib->P1 = le16_to_cpu(data->bmp280_cal_buf[P1]);
+ calib->P2 = le16_to_cpu(data->bmp280_cal_buf[P2]);
+ calib->P3 = le16_to_cpu(data->bmp280_cal_buf[P3]);
+ calib->P4 = le16_to_cpu(data->bmp280_cal_buf[P4]);
+ calib->P5 = le16_to_cpu(data->bmp280_cal_buf[P5]);
+ calib->P6 = le16_to_cpu(data->bmp280_cal_buf[P6]);
+ calib->P7 = le16_to_cpu(data->bmp280_cal_buf[P7]);
+ calib->P8 = le16_to_cpu(data->bmp280_cal_buf[P8]);
+ calib->P9 = le16_to_cpu(data->bmp280_cal_buf[P9]);
- calib->T1 = le16_to_cpu(t_buf[T1]);
- calib->T2 = le16_to_cpu(t_buf[T2]);
- calib->T3 = le16_to_cpu(t_buf[T3]);
+ return 0;
+}
- /* Read pressure calibration values. */
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
- p_buf, BMP280_COMP_PRESS_REG_COUNT);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to read pressure calibration parameters\n");
+static int bme280_read_calib(struct bmp280_data *data)
+{
+ struct bmp280_calib *calib = &data->calib.bmp280;
+ struct device *dev = data->dev;
+ unsigned int tmp;
+ int ret;
+
+ /* Load shared calibration params with bmp280 first */
+ ret = bmp280_read_calib(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to read common bmp280 calibration parameters\n");
return ret;
}
- /* Toss the pressure calibration data into the entropy pool */
- add_device_randomness(p_buf, sizeof(p_buf));
-
- calib->P1 = le16_to_cpu(p_buf[P1]);
- calib->P2 = le16_to_cpu(p_buf[P2]);
- calib->P3 = le16_to_cpu(p_buf[P3]);
- calib->P4 = le16_to_cpu(p_buf[P4]);
- calib->P5 = le16_to_cpu(p_buf[P5]);
- calib->P6 = le16_to_cpu(p_buf[P6]);
- calib->P7 = le16_to_cpu(p_buf[P7]);
- calib->P8 = le16_to_cpu(p_buf[P8]);
- calib->P9 = le16_to_cpu(p_buf[P9]);
-
/*
* Read humidity calibration values.
* Due to some odd register addressing we cannot just
@@ -204,8 +343,6 @@ static int bmp280_read_calib(struct bmp280_data *data,
* value separately and sometimes do some bit shifting...
* Humidity data is only available on BME280.
*/
- if (chip != BME280_CHIP_ID)
- return 0;
ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &tmp);
if (ret < 0) {
@@ -214,12 +351,13 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H1 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &l16, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2,
+ &data->le16, sizeof(data->le16));
if (ret < 0) {
dev_err(dev, "failed to read H2 comp value\n");
return ret;
}
- calib->H2 = sign_extend32(le16_to_cpu(l16), 15);
+ calib->H2 = sign_extend32(le16_to_cpu(data->le16), 15);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
if (ret < 0) {
@@ -228,20 +366,22 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H3 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &b16, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4,
+ &data->be16, sizeof(data->be16));
if (ret < 0) {
dev_err(dev, "failed to read H4 comp value\n");
return ret;
}
- calib->H4 = sign_extend32(((be16_to_cpu(b16) >> 4) & 0xff0) |
- (be16_to_cpu(b16) & 0xf), 11);
+ calib->H4 = sign_extend32(((be16_to_cpu(data->be16) >> 4) & 0xff0) |
+ (be16_to_cpu(data->be16) & 0xf), 11);
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &l16, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5,
+ &data->le16, sizeof(data->le16));
if (ret < 0) {
dev_err(dev, "failed to read H5 comp value\n");
return ret;
}
- calib->H5 = sign_extend32(((le16_to_cpu(l16) >> 4) & 0xfff), 11);
+ calib->H5 = sign_extend32(FIELD_GET(BMP280_COMP_H5_MASK, le16_to_cpu(data->le16)), 11);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
if (ret < 0) {
@@ -261,8 +401,8 @@ static int bmp280_read_calib(struct bmp280_data *data,
static u32 bmp280_compensate_humidity(struct bmp280_data *data,
s32 adc_humidity)
{
- s32 var;
struct bmp280_calib *calib = &data->calib.bmp280;
+ s32 var;
var = ((s32)data->t_fine) - (s32)76800;
var = ((((adc_humidity << 14) - (calib->H4 << 20) - (calib->H5 * var))
@@ -286,8 +426,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
static s32 bmp280_compensate_temp(struct bmp280_data *data,
s32 adc_temp)
{
- s32 var1, var2;
struct bmp280_calib *calib = &data->calib.bmp280;
+ s32 var1, var2;
var1 = (((adc_temp >> 3) - ((s32)calib->T1 << 1)) *
((s32)calib->T2)) >> 11;
@@ -309,8 +449,8 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
static u32 bmp280_compensate_press(struct bmp280_data *data,
s32 adc_press)
{
- s64 var1, var2, p;
struct bmp280_calib *calib = &data->calib.bmp280;
+ s64 var1, var2, p;
var1 = ((s64)data->t_fine) - 128000;
var2 = var1 * var1 * (s64)calib->P6;
@@ -335,17 +475,17 @@ static u32 bmp280_compensate_press(struct bmp280_data *data,
static int bmp280_read_temp(struct bmp280_data *data,
int *val)
{
- int ret;
- __be32 tmp = 0;
s32 adc_temp, comp_temp;
+ int ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB, &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
+ data->buf, sizeof(data->buf));
if (ret < 0) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
}
- adc_temp = be32_to_cpu(tmp) >> 12;
+ adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(data->buf));
if (adc_temp == BMP280_TEMP_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading temperature skipped\n");
@@ -368,23 +508,23 @@ static int bmp280_read_temp(struct bmp280_data *data,
static int bmp280_read_press(struct bmp280_data *data,
int *val, int *val2)
{
- int ret;
- __be32 tmp = 0;
- s32 adc_press;
u32 comp_press;
+ s32 adc_press;
+ int ret;
/* Read and compensate temperature so we get a reading of t_fine. */
ret = bmp280_read_temp(data, NULL);
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ data->buf, sizeof(data->buf));
if (ret < 0) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
}
- adc_press = be32_to_cpu(tmp) >> 12;
+ adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(data->buf));
if (adc_press == BMP280_PRESS_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading pressure skipped\n");
@@ -400,23 +540,23 @@ static int bmp280_read_press(struct bmp280_data *data,
static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
{
- __be16 tmp;
- int ret;
- s32 adc_humidity;
u32 comp_humidity;
+ s32 adc_humidity;
+ int ret;
/* Read and compensate temperature so we get a reading of t_fine. */
ret = bmp280_read_temp(data, NULL);
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
+ &data->be16, sizeof(data->be16));
if (ret < 0) {
dev_err(data->dev, "failed to read humidity\n");
return ret;
}
- adc_humidity = be16_to_cpu(tmp);
+ adc_humidity = be16_to_cpu(data->be16);
if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading humidity skipped\n");
@@ -433,8 +573,8 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- int ret;
struct bmp280_data *data = iio_priv(indio_dev);
+ int ret;
pm_runtime_get_sync(data->dev);
mutex_lock(&data->lock);
@@ -475,6 +615,25 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!data->chip_info->sampling_freq_avail) {
+ ret = -EINVAL;
+ break;
+ }
+
+ *val = data->chip_info->sampling_freq_avail[data->sampling_freq][0];
+ *val2 = data->chip_info->sampling_freq_avail[data->sampling_freq][1];
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (!data->chip_info->iir_filter_coeffs_avail) {
+ ret = -EINVAL;
+ break;
+ }
+
+ *val = (1 << data->iir_filter_coeff) - 1;
+ ret = IIO_VAL_INT;
+ break;
default:
ret = -EINVAL;
break;
@@ -490,15 +649,23 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
int val)
{
- int i;
const int *avail = data->chip_info->oversampling_humid_avail;
const int n = data->chip_info->num_oversampling_humid_avail;
+ int ret, prev;
+ int i;
for (i = 0; i < n; i++) {
if (avail[i] == val) {
+ prev = data->oversampling_humid;
data->oversampling_humid = ilog2(val);
- return data->chip_info->chip_config(data);
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->oversampling_humid = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
}
}
return -EINVAL;
@@ -507,15 +674,23 @@ static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
int val)
{
- int i;
const int *avail = data->chip_info->oversampling_temp_avail;
const int n = data->chip_info->num_oversampling_temp_avail;
+ int ret, prev;
+ int i;
for (i = 0; i < n; i++) {
if (avail[i] == val) {
+ prev = data->oversampling_temp;
data->oversampling_temp = ilog2(val);
- return data->chip_info->chip_config(data);
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->oversampling_temp = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
}
}
return -EINVAL;
@@ -524,15 +699,73 @@ static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
int val)
{
- int i;
const int *avail = data->chip_info->oversampling_press_avail;
const int n = data->chip_info->num_oversampling_press_avail;
+ int ret, prev;
+ int i;
for (i = 0; i < n; i++) {
if (avail[i] == val) {
+ prev = data->oversampling_press;
data->oversampling_press = ilog2(val);
- return data->chip_info->chip_config(data);
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->oversampling_press = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_sampling_frequency(struct bmp280_data *data,
+ int val, int val2)
+{
+ const int (*avail)[2] = data->chip_info->sampling_freq_avail;
+ const int n = data->chip_info->num_sampling_freq_avail;
+ int ret, prev;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i][0] == val && avail[i][1] == val2) {
+ prev = data->sampling_freq;
+ data->sampling_freq = i;
+
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->sampling_freq = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_iir_filter_coeffs(struct bmp280_data *data, int val)
+{
+ const int *avail = data->chip_info->iir_filter_coeffs_avail;
+ const int n = data->chip_info->num_iir_filter_coeffs_avail;
+ int ret, prev;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] - 1 == val) {
+ prev = data->iir_filter_coeff;
+ data->iir_filter_coeff = i;
+
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->iir_filter_coeff = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+
+ }
+ return 0;
}
}
return -EINVAL;
@@ -542,9 +775,15 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- int ret = 0;
struct bmp280_data *data = iio_priv(indio_dev);
+ int ret = 0;
+ /*
+ * Helper functions to update the sensor running configuration.
+ * If an error happens while applying new settings, they will try to
+ * restore the previous parameters to ensure the sensor is left in a
+ * known working configuration.
+ */
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
pm_runtime_get_sync(data->dev);
@@ -567,6 +806,22 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ pm_runtime_get_sync(data->dev);
+ mutex_lock(&data->lock);
+ ret = bmp280_write_sampling_frequency(data, val, val2);
+ mutex_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+ break;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ pm_runtime_get_sync(data->dev);
+ mutex_lock(&data->lock);
+ ret = bmp280_write_iir_filter_coeffs(data, val);
+ mutex_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+ break;
default:
return -EINVAL;
}
@@ -597,6 +852,17 @@ static int bmp280_read_avail(struct iio_dev *indio_dev,
}
*type = IIO_VAL_INT;
return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (const int *)data->chip_info->sampling_freq_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = data->chip_info->num_sampling_freq_avail;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = data->chip_info->iir_filter_coeffs_avail;
+ *type = IIO_VAL_INT;
+ *length = data->chip_info->num_iir_filter_coeffs_avail;
+ return IIO_AVAIL_LIST;
default:
return -EINVAL;
}
@@ -610,9 +876,9 @@ static const struct iio_info bmp280_info = {
static int bmp280_chip_config(struct bmp280_data *data)
{
+ u8 osrs = FIELD_PREP(BMP280_OSRS_TEMP_MASK, data->oversampling_temp + 1) |
+ FIELD_PREP(BMP280_OSRS_PRESS_MASK, data->oversampling_press + 1);
int ret;
- u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
- BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
BMP280_OSRS_TEMP_MASK |
@@ -640,21 +906,39 @@ static int bmp280_chip_config(struct bmp280_data *data)
static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
static const struct bmp280_chip_info bmp280_chip_info = {
+ .id_reg = BMP280_REG_ID,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+ .num_channels = 2,
+
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ /*
+ * Oversampling config values on BMx280 have one additional setting
+ * that other generations of the family don't have:
+ * the value 0 means the measurement is bypassed instead of
+ * oversampling set to x1.
+ *
+ * To account for this difference, and to preserve the same common
+ * config logic, this is handled later in the chip_config callback by
+ * incrementing the oversampling setting by one unit.
+ */
+ .oversampling_temp_default = BMP280_OSRS_TEMP_2X - 1,
.oversampling_press_avail = bmp280_oversampling_avail,
.num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ .oversampling_press_default = BMP280_OSRS_PRESS_16X - 1,
.chip_config = bmp280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
+ .read_calib = bmp280_read_calib,
};
static int bme280_chip_config(struct bmp280_data *data)
{
+ u8 osrs = FIELD_PREP(BMP280_OSRS_HUMIDITY_MASK, data->oversampling_humid + 1);
int ret;
- u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
/*
* Oversampling of humidity must be set before oversampling of
@@ -670,27 +954,405 @@ static int bme280_chip_config(struct bmp280_data *data)
}
static const struct bmp280_chip_info bme280_chip_info = {
+ .id_reg = BMP280_REG_ID,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+ .num_channels = 3,
+
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ .oversampling_temp_default = BMP280_OSRS_TEMP_2X - 1,
.oversampling_press_avail = bmp280_oversampling_avail,
.num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ .oversampling_press_default = BMP280_OSRS_PRESS_16X - 1,
.oversampling_humid_avail = bmp280_oversampling_avail,
.num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ .oversampling_humid_default = BMP280_OSRS_HUMIDITY_16X - 1,
.chip_config = bme280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
.read_humid = bmp280_read_humid,
+ .read_calib = bme280_read_calib,
};
-static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
+/*
+ * Helper function to send a command to BMP3XX sensors.
+ *
+ * Sensor processes commands written to the CMD register and signals
+ * execution result through "cmd_rdy" and "cmd_error" flags available on
+ * STATUS and ERROR registers.
+ */
+static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
+{
+ unsigned int reg;
+ int ret;
+
+ /* Check if device is ready to process a command */
+ ret = regmap_read(data->regmap, BMP380_REG_STATUS, &reg);
+ if (ret) {
+ dev_err(data->dev, "failed to read error register\n");
+ return ret;
+ }
+ if (!(reg & BMP380_STATUS_CMD_RDY_MASK)) {
+ dev_err(data->dev, "device is not ready to accept commands\n");
+ return -EBUSY;
+ }
+
+ /* Send command to process */
+ ret = regmap_write(data->regmap, BMP380_REG_CMD, cmd);
+ if (ret) {
+ dev_err(data->dev, "failed to send command to device\n");
+ return ret;
+ }
+ /* Wait for 2ms for command to be processed */
+ usleep_range(data->start_up_time, data->start_up_time + 100);
+ /* Check for command processing error */
+ ret = regmap_read(data->regmap, BMP380_REG_ERROR, &reg);
+ if (ret) {
+ dev_err(data->dev, "error reading ERROR reg\n");
+ return ret;
+ }
+ if (reg & BMP380_ERR_CMD_MASK) {
+ dev_err(data->dev, "error processing command 0x%X\n", cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Returns temperature in degrees Celsius, resolution is 0.01º C. Output value of
+ * "5123" equals 51.23º C. t_fine carries the fine temperature as a global value.
+ *
+ * Taken from the datasheet, Appendix 9, "Compensation formula", and the repository
+ * https://github.com/BoschSensortec/BMP3-Sensor-API.
+ */
+static s32 bmp380_compensate_temp(struct bmp280_data *data, u32 adc_temp)
+{
+ s64 var1, var2, var3, var4, var5, var6, comp_temp;
+ struct bmp380_calib *calib = &data->calib.bmp380;
+
+ var1 = ((s64) adc_temp) - (((s64) calib->T1) << 8);
+ var2 = var1 * ((s64) calib->T2);
+ var3 = var1 * var1;
+ var4 = var3 * ((s64) calib->T3);
+ var5 = (var2 << 18) + var4;
+ var6 = var5 >> 32;
+ data->t_fine = (s32) var6;
+ comp_temp = (var6 * 25) >> 14;
+
+ comp_temp = clamp_val(comp_temp, BMP380_MIN_TEMP, BMP380_MAX_TEMP);
+ return (s32) comp_temp;
+}
+
+/*
+ * Returns pressure in Pa as an unsigned 32-bit integer, in hundredths of a Pascal (cPa).
+ * Output value of "9528709" represents 9528709/100 = 95287.09 Pa = 952.8709 hPa.
+ *
+ * Taken from datasheet, Section 9.3. "Pressure compensation" and repository
+ * https://github.com/BoschSensortec/BMP3-Sensor-API.
+ */
+static u32 bmp380_compensate_press(struct bmp280_data *data, u32 adc_press)
+{
+ s64 var1, var2, var3, var4, var5, var6, offset, sensitivity;
+ struct bmp380_calib *calib = &data->calib.bmp380;
+ u32 comp_press;
+
+ var1 = (s64)data->t_fine * (s64)data->t_fine;
+ var2 = var1 >> 6;
+ var3 = (var2 * ((s64) data->t_fine)) >> 8;
+ var4 = ((s64)calib->P8 * var3) >> 5;
+ var5 = ((s64)calib->P7 * var1) << 4;
+ var6 = ((s64)calib->P6 * (s64)data->t_fine) << 22;
+ offset = ((s64)calib->P5 << 47) + var4 + var5 + var6;
+ var2 = ((s64)calib->P4 * var3) >> 5;
+ var4 = ((s64)calib->P3 * var1) << 2;
+ var5 = ((s64)calib->P2 - ((s64)1 << 14)) *
+ ((s64)data->t_fine << 21);
+ sensitivity = (((s64) calib->P1 - ((s64) 1 << 14)) << 46) +
+ var2 + var4 + var5;
+ var1 = (sensitivity >> 24) * (s64)adc_press;
+ var2 = (s64)calib->P10 * (s64)data->t_fine;
+ var3 = var2 + ((s64)calib->P9 << 16);
+ var4 = (var3 * (s64)adc_press) >> 13;
+
+ /*
+ * Dividing by 10 followed by multiplying by 10 to avoid
+ * possible overflow caused by (uncomp_data->pressure * partial_data4).
+ */
+ var5 = ((s64)adc_press * div_s64(var4, 10)) >> 9;
+ var5 *= 10;
+ var6 = (s64)adc_press * (s64)adc_press;
+ var2 = ((s64)calib->P11 * var6) >> 16;
+ var3 = (var2 * (s64)adc_press) >> 7;
+ var4 = (offset >> 2) + var1 + var5 + var3;
+ comp_press = ((u64)var4 * 25) >> 40;
+
+ comp_press = clamp_val(comp_press, BMP380_MIN_PRES, BMP380_MAX_PRES);
+ return comp_press;
+}
+
+static int bmp380_read_temp(struct bmp280_data *data, int *val)
+{
+ s32 comp_temp;
+ u32 adc_temp;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_TEMP_XLSB,
+ data->buf, sizeof(data->buf));
+ if (ret) {
+ dev_err(data->dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ adc_temp = get_unaligned_le24(data->buf);
+ if (adc_temp == BMP380_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ return -EIO;
+ }
+ comp_temp = bmp380_compensate_temp(data, adc_temp);
+
+ /*
+ * Val might be NULL if we're called by the read_press routine,
+ * which only cares about the carried-over t_fine value.
+ */
+ if (val) {
+ /* IIO reports temperatures in milli Celsius */
+ *val = comp_temp * 10;
+ return IIO_VAL_INT;
+ }
+
+ return 0;
+}
+
+static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2)
+{
+ s32 comp_press;
+ u32 adc_press;
+ int ret;
+
+ /* Read and compensate for temperature so we get a reading of t_fine */
+ ret = bmp380_read_temp(data, NULL);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB,
+ data->buf, sizeof(data->buf));
+ if (ret) {
+ dev_err(data->dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ adc_press = get_unaligned_le24(data->buf);
+ if (adc_press == BMP380_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ return -EIO;
+ }
+ comp_press = bmp380_compensate_press(data, adc_press);
+
+ *val = comp_press;
+ /* Compensated pressure is in cPa (centipascals) */
+ *val2 = 100000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bmp380_read_calib(struct bmp280_data *data)
+{
+ struct bmp380_calib *calib = &data->calib.bmp380;
+ int ret;
+
+ /* Read temperature and pressure calibration data */
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_CALIB_TEMP_START,
+ data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
+ if (ret) {
+ dev_err(data->dev,
+ "failed to read temperature calibration parameters\n");
+ return ret;
+ }
+
+ /* Toss the temperature calibration data into the entropy pool */
+ add_device_randomness(data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
+
+ /* Parse calibration values */
+ calib->T1 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_T1]);
+ calib->T2 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_T2]);
+ calib->T3 = data->bmp380_cal_buf[BMP380_T3];
+ calib->P1 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P1]);
+ calib->P2 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P2]);
+ calib->P3 = data->bmp380_cal_buf[BMP380_P3];
+ calib->P4 = data->bmp380_cal_buf[BMP380_P4];
+ calib->P5 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P5]);
+ calib->P6 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P6]);
+ calib->P7 = data->bmp380_cal_buf[BMP380_P7];
+ calib->P8 = data->bmp380_cal_buf[BMP380_P8];
+ calib->P9 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P9]);
+ calib->P10 = data->bmp380_cal_buf[BMP380_P10];
+ calib->P11 = data->bmp380_cal_buf[BMP380_P11];
+
+ return 0;
+}
+
+static const int bmp380_odr_table[][2] = {
+ [BMP380_ODR_200HZ] = {200, 0},
+ [BMP380_ODR_100HZ] = {100, 0},
+ [BMP380_ODR_50HZ] = {50, 0},
+ [BMP380_ODR_25HZ] = {25, 0},
+ [BMP380_ODR_12_5HZ] = {12, 500000},
+ [BMP380_ODR_6_25HZ] = {6, 250000},
+ [BMP380_ODR_3_125HZ] = {3, 125000},
+ [BMP380_ODR_1_5625HZ] = {1, 562500},
+ [BMP380_ODR_0_78HZ] = {0, 781250},
+ [BMP380_ODR_0_39HZ] = {0, 390625},
+ [BMP380_ODR_0_2HZ] = {0, 195313},
+ [BMP380_ODR_0_1HZ] = {0, 97656},
+ [BMP380_ODR_0_05HZ] = {0, 48828},
+ [BMP380_ODR_0_02HZ] = {0, 24414},
+ [BMP380_ODR_0_01HZ] = {0, 12207},
+ [BMP380_ODR_0_006HZ] = {0, 6104},
+ [BMP380_ODR_0_003HZ] = {0, 3052},
+ [BMP380_ODR_0_0015HZ] = {0, 1526},
+};
+
+static int bmp380_chip_config(struct bmp280_data *data)
{
+ bool change = false, aux;
+ unsigned int tmp;
+ u8 osrs;
int ret;
+
+ /* Configure power control register */
+ ret = regmap_update_bits(data->regmap, BMP380_REG_POWER_CONTROL,
+ BMP380_CTRL_SENSORS_MASK,
+ BMP380_CTRL_SENSORS_PRESS_EN |
+ BMP380_CTRL_SENSORS_TEMP_EN);
+ if (ret) {
+ dev_err(data->dev,
+ "failed to write operation control register\n");
+ return ret;
+ }
+
+ /* Configure oversampling */
+ osrs = FIELD_PREP(BMP380_OSRS_TEMP_MASK, data->oversampling_temp) |
+ FIELD_PREP(BMP380_OSRS_PRESS_MASK, data->oversampling_press);
+
+ ret = regmap_update_bits_check(data->regmap, BMP380_REG_OSR,
+ BMP380_OSRS_TEMP_MASK |
+ BMP380_OSRS_PRESS_MASK,
+ osrs, &aux);
+ if (ret) {
+ dev_err(data->dev, "failed to write oversampling register\n");
+ return ret;
+ }
+ change = change || aux;
+
+ /* Configure output data rate */
+ ret = regmap_update_bits_check(data->regmap, BMP380_REG_ODR,
+ BMP380_ODRS_MASK, data->sampling_freq, &aux);
+ if (ret) {
+ dev_err(data->dev, "failed to write ODR selection register\n");
+ return ret;
+ }
+ change = change || aux;
+
+ /* Set filter data */
+ ret = regmap_update_bits_check(data->regmap, BMP380_REG_CONFIG, BMP380_FILTER_MASK,
+ FIELD_PREP(BMP380_FILTER_MASK, data->iir_filter_coeff),
+ &aux);
+ if (ret) {
+ dev_err(data->dev, "failed to write config register\n");
+ return ret;
+ }
+ change = change || aux;
+
+ if (change) {
+ /*
+ * Configuration errors are detected on the fly during a measurement
+ * cycle. If the sampling frequency is too low, it's faster to reset
+ * the measurement loop than to wait until the next measurement is due.
+ *
+ * Reset the sensor measurement loop by toggling between the sleep and
+ * normal operating modes.
+ */
+ ret = regmap_write_bits(data->regmap, BMP380_REG_POWER_CONTROL,
+ BMP380_MODE_MASK,
+ FIELD_PREP(BMP380_MODE_MASK, BMP380_MODE_SLEEP));
+ if (ret) {
+ dev_err(data->dev, "failed to set sleep mode\n");
+ return ret;
+ }
+ usleep_range(2000, 2500);
+ ret = regmap_write_bits(data->regmap, BMP380_REG_POWER_CONTROL,
+ BMP380_MODE_MASK,
+ FIELD_PREP(BMP380_MODE_MASK, BMP380_MODE_NORMAL));
+ if (ret) {
+ dev_err(data->dev, "failed to set normal mode\n");
+ return ret;
+ }
+ /*
+ * Wait for the measurement to complete before checking the configuration
+ * error flag. Use the longest measurement time indicated in section 3.9.1
+ * of the datasheet.
+ */
+ msleep(80);
+
+ /* Check config error flag */
+ ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
+ if (ret) {
+ dev_err(data->dev,
+ "failed to read error register\n");
+ return ret;
+ }
+ if (tmp & BMP380_ERR_CONF_MASK) {
+ dev_warn(data->dev,
+ "sensor flagged configuration as incompatible\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 };
+static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128};
+
+static const struct bmp280_chip_info bmp380_chip_info = {
+ .id_reg = BMP380_REG_ID,
+ .start_up_time = 2000,
+ .channels = bmp380_channels,
+ .num_channels = 2,
+
+ .oversampling_temp_avail = bmp380_oversampling_avail,
+ .num_oversampling_temp_avail = ARRAY_SIZE(bmp380_oversampling_avail),
+ .oversampling_temp_default = ilog2(1),
+
+ .oversampling_press_avail = bmp380_oversampling_avail,
+ .num_oversampling_press_avail = ARRAY_SIZE(bmp380_oversampling_avail),
+ .oversampling_press_default = ilog2(4),
+
+ .sampling_freq_avail = bmp380_odr_table,
+ .num_sampling_freq_avail = ARRAY_SIZE(bmp380_odr_table) * 2,
+ .sampling_freq_default = BMP380_ODR_50HZ,
+
+ .iir_filter_coeffs_avail = bmp380_iir_filter_coeffs_avail,
+ .num_iir_filter_coeffs_avail = ARRAY_SIZE(bmp380_iir_filter_coeffs_avail),
+ .iir_filter_coeff_default = 2,
+
+ .chip_config = bmp380_chip_config,
+ .read_temp = bmp380_read_temp,
+ .read_press = bmp380_read_press,
+ .read_calib = bmp380_read_calib,
+};
+
+static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
+{
const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
unsigned int delay_us;
unsigned int ctrl;
+ int ret;
if (data->use_eoc)
reinit_completion(&data->done);
@@ -710,7 +1372,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
if (!ret)
dev_err(data->dev, "timeout waiting for completion\n");
} else {
- if (ctrl_meas == BMP180_MEAS_TEMP)
+ if (FIELD_GET(BMP180_MEAS_CTRL_MASK, ctrl_meas) == BMP180_MEAS_TEMP)
delay_us = 4500;
else
delay_us =
@@ -732,55 +1394,57 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
{
- __be16 tmp;
int ret;
- ret = bmp180_measure(data, BMP180_MEAS_TEMP);
+ ret = bmp180_measure(data,
+ FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_TEMP) |
+ BMP180_MEAS_SCO);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB,
+ &data->be16, sizeof(data->be16));
if (ret)
return ret;
- *val = be16_to_cpu(tmp);
+ *val = be16_to_cpu(data->be16);
return 0;
}
-static int bmp180_read_calib(struct bmp280_data *data,
- struct bmp180_calib *calib)
+static int bmp180_read_calib(struct bmp280_data *data)
{
+ struct bmp180_calib *calib = &data->calib.bmp180;
int ret;
int i;
- __be16 buf[BMP180_REG_CALIB_COUNT / 2];
- ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START, buf,
- sizeof(buf));
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START,
+ data->bmp180_cal_buf, sizeof(data->bmp180_cal_buf));
if (ret < 0)
return ret;
/* None of the words has the value 0 or 0xFFFF */
- for (i = 0; i < ARRAY_SIZE(buf); i++) {
- if (buf[i] == cpu_to_be16(0) || buf[i] == cpu_to_be16(0xffff))
+ for (i = 0; i < ARRAY_SIZE(data->bmp180_cal_buf); i++) {
+ if (data->bmp180_cal_buf[i] == cpu_to_be16(0) ||
+ data->bmp180_cal_buf[i] == cpu_to_be16(0xffff))
return -EIO;
}
/* Toss the calibration data into the entropy pool */
- add_device_randomness(buf, sizeof(buf));
-
- calib->AC1 = be16_to_cpu(buf[AC1]);
- calib->AC2 = be16_to_cpu(buf[AC2]);
- calib->AC3 = be16_to_cpu(buf[AC3]);
- calib->AC4 = be16_to_cpu(buf[AC4]);
- calib->AC5 = be16_to_cpu(buf[AC5]);
- calib->AC6 = be16_to_cpu(buf[AC6]);
- calib->B1 = be16_to_cpu(buf[B1]);
- calib->B2 = be16_to_cpu(buf[B2]);
- calib->MB = be16_to_cpu(buf[MB]);
- calib->MC = be16_to_cpu(buf[MC]);
- calib->MD = be16_to_cpu(buf[MD]);
+ add_device_randomness(data->bmp180_cal_buf, sizeof(data->bmp180_cal_buf));
+
+ calib->AC1 = be16_to_cpu(data->bmp180_cal_buf[AC1]);
+ calib->AC2 = be16_to_cpu(data->bmp180_cal_buf[AC2]);
+ calib->AC3 = be16_to_cpu(data->bmp180_cal_buf[AC3]);
+ calib->AC4 = be16_to_cpu(data->bmp180_cal_buf[AC4]);
+ calib->AC5 = be16_to_cpu(data->bmp180_cal_buf[AC5]);
+ calib->AC6 = be16_to_cpu(data->bmp180_cal_buf[AC6]);
+ calib->B1 = be16_to_cpu(data->bmp180_cal_buf[B1]);
+ calib->B2 = be16_to_cpu(data->bmp180_cal_buf[B2]);
+ calib->MB = be16_to_cpu(data->bmp180_cal_buf[MB]);
+ calib->MC = be16_to_cpu(data->bmp180_cal_buf[MC]);
+ calib->MD = be16_to_cpu(data->bmp180_cal_buf[MD]);
return 0;
}
@@ -793,8 +1457,8 @@ static int bmp180_read_calib(struct bmp280_data *data,
*/
static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
{
- s32 x1, x2;
struct bmp180_calib *calib = &data->calib.bmp180;
+ s32 x1, x2;
x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
x2 = (calib->MC << 11) / (x1 + calib->MD);
@@ -805,8 +1469,8 @@ static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
static int bmp180_read_temp(struct bmp280_data *data, int *val)
{
- int ret;
s32 adc_temp, comp_temp;
+ int ret;
ret = bmp180_read_adc_temp(data, &adc_temp);
if (ret)
@@ -828,19 +1492,22 @@ static int bmp180_read_temp(struct bmp280_data *data, int *val)
static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
{
- int ret;
- __be32 tmp = 0;
u8 oss = data->oversampling_press;
+ int ret;
- ret = bmp180_measure(data, BMP180_MEAS_PRESS_X(oss));
+ ret = bmp180_measure(data,
+ FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_PRESS) |
+ FIELD_PREP(BMP180_OSRS_PRESS_MASK, oss) |
+ BMP180_MEAS_SCO);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB,
+ data->buf, sizeof(data->buf));
if (ret)
return ret;
- *val = (be32_to_cpu(tmp) >> 8) >> (8 - oss);
+ *val = get_unaligned_be24(data->buf) >> (8 - oss);
return 0;
}
@@ -852,11 +1519,11 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
*/
static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
{
+ struct bmp180_calib *calib = &data->calib.bmp180;
+ s32 oss = data->oversampling_press;
s32 x1, x2, x3, p;
s32 b3, b6;
u32 b4, b7;
- s32 oss = data->oversampling_press;
- struct bmp180_calib *calib = &data->calib.bmp180;
b6 = data->t_fine - 4000;
x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
@@ -883,9 +1550,9 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
static int bmp180_read_press(struct bmp280_data *data,
int *val, int *val2)
{
- int ret;
- s32 adc_press;
u32 comp_press;
+ s32 adc_press;
+ int ret;
/* Read and compensate temperature so we get a reading of t_fine. */
ret = bmp180_read_temp(data, NULL);
@@ -913,17 +1580,25 @@ static const int bmp180_oversampling_temp_avail[] = { 1 };
static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
static const struct bmp280_chip_info bmp180_chip_info = {
+ .id_reg = BMP280_REG_ID,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+ .num_channels = 2,
+
.oversampling_temp_avail = bmp180_oversampling_temp_avail,
.num_oversampling_temp_avail =
ARRAY_SIZE(bmp180_oversampling_temp_avail),
+ .oversampling_temp_default = 0,
.oversampling_press_avail = bmp180_oversampling_press_avail,
.num_oversampling_press_avail =
ARRAY_SIZE(bmp180_oversampling_press_avail),
+ .oversampling_press_default = BMP180_MEAS_PRESS_8X,
.chip_config = bmp180_chip_config,
.read_temp = bmp180_read_temp,
.read_press = bmp180_read_press,
+ .read_calib = bmp180_read_calib,
};
static irqreturn_t bmp085_eoc_irq(int irq, void *d)
@@ -990,11 +1665,12 @@ int bmp280_common_probe(struct device *dev,
const char *name,
int irq)
{
- int ret;
+ const struct bmp280_chip_info *chip_info;
struct iio_dev *indio_dev;
struct bmp280_data *data;
- unsigned int chip_id;
struct gpio_desc *gpiod;
+ unsigned int chip_id;
+ int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
@@ -1005,36 +1681,36 @@ int bmp280_common_probe(struct device *dev,
data->dev = dev;
indio_dev->name = name;
- indio_dev->channels = bmp280_channels;
indio_dev->info = &bmp280_info;
indio_dev->modes = INDIO_DIRECT_MODE;
switch (chip) {
case BMP180_CHIP_ID:
- indio_dev->num_channels = 2;
- data->chip_info = &bmp180_chip_info;
- data->oversampling_press = ilog2(8);
- data->oversampling_temp = ilog2(1);
- data->start_up_time = 10000;
+ chip_info = &bmp180_chip_info;
break;
case BMP280_CHIP_ID:
- indio_dev->num_channels = 2;
- data->chip_info = &bmp280_chip_info;
- data->oversampling_press = ilog2(16);
- data->oversampling_temp = ilog2(2);
- data->start_up_time = 2000;
+ chip_info = &bmp280_chip_info;
break;
case BME280_CHIP_ID:
- indio_dev->num_channels = 3;
- data->chip_info = &bme280_chip_info;
- data->oversampling_press = ilog2(16);
- data->oversampling_humid = ilog2(16);
- data->oversampling_temp = ilog2(2);
- data->start_up_time = 2000;
+ chip_info = &bme280_chip_info;
+ break;
+ case BMP380_CHIP_ID:
+ chip_info = &bmp380_chip_info;
break;
default:
return -EINVAL;
}
+ data->chip_info = chip_info;
+
+ /* Apply initial values from chip info structure */
+ indio_dev->channels = chip_info->channels;
+ indio_dev->num_channels = chip_info->num_channels;
+ data->oversampling_press = chip_info->oversampling_press_default;
+ data->oversampling_humid = chip_info->oversampling_humid_default;
+ data->oversampling_temp = chip_info->oversampling_temp_default;
+ data->iir_filter_coeff = chip_info->iir_filter_coeff_default;
+ data->sampling_freq = chip_info->sampling_freq_default;
+ data->start_up_time = chip_info->start_up_time;
/* Bring up regulators */
regulator_bulk_set_supply_names(data->supplies,
@@ -1071,7 +1747,8 @@ int bmp280_common_probe(struct device *dev,
}
data->regmap = regmap;
- ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
+
+ ret = regmap_read(regmap, data->chip_info->id_reg, &chip_id);
if (ret < 0)
return ret;
if (chip_id != chip) {
@@ -1080,6 +1757,13 @@ int bmp280_common_probe(struct device *dev,
return -EINVAL;
}
+ /* BMP3xx requires soft-reset as part of initialization */
+ if (chip_id == BMP380_CHIP_ID) {
+ ret = bmp380_cmd(data, BMP380_CMD_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+ }
+
ret = data->chip_info->chip_config(data);
if (ret < 0)
return ret;
@@ -1091,21 +1775,11 @@ int bmp280_common_probe(struct device *dev,
* non-volatile memory during production". Let's read them out at probe
* time once. They will not change.
*/
- if (chip_id == BMP180_CHIP_ID) {
- ret = bmp180_read_calib(data, &data->calib.bmp180);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to read calibration coefficients\n");
- return ret;
- }
- } else if (chip_id == BMP280_CHIP_ID || chip_id == BME280_CHIP_ID) {
- ret = bmp280_read_calib(data, &data->calib.bmp280, chip_id);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to read calibration coefficients\n");
- return ret;
- }
- }
+
+ ret = data->chip_info->read_calib(data);
+ if (ret < 0)
+ return dev_err_probe(data->dev, ret,
+ "failed to read calibration coefficients\n");
/*
* Attempt to grab an optional EOC IRQ - only the BMP085 has this
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index bf4a7a617537..0c27211f3ea0 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -19,6 +19,9 @@ static int bmp280_i2c_probe(struct i2c_client *client,
case BME280_CHIP_ID:
regmap_config = &bmp280_regmap_config;
break;
+ case BMP380_CHIP_ID:
+ regmap_config = &bmp380_regmap_config;
+ break;
default:
return -EINVAL;
}
@@ -37,19 +40,21 @@ static int bmp280_i2c_probe(struct i2c_client *client,
}
static const struct of_device_id bmp280_of_i2c_match[] = {
- { .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
- { .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
- { .compatible = "bosch,bmp180", .data = (void *)BMP180_CHIP_ID },
{ .compatible = "bosch,bmp085", .data = (void *)BMP180_CHIP_ID },
+ { .compatible = "bosch,bmp180", .data = (void *)BMP180_CHIP_ID },
+ { .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
+ { .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
+ { .compatible = "bosch,bmp380", .data = (void *)BMP380_CHIP_ID },
{ },
};
MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
static const struct i2c_device_id bmp280_i2c_id[] = {
- {"bmp280", BMP280_CHIP_ID },
- {"bmp180", BMP180_CHIP_ID },
{"bmp085", BMP180_CHIP_ID },
+ {"bmp180", BMP180_CHIP_ID },
+ {"bmp280", BMP280_CHIP_ID },
{"bme280", BME280_CHIP_ID },
+ {"bmp380", BMP380_CHIP_ID },
{ },
};
MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
index 969698518984..c98c67970265 100644
--- a/drivers/iio/pressure/bmp280-regmap.c
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -72,6 +72,49 @@ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
}
}
+static bool bmp380_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP380_REG_CMD:
+ case BMP380_REG_CONFIG:
+ case BMP380_REG_FIFO_CONFIG_1:
+ case BMP380_REG_FIFO_CONFIG_2:
+ case BMP380_REG_FIFO_WATERMARK_LSB:
+ case BMP380_REG_FIFO_WATERMARK_MSB:
+ case BMP380_REG_POWER_CONTROL:
+ case BMP380_REG_INT_CONTROL:
+ case BMP380_REG_IF_CONFIG:
+ case BMP380_REG_ODR:
+ case BMP380_REG_OSR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bmp380_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP380_REG_TEMP_XLSB:
+ case BMP380_REG_TEMP_LSB:
+ case BMP380_REG_TEMP_MSB:
+ case BMP380_REG_PRESS_XLSB:
+ case BMP380_REG_PRESS_LSB:
+ case BMP380_REG_PRESS_MSB:
+ case BMP380_REG_SENSOR_TIME_XLSB:
+ case BMP380_REG_SENSOR_TIME_LSB:
+ case BMP380_REG_SENSOR_TIME_MSB:
+ case BMP380_REG_INT_STATUS:
+ case BMP380_REG_FIFO_DATA:
+ case BMP380_REG_STATUS:
+ case BMP380_REG_ERROR:
+ case BMP380_REG_EVENT:
+ return true;
+ default:
+ return false;
+ }
+}
+
const struct regmap_config bmp280_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -83,3 +126,15 @@ const struct regmap_config bmp280_regmap_config = {
.volatile_reg = bmp280_is_volatile_reg,
};
EXPORT_SYMBOL_NS(bmp280_regmap_config, IIO_BMP280);
+
+const struct regmap_config bmp380_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP380_REG_CMD,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp380_is_writeable_reg,
+ .volatile_reg = bmp380_is_volatile_reg,
+};
+EXPORT_SYMBOL_NS(bmp380_regmap_config, IIO_BMP280);
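
A hedged aside on the cache interaction (not part of the patch; the wrapper function is invented): with REGCACHE_RBTREE, only registers that .volatile_reg reports as volatile are always fetched from the bus, so status-style registers must be listed there or stale cached values would be returned:

#include <linux/regmap.h>

static void bmp380_cache_example(struct regmap *map)	/* illustrative only */
{
	unsigned int val;

	regmap_read(map, BMP380_REG_STATUS, &val);	/* volatile: always hits the bus */
	regmap_read(map, BMP380_REG_ODR, &val);		/* non-volatile: may be served from the cache */
}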
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 4cfaf3e869b8..011c68e07ebf 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -66,6 +66,9 @@ static int bmp280_spi_probe(struct spi_device *spi)
case BME280_CHIP_ID:
regmap_config = &bmp280_regmap_config;
break;
+ case BMP380_CHIP_ID:
+ regmap_config = &bmp380_regmap_config;
+ break;
default:
return -EINVAL;
}
@@ -92,6 +95,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
{ .compatible = "bosch,bmp181", },
{ .compatible = "bosch,bmp280", },
{ .compatible = "bosch,bme280", },
+ { .compatible = "bosch,bmp380", },
{ },
};
MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
@@ -101,6 +105,7 @@ static const struct spi_device_id bmp280_spi_id[] = {
{ "bmp181", BMP180_CHIP_ID },
{ "bmp280", BMP280_CHIP_ID },
{ "bme280", BME280_CHIP_ID },
+ { "bmp380", BMP380_CHIP_ID },
{ }
};
MODULE_DEVICE_TABLE(spi, bmp280_spi_id);
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 57ba0e85db91..c791325c7416 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -3,6 +3,87 @@
#include <linux/device.h>
#include <linux/regmap.h>
+/* BMP380 specific registers */
+#define BMP380_REG_CMD 0x7E
+#define BMP380_REG_CONFIG 0x1F
+#define BMP380_REG_ODR 0x1D
+#define BMP380_REG_OSR 0x1C
+#define BMP380_REG_POWER_CONTROL 0x1B
+#define BMP380_REG_IF_CONFIG 0x1A
+#define BMP380_REG_INT_CONTROL 0x19
+#define BMP380_REG_INT_STATUS 0x11
+#define BMP380_REG_EVENT 0x10
+#define BMP380_REG_STATUS 0x03
+#define BMP380_REG_ERROR 0x02
+#define BMP380_REG_ID 0x00
+
+#define BMP380_REG_FIFO_CONFIG_1 0x18
+#define BMP380_REG_FIFO_CONFIG_2 0x17
+#define BMP380_REG_FIFO_WATERMARK_MSB 0x16
+#define BMP380_REG_FIFO_WATERMARK_LSB 0x15
+#define BMP380_REG_FIFO_DATA 0x14
+#define BMP380_REG_FIFO_LENGTH_MSB 0x13
+#define BMP380_REG_FIFO_LENGTH_LSB 0x12
+
+#define BMP380_REG_SENSOR_TIME_MSB 0x0E
+#define BMP380_REG_SENSOR_TIME_LSB 0x0D
+#define BMP380_REG_SENSOR_TIME_XLSB 0x0C
+
+#define BMP380_REG_TEMP_MSB 0x09
+#define BMP380_REG_TEMP_LSB 0x08
+#define BMP380_REG_TEMP_XLSB 0x07
+
+#define BMP380_REG_PRESS_MSB 0x06
+#define BMP380_REG_PRESS_LSB 0x05
+#define BMP380_REG_PRESS_XLSB 0x04
+
+#define BMP380_REG_CALIB_TEMP_START 0x31
+#define BMP380_CALIB_REG_COUNT 21
+
+#define BMP380_FILTER_MASK GENMASK(3, 1)
+#define BMP380_FILTER_OFF 0
+#define BMP380_FILTER_1X 1
+#define BMP380_FILTER_3X 2
+#define BMP380_FILTER_7X 3
+#define BMP380_FILTER_15X 4
+#define BMP380_FILTER_31X 5
+#define BMP380_FILTER_63X 6
+#define BMP380_FILTER_127X 7
+
+#define BMP380_OSRS_TEMP_MASK GENMASK(5, 3)
+#define BMP380_OSRS_PRESS_MASK GENMASK(2, 0)
+
+#define BMP380_ODRS_MASK GENMASK(4, 0)
+
+#define BMP380_CTRL_SENSORS_MASK GENMASK(1, 0)
+#define BMP380_CTRL_SENSORS_PRESS_EN BIT(0)
+#define BMP380_CTRL_SENSORS_TEMP_EN BIT(1)
+#define BMP380_MODE_MASK GENMASK(5, 4)
+#define BMP380_MODE_SLEEP 0
+#define BMP380_MODE_FORCED 1
+#define BMP380_MODE_NORMAL 3
+
+#define BMP380_MIN_TEMP -4000
+#define BMP380_MAX_TEMP 8500
+#define BMP380_MIN_PRES 3000000
+#define BMP380_MAX_PRES 12500000
+
+#define BMP380_CMD_NOOP 0x00
+#define BMP380_CMD_EXTMODE_EN_MID 0x34
+#define BMP380_CMD_FIFO_FLUSH 0xB0
+#define BMP380_CMD_SOFT_RESET 0xB6
+
+#define BMP380_STATUS_CMD_RDY_MASK BIT(4)
+#define BMP380_STATUS_DRDY_PRESS_MASK BIT(5)
+#define BMP380_STATUS_DRDY_TEMP_MASK BIT(6)
+
+#define BMP380_ERR_FATAL_MASK BIT(0)
+#define BMP380_ERR_CMD_MASK BIT(1)
+#define BMP380_ERR_CONF_MASK BIT(2)
+
+#define BMP380_TEMP_SKIPPED 0x800000
+#define BMP380_PRESS_SKIPPED 0x800000
+
/* BMP280 specific registers */
#define BMP280_REG_HUMIDITY_LSB 0xFE
#define BMP280_REG_HUMIDITY_MSB 0xFD
@@ -13,6 +94,9 @@
#define BMP280_REG_PRESS_LSB 0xF8
#define BMP280_REG_PRESS_MSB 0xF7
+/* Helper mask to truncate excess 4 bits on pressure and temp readings */
+#define BMP280_MEAS_TRIM_MASK GENMASK(24, 4)
+
#define BMP280_REG_CONFIG 0xF5
#define BMP280_REG_CTRL_MEAS 0xF4
#define BMP280_REG_STATUS 0xF3
@@ -32,44 +116,46 @@
#define BMP280_REG_COMP_PRESS_START 0x8E
#define BMP280_COMP_PRESS_REG_COUNT 18
-#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_COMP_H5_MASK GENMASK(15, 4)
+
+#define BMP280_CONTIGUOUS_CALIB_REGS (BMP280_COMP_TEMP_REG_COUNT + \
+ BMP280_COMP_PRESS_REG_COUNT)
+
+#define BMP280_FILTER_MASK GENMASK(4, 2)
#define BMP280_FILTER_OFF 0
-#define BMP280_FILTER_2X BIT(2)
-#define BMP280_FILTER_4X BIT(3)
-#define BMP280_FILTER_8X (BIT(3) | BIT(2))
-#define BMP280_FILTER_16X BIT(4)
+#define BMP280_FILTER_2X 1
+#define BMP280_FILTER_4X 2
+#define BMP280_FILTER_8X 3
+#define BMP280_FILTER_16X 4
-#define BMP280_OSRS_HUMIDITY_MASK (BIT(2) | BIT(1) | BIT(0))
-#define BMP280_OSRS_HUMIDITIY_X(osrs_h) ((osrs_h) << 0)
+#define BMP280_OSRS_HUMIDITY_MASK GENMASK(2, 0)
#define BMP280_OSRS_HUMIDITY_SKIP 0
-#define BMP280_OSRS_HUMIDITY_1X BMP280_OSRS_HUMIDITIY_X(1)
-#define BMP280_OSRS_HUMIDITY_2X BMP280_OSRS_HUMIDITIY_X(2)
-#define BMP280_OSRS_HUMIDITY_4X BMP280_OSRS_HUMIDITIY_X(3)
-#define BMP280_OSRS_HUMIDITY_8X BMP280_OSRS_HUMIDITIY_X(4)
-#define BMP280_OSRS_HUMIDITY_16X BMP280_OSRS_HUMIDITIY_X(5)
+#define BMP280_OSRS_HUMIDITY_1X 1
+#define BMP280_OSRS_HUMIDITY_2X 2
+#define BMP280_OSRS_HUMIDITY_4X 3
+#define BMP280_OSRS_HUMIDITY_8X 4
+#define BMP280_OSRS_HUMIDITY_16X 5
-#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_MASK GENMASK(7, 5)
#define BMP280_OSRS_TEMP_SKIP 0
-#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5)
-#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1)
-#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2)
-#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3)
-#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4)
-#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5)
-
-#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_OSRS_TEMP_1X 1
+#define BMP280_OSRS_TEMP_2X 2
+#define BMP280_OSRS_TEMP_4X 3
+#define BMP280_OSRS_TEMP_8X 4
+#define BMP280_OSRS_TEMP_16X 5
+
+#define BMP280_OSRS_PRESS_MASK GENMASK(4, 2)
#define BMP280_OSRS_PRESS_SKIP 0
-#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2)
-#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1)
-#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2)
-#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3)
-#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4)
-#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5)
-
-#define BMP280_MODE_MASK (BIT(1) | BIT(0))
+#define BMP280_OSRS_PRESS_1X 1
+#define BMP280_OSRS_PRESS_2X 2
+#define BMP280_OSRS_PRESS_4X 3
+#define BMP280_OSRS_PRESS_8X 4
+#define BMP280_OSRS_PRESS_16X 5
+
+#define BMP280_MODE_MASK GENMASK(1, 0)
#define BMP280_MODE_SLEEP 0
-#define BMP280_MODE_FORCED BIT(0)
-#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+#define BMP280_MODE_FORCED 1
+#define BMP280_MODE_NORMAL 3
/* BMP180 specific registers */
#define BMP180_REG_OUT_XLSB 0xF8
@@ -79,19 +165,22 @@
#define BMP180_REG_CALIB_START 0xAA
#define BMP180_REG_CALIB_COUNT 22
+#define BMP180_MEAS_CTRL_MASK GENMASK(4, 0)
+#define BMP180_MEAS_TEMP 0x0E
+#define BMP180_MEAS_PRESS 0x14
#define BMP180_MEAS_SCO BIT(5)
-#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO)
-#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
-#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0)
-#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1)
-#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2)
-#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3)
+#define BMP180_OSRS_PRESS_MASK GENMASK(7, 6)
+#define BMP180_MEAS_PRESS_1X 0
+#define BMP180_MEAS_PRESS_2X 1
+#define BMP180_MEAS_PRESS_4X 2
+#define BMP180_MEAS_PRESS_8X 3
/* BMP180 and BMP280 common registers */
#define BMP280_REG_CTRL_MEAS 0xF4
#define BMP280_REG_RESET 0xE0
#define BMP280_REG_ID 0xD0
+#define BMP380_CHIP_ID 0x50
#define BMP180_CHIP_ID 0x55
#define BMP280_CHIP_ID 0x58
#define BME280_CHIP_ID 0x60
@@ -105,6 +194,7 @@
/* Regmap configurations */
extern const struct regmap_config bmp180_regmap_config;
extern const struct regmap_config bmp280_regmap_config;
+extern const struct regmap_config bmp380_regmap_config;
/* Probe called from different transports */
int bmp280_common_probe(struct device *dev,
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 5f6bb3603a8b..f0b0d198c6d4 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -129,9 +129,8 @@ static int dlh_read_direct(struct dlh_state *st,
if (ret)
return ret;
- *pressure = get_unaligned_be32(&st->rx_buf[1]) >> 8;
- *temperature = get_unaligned_be32(&st->rx_buf[3]) &
- GENMASK(DLH_NUM_TEMP_BITS - 1, 0);
+ *pressure = get_unaligned_be24(&st->rx_buf[1]);
+ *temperature = get_unaligned_be24(&st->rx_buf[4]);
return 0;
}
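
A hedged illustration of the helper used above (header location assumed for this kernel series): get_unaligned_be24() assembles three big-endian bytes into the low 24 bits of a u32, so it replaces the four-byte read plus shift without needing an extra trailing byte:

#include <linux/types.h>
#include <asm/unaligned.h>

static void be24_example(void)
{
	u8 raw[4] = { 0x12, 0x34, 0x56, 0x78 };
	u32 a = get_unaligned_be32(raw) >> 8;	/* 0x123456, needs 4 readable bytes */
	u32 b = get_unaligned_be24(raw);	/* 0x123456, needs only 3 bytes */

	(void)a;
	(void)b;
}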
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index 36fb7ae0d0a9..984a3f511a1a 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -89,6 +89,7 @@ struct dps310_data {
s32 c00, c10, c20, c30, c01, c11, c21;
s32 pressure_raw;
s32 temp_raw;
+ bool timeout_recovery_failed;
};
static const struct iio_chan_spec dps310_channels[] = {
@@ -159,6 +160,102 @@ static int dps310_get_coefs(struct dps310_data *data)
return 0;
}
+/*
+ * Some versions of the chip will read temperatures in the ~60C range when
+ * it's actually ~20C. This is the manufacturer recommended workaround
+ * to correct the issue. The registers used below are undocumented.
+ */
+static int dps310_temp_workaround(struct dps310_data *data)
+{
+ int rc;
+ int reg;
+
+ rc = regmap_read(data->regmap, 0x32, &reg);
+ if (rc)
+ return rc;
+
+ /*
+ * If bit 1 is set then the device is okay, and the workaround does not
+ * need to be applied
+ */
+ if (reg & BIT(1))
+ return 0;
+
+ rc = regmap_write(data->regmap, 0x0e, 0xA5);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x0f, 0x96);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x62, 0x02);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x0e, 0x00);
+ if (rc)
+ return rc;
+
+ return regmap_write(data->regmap, 0x0f, 0x00);
+}
+
+static int dps310_startup(struct dps310_data *data)
+{
+ int rc;
+ int ready;
+
+ /*
+ * Set up pressure sensor in single sample, one measurement per second
+ * mode
+ */
+ rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0);
+ if (rc)
+ return rc;
+
+ /*
+ * Set up external (MEMS) temperature sensor in single sample, one
+ * measurement per second mode
+ */
+ rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT);
+ if (rc)
+ return rc;
+
+ /* Temp and pressure shifts are disabled when PRC <= 8 */
+ rc = regmap_write_bits(data->regmap, DPS310_CFG_REG,
+ DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0);
+ if (rc)
+ return rc;
+
+ /* MEAS_CFG doesn't update correctly unless first written with 0 */
+ rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
+ DPS310_MEAS_CTRL_BITS, 0);
+ if (rc)
+ return rc;
+
+ /* Turn on temperature and pressure measurement in the background */
+ rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
+ DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN |
+ DPS310_TEMP_EN | DPS310_BACKGROUND);
+ if (rc)
+ return rc;
+
+ /*
+ * Calibration coefficients required for reporting temperature.
+ * They are available 40ms after the device has started
+ */
+ rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
+ ready & DPS310_COEF_RDY, 10000, 40000);
+ if (rc)
+ return rc;
+
+ rc = dps310_get_coefs(data);
+ if (rc)
+ return rc;
+
+ return dps310_temp_workaround(data);
+}
+
static int dps310_get_pres_precision(struct dps310_data *data)
{
int rc;
@@ -297,11 +394,69 @@ static int dps310_get_temp_k(struct dps310_data *data)
return scale_factors[ilog2(rc)];
}
+static int dps310_reset_wait(struct dps310_data *data)
+{
+ int rc;
+
+ rc = regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC);
+ if (rc)
+ return rc;
+
+ /* Wait for device chip access: 2.5ms in specification */
+ usleep_range(2500, 12000);
+ return 0;
+}
+
+static int dps310_reset_reinit(struct dps310_data *data)
+{
+ int rc;
+
+ rc = dps310_reset_wait(data);
+ if (rc)
+ return rc;
+
+ return dps310_startup(data);
+}
+
+static int dps310_ready_status(struct dps310_data *data, int ready_bit, int timeout)
+{
+ int sleep = DPS310_POLL_SLEEP_US(timeout);
+ int ready;
+
+ return regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, ready & ready_bit,
+ sleep, timeout);
+}
+
+static int dps310_ready(struct dps310_data *data, int ready_bit, int timeout)
+{
+ int rc;
+
+ rc = dps310_ready_status(data, ready_bit, timeout);
+ if (rc) {
+ if (rc == -ETIMEDOUT && !data->timeout_recovery_failed) {
+ /* Reset and reinitialize the chip. */
+ if (dps310_reset_reinit(data)) {
+ data->timeout_recovery_failed = true;
+ } else {
+ /* Try again to get sensor ready status. */
+ if (dps310_ready_status(data, ready_bit, timeout))
+ data->timeout_recovery_failed = true;
+ else
+ return 0;
+ }
+ }
+
+ return rc;
+ }
+
+ data->timeout_recovery_failed = false;
+ return 0;
+}
+
static int dps310_read_pres_raw(struct dps310_data *data)
{
int rc;
int rate;
- int ready;
int timeout;
s32 raw;
u8 val[3];
@@ -313,9 +468,7 @@ static int dps310_read_pres_raw(struct dps310_data *data)
timeout = DPS310_POLL_TIMEOUT_US(rate);
/* Poll for sensor readiness; base the timeout upon the sample rate. */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_PRS_RDY,
- DPS310_POLL_SLEEP_US(timeout), timeout);
+ rc = dps310_ready(data, DPS310_PRS_RDY, timeout);
if (rc)
goto done;
@@ -352,7 +505,6 @@ static int dps310_read_temp_raw(struct dps310_data *data)
{
int rc;
int rate;
- int ready;
int timeout;
if (mutex_lock_interruptible(&data->lock))
@@ -362,10 +514,8 @@ static int dps310_read_temp_raw(struct dps310_data *data)
timeout = DPS310_POLL_TIMEOUT_US(rate);
/* Poll for sensor readiness; base the timeout upon the sample rate. */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_TMP_RDY,
- DPS310_POLL_SLEEP_US(timeout), timeout);
- if (rc < 0)
+ rc = dps310_ready(data, DPS310_TMP_RDY, timeout);
+ if (rc)
goto done;
rc = dps310_read_temp_ready(data);
@@ -660,7 +810,7 @@ static void dps310_reset(void *action_data)
{
struct dps310_data *data = action_data;
- regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC);
+ dps310_reset_wait(data);
}
static const struct regmap_config dps310_regmap_config = {
@@ -677,52 +827,12 @@ static const struct iio_info dps310_info = {
.write_raw = dps310_write_raw,
};
-/*
- * Some verions of chip will read temperatures in the ~60C range when
- * its actually ~20C. This is the manufacturer recommended workaround
- * to correct the issue. The registers used below are undocumented.
- */
-static int dps310_temp_workaround(struct dps310_data *data)
-{
- int rc;
- int reg;
-
- rc = regmap_read(data->regmap, 0x32, &reg);
- if (rc < 0)
- return rc;
-
- /*
- * If bit 1 is set then the device is okay, and the workaround does not
- * need to be applied
- */
- if (reg & BIT(1))
- return 0;
-
- rc = regmap_write(data->regmap, 0x0e, 0xA5);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x0f, 0x96);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x62, 0x02);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x0e, 0x00);
- if (rc < 0)
- return rc;
-
- return regmap_write(data->regmap, 0x0f, 0x00);
-}
-
static int dps310_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct dps310_data *data;
struct iio_dev *iio;
- int rc, ready;
+ int rc;
iio = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!iio)
@@ -747,54 +857,8 @@ static int dps310_probe(struct i2c_client *client,
if (rc)
return rc;
- /*
- * Set up pressure sensor in single sample, one measurement per second
- * mode
- */
- rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0);
-
- /*
- * Set up external (MEMS) temperature sensor in single sample, one
- * measurement per second mode
- */
- rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT);
- if (rc < 0)
- return rc;
-
- /* Temp and pressure shifts are disabled when PRC <= 8 */
- rc = regmap_write_bits(data->regmap, DPS310_CFG_REG,
- DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0);
- if (rc < 0)
- return rc;
-
- /* MEAS_CFG doesn't update correctly unless first written with 0 */
- rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
- DPS310_MEAS_CTRL_BITS, 0);
- if (rc < 0)
- return rc;
-
- /* Turn on temperature and pressure measurement in the background */
- rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
- DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN |
- DPS310_TEMP_EN | DPS310_BACKGROUND);
- if (rc < 0)
- return rc;
-
- /*
- * Calibration coefficients required for reporting temperature.
- * They are available 40ms after the device has started
- */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_COEF_RDY, 10000, 40000);
- if (rc < 0)
- return rc;
-
- rc = dps310_get_coefs(data);
- if (rc < 0)
- return rc;
-
- rc = dps310_temp_workaround(data);
- if (rc < 0)
+ rc = dps310_startup(data);
+ if (rc)
return rc;
rc = devm_iio_device_register(&client->dev, iio);
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index af4621eaa6b5..b62f28585db5 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -595,7 +595,7 @@ static int icp10100_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
-static int __maybe_unused icp10100_suspend(struct device *dev)
+static int icp10100_suspend(struct device *dev)
{
struct icp10100_state *st = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -607,7 +607,7 @@ static int __maybe_unused icp10100_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused icp10100_resume(struct device *dev)
+static int icp10100_resume(struct device *dev)
{
struct icp10100_state *st = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -626,8 +626,8 @@ out_unlock:
return ret;
}
-static UNIVERSAL_DEV_PM_OPS(icp10100_pm, icp10100_suspend, icp10100_resume,
- NULL);
+static DEFINE_RUNTIME_DEV_PM_OPS(icp10100_pm, icp10100_suspend, icp10100_resume,
+ NULL);
static const struct of_device_id icp10100_of_match[] = {
{
@@ -646,7 +646,7 @@ MODULE_DEVICE_TABLE(i2c, icp10100_id);
static struct i2c_driver icp10100_driver = {
.driver = {
.name = "icp10100",
- .pm = &icp10100_pm,
+ .pm = pm_ptr(&icp10100_pm),
.of_match_table = icp10100_of_match,
},
.probe = icp10100_probe,
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 156e6a72dc5c..6e11bea784fa 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -22,6 +22,7 @@ enum st_press_type {
LPS33HW,
LPS35HW,
LPS22HH,
+ LPS22DF,
ST_PRESS_MAX,
};
@@ -32,6 +33,7 @@ enum st_press_type {
#define LPS33HW_PRESS_DEV_NAME "lps33hw"
#define LPS35HW_PRESS_DEV_NAME "lps35hw"
#define LPS22HH_PRESS_DEV_NAME "lps22hh"
+#define LPS22DF_PRESS_DEV_NAME "lps22df"
/**
* struct st_sensors_platform_data - default press platform data
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 76913a2028d2..80176e3083af 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -552,6 +552,76 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.multi_read_bit = false,
.bootime = 2,
},
+ {
+ /*
+ * CUSTOM VALUES FOR LPS22DF SENSOR
+ * See LPS22DF datasheet:
+ * http://www.st.com/resource/en/datasheet/lps22df.pdf
+ */
+ .wai = 0xb4,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LPS22DF_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
+ .num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
+ .odr = {
+ .addr = 0x10,
+ .mask = 0x78,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01 },
+ { .hz = 4, .value = 0x02 },
+ { .hz = 10, .value = 0x03 },
+ { .hz = 25, .value = 0x04 },
+ { .hz = 50, .value = 0x05 },
+ { .hz = 75, .value = 0x06 },
+ { .hz = 100, .value = 0x07 },
+ { .hz = 200, .value = 0x08 },
+ },
+ },
+ .pw = {
+ .addr = 0x10,
+ .mask = 0x78,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 2 of LPS22DF datasheet.
+ */
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LPS22HB_LSB_PER_CELSIUS,
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x11,
+ .mask = BIT(3),
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x13,
+ .mask = BIT(5),
+ .addr_od = 0x12,
+ .mask_od = BIT(1),
+ },
+ .addr_ihl = 0x12,
+ .mask_ihl = BIT(3),
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x0E,
+ .value = BIT(5),
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
};
static int st_press_write_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 7035777fd988..58fede861891 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -47,6 +47,10 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22hh",
.data = LPS22HH_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps22df",
+ .data = LPS22DF_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -67,6 +71,7 @@ static const struct i2c_device_id st_press_id_table[] = {
{ LPS33HW_PRESS_DEV_NAME, LPS33HW },
{ LPS35HW_PRESS_DEV_NAME, LPS35HW },
{ LPS22HH_PRESS_DEV_NAME, LPS22HH },
+ { LPS22DF_PRESS_DEV_NAME, LPS22DF },
{},
};
MODULE_DEVICE_TABLE(i2c, st_press_id_table);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index bfab8e7fb061..25cca5ad7c55 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -51,6 +51,10 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22hh",
.data = LPS22HH_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps22df",
+ .data = LPS22DF_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -97,6 +101,7 @@ static const struct spi_device_id st_press_id_table[] = {
{ LPS33HW_PRESS_DEV_NAME },
{ LPS35HW_PRESS_DEV_NAME },
{ LPS22HH_PRESS_DEV_NAME },
+ { LPS22DF_PRESS_DEV_NAME },
{ "lps001wp-press" },
{ "lps25h-press", },
{ "lps331ap-press" },
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 05015351a34a..faf2f806ce80 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -359,7 +359,7 @@ static int srf04_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused srf04_pm_runtime_suspend(struct device *dev)
+static int srf04_pm_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
@@ -371,7 +371,7 @@ static int __maybe_unused srf04_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused srf04_pm_runtime_resume(struct device *dev)
+static int srf04_pm_runtime_resume(struct device *dev)
{
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
@@ -385,8 +385,8 @@ static int __maybe_unused srf04_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops srf04_pm_ops = {
- SET_RUNTIME_PM_OPS(srf04_pm_runtime_suspend,
- srf04_pm_runtime_resume, NULL)
+ RUNTIME_PM_OPS(srf04_pm_runtime_suspend,
+ srf04_pm_runtime_resume, NULL)
};
static struct platform_driver srf04_driver = {
@@ -395,7 +395,7 @@ static struct platform_driver srf04_driver = {
.driver = {
.name = "srf04-gpio",
.of_match_table = of_srf04_match,
- .pm = &srf04_pm_ops,
+ .pm = pm_ptr(&srf04_pm_ops),
},
};
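
A hedged sketch of the idiom these PM conversions follow (driver name and callbacks below are invented): the DEFINE_*_DEV_PM_OPS() macros together with pm_ptr()/pm_sleep_ptr() let the compiler discard the callbacks when CONFIG_PM (or CONFIG_PM_SLEEP) is disabled, which is why the __maybe_unused annotations can be dropped:

#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

static int foo_runtime_suspend(struct device *dev) { return 0; }	/* power down hardware */
static int foo_runtime_resume(struct device *dev)  { return 0; }	/* power it back up */

static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
				 foo_runtime_resume, NULL);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops),	/* compiles to NULL when !CONFIG_PM */
	},
};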
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index ea7318b508ea..0e4747ccd3cf 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -965,7 +965,7 @@ static int sx9310_probe(struct i2c_client *client)
return sx_common_probe(client, &sx9310_chip_info, &sx9310_regmap_config);
}
-static int __maybe_unused sx9310_suspend(struct device *dev)
+static int sx9310_suspend(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
u8 ctrl0;
@@ -991,7 +991,7 @@ out:
return ret;
}
-static int __maybe_unused sx9310_resume(struct device *dev)
+static int sx9310_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -1013,7 +1013,7 @@ out:
return 0;
}
-static SIMPLE_DEV_PM_OPS(sx9310_pm_ops, sx9310_suspend, sx9310_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sx9310_pm_ops, sx9310_suspend, sx9310_resume);
static const struct acpi_device_id sx9310_acpi_match[] = {
{ "STH9310", SX9310_WHOAMI_VALUE },
@@ -1041,7 +1041,7 @@ static struct i2c_driver sx9310_driver = {
.name = "sx9310",
.acpi_match_table = sx9310_acpi_match,
.of_match_table = sx9310_of_match,
- .pm = &sx9310_pm_ops,
+ .pm = pm_sleep_ptr(&sx9310_pm_ops),
/*
* Lots of i2c transfers in probe + over 200 ms waiting in
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index edb5a2ce4e27..977cf17cec52 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -1073,7 +1073,7 @@ static int sx9324_probe(struct i2c_client *client)
return sx_common_probe(client, &sx9324_chip_info, &sx9324_regmap_config);
}
-static int __maybe_unused sx9324_suspend(struct device *dev)
+static int sx9324_suspend(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
unsigned int regval;
@@ -1098,7 +1098,7 @@ out:
return ret;
}
-static int __maybe_unused sx9324_resume(struct device *dev)
+static int sx9324_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -1114,7 +1114,7 @@ static int __maybe_unused sx9324_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(sx9324_pm_ops, sx9324_suspend, sx9324_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sx9324_pm_ops, sx9324_suspend, sx9324_resume);
static const struct acpi_device_id sx9324_acpi_match[] = {
{ "STH9324", SX9324_WHOAMI_VALUE },
@@ -1139,7 +1139,7 @@ static struct i2c_driver sx9324_driver = {
.name = "sx9324",
.acpi_match_table = sx9324_acpi_match,
.of_match_table = sx9324_of_match,
- .pm = &sx9324_pm_ops,
+ .pm = pm_sleep_ptr(&sx9324_pm_ops),
/*
* Lots of i2c transfers in probe + over 200 ms waiting in
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index d9a12e6be6ca..7fa2213d23ba 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -819,7 +819,7 @@ static int sx9360_probe(struct i2c_client *client)
return sx_common_probe(client, &sx9360_chip_info, &sx9360_regmap_config);
}
-static int __maybe_unused sx9360_suspend(struct device *dev)
+static int sx9360_suspend(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
unsigned int regval;
@@ -844,7 +844,7 @@ out:
return ret;
}
-static int __maybe_unused sx9360_resume(struct device *dev)
+static int sx9360_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -861,7 +861,7 @@ static int __maybe_unused sx9360_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(sx9360_pm_ops, sx9360_suspend, sx9360_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sx9360_pm_ops, sx9360_suspend, sx9360_resume);
static const struct acpi_device_id sx9360_acpi_match[] = {
{ "STH9360", SX9360_WHOAMI_VALUE },
@@ -886,7 +886,7 @@ static struct i2c_driver sx9360_driver = {
.name = "sx9360",
.acpi_match_table = sx9360_acpi_match,
.of_match_table = sx9360_of_match,
- .pm = &sx9360_pm_ops,
+ .pm = pm_sleep_ptr(&sx9360_pm_ops),
/*
* Lots of i2c transfers in probe + over 200 ms waiting in
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 0808bb865928..8eb0f962ed25 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -79,16 +79,15 @@ struct mlx90614_data {
/* Bandwidth values for IIR filtering */
static const int mlx90614_iir_values[] = {77, 31, 20, 15, 723, 153, 110, 86};
-static IIO_CONST_ATTR(in_temp_object_filter_low_pass_3db_frequency_available,
- "0.15 0.20 0.31 0.77 0.86 1.10 1.53 7.23");
-
-static struct attribute *mlx90614_attributes[] = {
- &iio_const_attr_in_temp_object_filter_low_pass_3db_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group mlx90614_attr_group = {
- .attrs = mlx90614_attributes,
+static const int mlx90614_freqs[][2] = {
+ {0, 150000},
+ {0, 200000},
+ {0, 310000},
+ {0, 770000},
+ {0, 860000},
+ {1, 100000},
+ {1, 530000},
+ {7, 230000}
};
/*
@@ -373,6 +372,22 @@ static int mlx90614_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
+static int mlx90614_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (int *)mlx90614_freqs;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = 2 * ARRAY_SIZE(mlx90614_freqs);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
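
A hedged note on the mechanics (illustrative values only): with IIO_AVAIL_LIST and IIO_VAL_INT_PLUS_MICRO the core consumes a flat array of {integer, microunits} pairs, which is why *length above is 2 * ARRAY_SIZE(mlx90614_freqs); each pair maps to one frequency in hertz:

/* {0, 150000} -> 0.15 Hz, {1, 530000} -> 1.53 Hz, {7, 230000} -> 7.23 Hz */
static const int example_freqs[][2] = {
	{ 0, 150000 },
	{ 1, 530000 },
	{ 7, 230000 },
};
/* exposed via: *vals = (int *)example_freqs;
 *              *length = 2 * ARRAY_SIZE(example_freqs); */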
+
static const struct iio_chan_spec mlx90614_channels[] = {
{
.type = IIO_TEMP,
@@ -389,6 +404,8 @@ static const struct iio_chan_spec mlx90614_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBEMISSIVITY) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE),
},
@@ -401,6 +418,8 @@ static const struct iio_chan_spec mlx90614_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBEMISSIVITY) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE),
},
@@ -410,7 +429,7 @@ static const struct iio_info mlx90614_info = {
.read_raw = mlx90614_read_raw,
.write_raw = mlx90614_write_raw,
.write_raw_get_fmt = mlx90614_write_raw_get_fmt,
- .attrs = &mlx90614_attr_group,
+ .read_avail = mlx90614_read_avail,
};
#ifdef CONFIG_PM
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index e8ef47147e2b..f6dec0e5f097 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -18,6 +18,7 @@
#include <linux/math64.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -128,6 +129,7 @@
* calculations
 * @object_ambient_temperature: Ambient temperature at object (may differ from
 * the ambient temperature of the sensor)
+ * @regulator: Regulator of the device
*/
struct mlx90632_data {
struct i2c_client *client;
@@ -136,6 +138,7 @@ struct mlx90632_data {
u16 emissivity;
u8 mtyp;
u32 object_ambient_temperature;
+ struct regulator *regulator;
};
static const struct regmap_range mlx90632_volatile_reg_range[] = {
@@ -208,6 +211,15 @@ static s32 mlx90632_pwr_continuous(struct regmap *regmap)
}
/**
+ * mlx90632_reset_delay() - Give the mlx90632 some time to reset properly
+ * If this is not done, the following I2C command(s) will not be accepted.
+ */
+static void mlx90632_reset_delay(void)
+{
+ usleep_range(150, 200);
+}
+
+/**
* mlx90632_perform_measurement() - Trigger and retrieve current measurement cycle
* @data: pointer to mlx90632_data object containing regmap information
*
@@ -248,11 +260,7 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
if (ret < 0)
return ret;
- /*
- * Give the mlx90632 some time to reset properly before sending a new I2C command
- * if this is not done, the following I2C command(s) will not be accepted.
- */
- usleep_range(150, 200);
+ mlx90632_reset_delay();
ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
(MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
@@ -841,6 +849,32 @@ static int mlx90632_wakeup(struct mlx90632_data *data)
return mlx90632_pwr_continuous(data->regmap);
}
+static void mlx90632_disable_regulator(void *_data)
+{
+ struct mlx90632_data *data = _data;
+ int ret;
+
+ ret = regulator_disable(data->regulator);
+ if (ret < 0)
+ dev_err(regmap_get_device(data->regmap),
+ "Failed to disable power regulator: %d\n", ret);
+}
+
+static int mlx90632_enable_regulator(struct mlx90632_data *data)
+{
+ int ret;
+
+ ret = regulator_enable(data->regulator);
+ if (ret < 0) {
+ dev_err(regmap_get_device(data->regmap), "Failed to enable power regulator!\n");
+ return ret;
+ }
+
+ mlx90632_reset_delay();
+
+ return ret;
+}
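
A hedged sketch of the cleanup pattern used in probe below (names invented): devm_add_action_or_reset() registers a callback that runs on driver detach, and if the registration itself fails it runs the callback immediately, so the regulator can never be left enabled on an error path:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void foo_regulator_off(void *reg)	/* hypothetical helper */
{
	regulator_disable(reg);
}

static int foo_enable_vdd(struct device *dev, struct regulator *reg)
{
	int ret;

	ret = regulator_enable(reg);
	if (ret)
		return ret;

	/* Runs foo_regulator_off() on detach; if registration fails the
	 * helper calls it immediately before returning the error. */
	return devm_add_action_or_reset(dev, foo_regulator_off, reg);
}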
+
static int mlx90632_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -876,6 +910,23 @@ static int mlx90632_probe(struct i2c_client *client,
indio_dev->channels = mlx90632_channels;
indio_dev->num_channels = ARRAY_SIZE(mlx90632_channels);
+ mlx90632->regulator = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(mlx90632->regulator))
+ return dev_err_probe(&client->dev, PTR_ERR(mlx90632->regulator),
+ "failed to get vdd regulator");
+
+ ret = mlx90632_enable_regulator(mlx90632);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev, mlx90632_disable_regulator,
+ mlx90632);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to setup regulator cleanup action %d\n",
+ ret);
+ return ret;
+ }
+
ret = mlx90632_wakeup(mlx90632);
if (ret < 0) {
dev_err(&client->dev, "Wakeup failed: %d\n", ret);
diff --git a/drivers/iio/test/iio-test-rescale.c b/drivers/iio/test/iio-test-rescale.c
index cc782ccff880..31ee55a6faed 100644
--- a/drivers/iio/test/iio-test-rescale.c
+++ b/drivers/iio/test/iio-test-rescale.c
@@ -29,7 +29,7 @@ struct rescale_tc_data {
const char *expected_off;
};
-const struct rescale_tc_data scale_cases[] = {
+static const struct rescale_tc_data scale_cases[] = {
/*
* Typical use cases
*/
@@ -477,7 +477,7 @@ const struct rescale_tc_data scale_cases[] = {
},
};
-const struct rescale_tc_data offset_cases[] = {
+static const struct rescale_tc_data offset_cases[] = {
/*
* Typical use cases
*/
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index b985e0d9bc05..1f9938a2c475 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -175,6 +175,7 @@ struct cm_device {
struct cm_av {
struct cm_port *port;
struct rdma_ah_attr ah_attr;
+ u16 dlid_datapath;
u16 pkey_index;
u8 timeout;
};
@@ -617,7 +618,6 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
__be64 service_id = cm_id_priv->id.service_id;
- __be64 service_mask = cm_id_priv->id.service_mask;
unsigned long flags;
spin_lock_irqsave(&cm.lock, flags);
@@ -625,9 +625,16 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
parent = *link;
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
service_node);
- if ((cur_cm_id_priv->id.service_mask & service_id) ==
- (service_mask & cur_cm_id_priv->id.service_id) &&
- (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
+
+ if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
+ link = &(*link)->rb_left;
+ else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
+ link = &(*link)->rb_right;
+ else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
+ link = &(*link)->rb_left;
+ else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
+ link = &(*link)->rb_right;
+ else {
/*
* Sharing an ib_cm_id with different handlers is not
* supported
@@ -643,17 +650,6 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
spin_unlock_irqrestore(&cm.lock, flags);
return cur_cm_id_priv;
}
-
- if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
- link = &(*link)->rb_left;
- else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
- link = &(*link)->rb_right;
- else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
- link = &(*link)->rb_left;
- else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
- link = &(*link)->rb_right;
- else
- link = &(*link)->rb_right;
}
cm_id_priv->listen_sharecount++;
rb_link_node(&cm_id_priv->service_node, parent, link);
@@ -670,12 +666,7 @@ static struct cm_id_private *cm_find_listen(struct ib_device *device,
while (node) {
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
- if ((cm_id_priv->id.service_mask & service_id) ==
- cm_id_priv->id.service_id &&
- (cm_id_priv->id.device == device)) {
- refcount_inc(&cm_id_priv->refcount);
- return cm_id_priv;
- }
+
if (device < cm_id_priv->id.device)
node = node->rb_left;
else if (device > cm_id_priv->id.device)
@@ -684,8 +675,10 @@ static struct cm_id_private *cm_find_listen(struct ib_device *device,
node = node->rb_left;
else if (be64_gt(service_id, cm_id_priv->id.service_id))
node = node->rb_right;
- else
- node = node->rb_right;
+ else {
+ refcount_inc(&cm_id_priv->refcount);
+ return cm_id_priv;
+ }
}
return NULL;
}
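
A hedged sketch (not a helper that exists in cm.c) of the composite ordering that the insert and lookup paths now both apply; keeping the two walks identical is what makes the exact-match case reachable as the final else:

static int cm_listen_cmp(const struct cm_id_private *a,
			 const struct cm_id_private *b)
{
	if (a->id.device != b->id.device)
		return a->id.device < b->id.device ? -1 : 1;
	if (a->id.service_id != b->id.service_id)
		return be64_lt(a->id.service_id, b->id.service_id) ? -1 : 1;
	return 0;	/* same device and service ID: an existing listener */
}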
@@ -1158,22 +1151,17 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id)
}
EXPORT_SYMBOL(ib_destroy_cm_id);
-static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
- __be64 service_mask)
+static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
{
- service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
- service_id &= service_mask;
if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
(service_id != IB_CM_ASSIGN_SERVICE_ID))
return -EINVAL;
- if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
+ if (service_id == IB_CM_ASSIGN_SERVICE_ID)
cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
- } else {
+ else
cm_id_priv->id.service_id = service_id;
- cm_id_priv->id.service_mask = service_mask;
- }
+
return 0;
}
@@ -1185,12 +1173,8 @@ static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
 * and service ID resolution requests. The service ID should be specified
 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
* assign a service ID to the caller.
- * @service_mask: Mask applied to service ID used to listen across a
- * range of service IDs. If set to 0, the service ID is matched
- * exactly. This parameter is ignored if %service_id is set to
- * IB_CM_ASSIGN_SERVICE_ID.
*/
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
{
struct cm_id_private *cm_id_priv =
container_of(cm_id, struct cm_id_private, id);
@@ -1203,7 +1187,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
goto out;
}
- ret = cm_init_listen(cm_id_priv, service_id, service_mask);
+ ret = cm_init_listen(cm_id_priv, service_id);
if (ret)
goto out;
@@ -1251,7 +1235,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
if (IS_ERR(cm_id_priv))
return ERR_CAST(cm_id_priv);
- err = cm_init_listen(cm_id_priv, service_id, 0);
+ err = cm_init_listen(cm_id_priv, service_id);
if (err) {
ib_destroy_cm_id(&cm_id_priv->id);
return ERR_PTR(err);
@@ -1321,6 +1305,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
struct sa_path_rec *pri_path = param->primary_path;
struct sa_path_rec *alt_path = param->alternate_path;
bool pri_ext = false;
+ __be16 lid;
if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
@@ -1380,9 +1365,16 @@ static void cm_format_req(struct cm_req_msg *req_msg,
htons(ntohl(sa_path_get_dlid(
pri_path)))));
} else {
+
+ if (param->primary_path_inbound) {
+ lid = param->primary_path_inbound->ib.dlid;
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(lid));
+ } else
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
+
/* Work-around until there's a way to obtain remote LID info */
- IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
- be16_to_cpu(IB_LID_PERMISSIVE));
IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
be16_to_cpu(IB_LID_PERMISSIVE));
}
@@ -1522,7 +1514,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
}
}
cm_id->service_id = param->service_id;
- cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = cm_convert_to_ms(
param->primary_path->packet_life_time) * 2 +
cm_convert_to_ms(
@@ -1538,6 +1529,10 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
cm_move_av_from_path(&cm_id_priv->av, &av);
+ if (param->primary_path_outbound)
+ cm_id_priv->av.dlid_datapath =
+ be16_to_cpu(param->primary_path_outbound->ib.dlid);
+
if (param->alternate_path)
cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
@@ -1632,14 +1627,13 @@ static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
struct sa_path_rec *primary_path,
- struct sa_path_rec *alt_path)
+ struct sa_path_rec *alt_path,
+ struct ib_wc *wc)
{
u32 lid;
if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(primary_path,
- IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
- req_msg));
+ sa_path_set_dlid(primary_path, wc->slid);
sa_path_set_slid(primary_path,
IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
req_msg));
@@ -1676,7 +1670,8 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
struct sa_path_rec *primary_path,
- struct sa_path_rec *alt_path)
+ struct sa_path_rec *alt_path,
+ struct ib_wc *wc)
{
primary_path->dgid =
*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
@@ -1734,7 +1729,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
if (sa_path_is_roce(alt_path))
alt_path->roce.route_resolved = false;
}
- cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
+ cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
}
static u16 cm_get_bth_pkey(struct cm_work *work)
@@ -2079,7 +2074,6 @@ static int cm_req_handler(struct cm_work *work)
cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
cm_id_priv->id.service_id =
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->timeout_ms = cm_convert_to_ms(
IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
@@ -2148,7 +2142,7 @@ static int cm_req_handler(struct cm_work *work)
if (cm_req_has_alt_path(req_msg))
work->path[1].rec_type = work->path[0].rec_type;
cm_format_paths_from_req(req_msg, &work->path[0],
- &work->path[1]);
+ &work->path[1], work->mad_recv_wc->wc);
if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
sa_path_set_dmac(&work->path[0],
cm_id_priv->av.ah_attr.roce.dmac);
@@ -2173,6 +2167,10 @@ static int cm_req_handler(struct cm_work *work)
NULL, 0);
goto rejected;
}
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
+ cm_id_priv->av.dlid_datapath =
+ IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);
+
if (cm_req_has_alt_path(req_msg)) {
ret = cm_init_av_by_path(&work->path[1], NULL,
&cm_id_priv->alt_av);
@@ -3486,7 +3484,6 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
cm_move_av_from_path(&cm_id_priv->av, &av);
cm_id->service_id = param->service_id;
- cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = param->timeout_ms;
cm_id_priv->max_cm_retries = param->max_cm_retries;
if (cm_id->state != IB_CM_IDLE) {
@@ -3561,7 +3558,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
cm_id_priv->id.service_id =
cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_id_priv->tid = sidr_req_msg->hdr.tid;
wc = work->mad_recv_wc->wc;
@@ -4134,6 +4130,10 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
IB_QP_DEST_QPN | IB_QP_RQ_PSN;
qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+ if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
+ cm_id_priv->av.dlid_datapath &&
+ (cm_id_priv->av.dlid_datapath != 0xffff))
+ qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
qp_attr->path_mtu = cm_id_priv->path_mtu;
qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index be317f2665a9..cc2222b85c88 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2026,6 +2026,8 @@ static void _destroy_id(struct rdma_id_private *id_priv,
cma_id_put(id_priv->id.context);
kfree(id_priv->id.route.path_rec);
+ kfree(id_priv->id.route.path_rec_inbound);
+ kfree(id_priv->id.route.path_rec_outbound);
put_net(id_priv->id.route.addr.dev_addr.net);
kfree(id_priv);
@@ -2241,14 +2243,14 @@ cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
goto err;
rt = &id->route;
- rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
- rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),
- GFP_KERNEL);
+ rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
+ rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
+ sizeof(*rt->path_rec), GFP_KERNEL);
if (!rt->path_rec)
goto err;
rt->path_rec[0] = *path;
- if (rt->num_paths == 2)
+ if (rt->num_pri_alt_paths == 2)
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
if (net_dev) {
@@ -2817,26 +2819,72 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
}
EXPORT_SYMBOL(rdma_set_min_rnr_timer);
+static void route_set_path_rec_inbound(struct cma_work *work,
+ struct sa_path_rec *path_rec)
+{
+ struct rdma_route *route = &work->id->id.route;
+
+ if (!route->path_rec_inbound) {
+ route->path_rec_inbound =
+ kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
+ if (!route->path_rec_inbound)
+ return;
+ }
+
+ *route->path_rec_inbound = *path_rec;
+}
+
+static void route_set_path_rec_outbound(struct cma_work *work,
+ struct sa_path_rec *path_rec)
+{
+ struct rdma_route *route = &work->id->id.route;
+
+ if (!route->path_rec_outbound) {
+ route->path_rec_outbound =
+ kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
+ if (!route->path_rec_outbound)
+ return;
+ }
+
+ *route->path_rec_outbound = *path_rec;
+}
+
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
- void *context)
+ int num_prs, void *context)
{
struct cma_work *work = context;
struct rdma_route *route;
+ int i;
route = &work->id->id.route;
- if (!status) {
- route->num_paths = 1;
- *route->path_rec = *path_rec;
- } else {
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
- work->event.status = status;
- pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
- status);
+ if (status)
+ goto fail;
+
+ for (i = 0; i < num_prs; i++) {
+ if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
+ *route->path_rec = path_rec[i];
+ else if (path_rec[i].flags & IB_PATH_INBOUND)
+ route_set_path_rec_inbound(work, &path_rec[i]);
+ else if (path_rec[i].flags & IB_PATH_OUTBOUND)
+ route_set_path_rec_outbound(work, &path_rec[i]);
}
+ if (!route->path_rec) {
+ status = -EINVAL;
+ goto fail;
+ }
+
+ route->num_pri_alt_paths = 1;
+ queue_work(cma_wq, &work->work);
+ return;
+fail:
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
+ work->event.status = status;
+ pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
+ status);
queue_work(cma_wq, &work->work);
}
@@ -3081,7 +3129,7 @@ int rdma_set_ib_path(struct rdma_cm_id *id,
dev_put(ndev);
}
- id->route.num_paths = 1;
+ id->route.num_pri_alt_paths = 1;
return 0;
err_free:
@@ -3214,7 +3262,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err1;
}
- route->num_paths = 1;
+ route->num_pri_alt_paths = 1;
ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
if (!ndev) {
@@ -3274,7 +3322,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
- route->num_paths = 0;
+ route->num_pri_alt_paths = 0;
err1:
kfree(work);
return ret;
@@ -3759,7 +3807,7 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
- rover = prandom_u32() % remaining + low;
+ rover = prandom_u32_max(remaining) + low;
retry:
if (last_used_port != rover) {
struct rdma_bind_list *bind_list;
@@ -4265,7 +4313,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
}
req.primary_path = &route->path_rec[0];
- if (route->num_paths == 2)
+ req.primary_path_inbound = route->path_rec_inbound;
+ req.primary_path_outbound = route->path_rec_outbound;
+ if (route->num_pri_alt_paths == 2)
req.alternate_path = &route->path_rec[1];
req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index de8a2d5d741c..7b68b3ea979f 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -292,7 +292,7 @@ static struct config_group *make_cma_dev(struct config_group *group,
goto fail;
}
- strlcpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
+ strscpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
config_group_init_type_name(&cma_dev_group->ports_group, "ports",
&cma_ports_group_type);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d275db195f1a..ae60c73babcc 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -422,7 +422,7 @@ int ib_device_rename(struct ib_device *ibdev, const char *name)
return ret;
}
- strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+ strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
ret = rename_compat_devs(ibdev);
downgrade_write(&devices_rwsem);
@@ -1217,7 +1217,7 @@ static int assign_name(struct ib_device *device, const char *name)
ret = -ENFILE;
goto out;
}
- strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
+ strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
&last_id, GFP_KERNEL);
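The strlcpy() -> strscpy() conversions in this series are behaviour-neutral at these call sites (the return value is ignored), but strscpy() stops reading the source after the buffer size and reports truncation directly, whereas strlcpy() returns strlen(src) and leaves truncation to be inferred. A hedged sketch of the difference, reusing the ib_device_rename() names from the hunk above purely for illustration:

	ssize_t n;

	n = strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
	if (n == -E2BIG)
		pr_warn("device name truncated\n");	/* strscpy(): negative return on truncation */

	/*
	 * strlcpy() instead returned strlen(name), so truncation had to be
	 * detected by comparing that return value against the buffer size.
	 */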
diff --git a/drivers/infiniband/core/lag.c b/drivers/infiniband/core/lag.c
index 7063e41eaf26..c77d7d2559a1 100644
--- a/drivers/infiniband/core/lag.c
+++ b/drivers/infiniband/core/lag.c
@@ -7,8 +7,7 @@
#include <rdma/ib_cache.h>
#include <rdma/lag.h>
-static struct sk_buff *rdma_build_skb(struct ib_device *device,
- struct net_device *netdev,
+static struct sk_buff *rdma_build_skb(struct net_device *netdev,
struct rdma_ah_attr *ah_attr,
gfp_t flags)
{
@@ -86,7 +85,7 @@ static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
struct net_device *slave;
struct sk_buff *skb;
- skb = rdma_build_skb(device, master, ah_attr, flags);
+ skb = rdma_build_skb(master, ah_attr, flags);
if (!skb)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 003e504feca2..0de83d9a4985 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -50,6 +50,7 @@
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
+#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"
@@ -104,7 +105,8 @@ struct ib_sa_device {
};
struct ib_sa_query {
- void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
+ void (*callback)(struct ib_sa_query *sa_query, int status,
+ int num_prs, struct ib_sa_mad *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
@@ -116,6 +118,12 @@ struct ib_sa_query {
u32 seq; /* Local svc request sequence number */
unsigned long timeout; /* Local svc timeout */
u8 path_use; /* How will the pathrecord be used */
+
+ /* A separate buffer to save the pathrecords of a response; in cases
+ * like IB/netlink, multiple pathrecords are supported and mad->data
+ * is not large enough to hold them
+ */
+ void *resp_pr_data;
};
#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
@@ -123,7 +131,8 @@ struct ib_sa_query {
#define IB_SA_QUERY_OPA 0x00000004
struct ib_sa_path_query {
- void (*callback)(int, struct sa_path_rec *, void *);
+ void (*callback)(int status, struct sa_path_rec *rec,
+ int num_paths, void *context);
void *context;
struct ib_sa_query sa_query;
struct sa_path_rec *conv_pr;
@@ -712,7 +721,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
sa_rec->reversible != 0)
- query->path_use = LS_RESOLVE_PATH_USE_GMP;
+ query->path_use = LS_RESOLVE_PATH_USE_ALL;
else
query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
header->path_use = query->path_use;
@@ -865,50 +874,81 @@ static void send_handler(struct ib_mad_agent *agent,
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
const struct nlmsghdr *nlh)
{
+ struct ib_path_rec_data *srec, *drec;
+ struct ib_sa_path_query *path_query;
struct ib_mad_send_wc mad_send_wc;
- struct ib_sa_mad *mad = NULL;
const struct nlattr *head, *curr;
- struct ib_path_rec_data *rec;
- int len, rem;
+ struct ib_sa_mad *mad = NULL;
+ int len, rem, num_prs = 0;
u32 mask = 0;
int status = -EIO;
- if (query->callback) {
- head = (const struct nlattr *) nlmsg_data(nlh);
- len = nlmsg_len(nlh);
- switch (query->path_use) {
- case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
- mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
- break;
+ if (!query->callback)
+ goto out;
- case LS_RESOLVE_PATH_USE_ALL:
- case LS_RESOLVE_PATH_USE_GMP:
- default:
- mask = IB_PATH_PRIMARY | IB_PATH_GMP |
- IB_PATH_BIDIRECTIONAL;
- break;
+ path_query = container_of(query, struct ib_sa_path_query, sa_query);
+ mad = query->mad_buf->mad;
+ if (!path_query->conv_pr &&
+ (be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
+ /* Need a larger buffer for possible multiple PRs */
+ query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
+ sizeof(*drec), GFP_KERNEL);
+ if (!query->resp_pr_data) {
+ query->callback(query, -ENOMEM, 0, NULL);
+ return;
}
- nla_for_each_attr(curr, head, len, rem) {
- if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
- rec = nla_data(curr);
- /*
- * Get the first one. In the future, we may
- * need to get up to 6 pathrecords.
- */
- if ((rec->flags & mask) == mask) {
- mad = query->mad_buf->mad;
- mad->mad_hdr.method |=
- IB_MGMT_METHOD_RESP;
- memcpy(mad->data, rec->path_rec,
- sizeof(rec->path_rec));
- status = 0;
- break;
- }
- }
+ }
+
+ head = (const struct nlattr *) nlmsg_data(nlh);
+ len = nlmsg_len(nlh);
+ switch (query->path_use) {
+ case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
+ mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
+ break;
+
+ case LS_RESOLVE_PATH_USE_ALL:
+ mask = IB_PATH_PRIMARY;
+ break;
+
+ case LS_RESOLVE_PATH_USE_GMP:
+ default:
+ mask = IB_PATH_PRIMARY | IB_PATH_GMP |
+ IB_PATH_BIDIRECTIONAL;
+ break;
+ }
+
+ drec = (struct ib_path_rec_data *)query->resp_pr_data;
+ nla_for_each_attr(curr, head, len, rem) {
+ if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
+ continue;
+
+ srec = nla_data(curr);
+ if ((srec->flags & mask) != mask)
+ continue;
+
+ status = 0;
+ if (!drec) {
+ memcpy(mad->data, srec->path_rec,
+ sizeof(srec->path_rec));
+ num_prs = 1;
+ break;
}
- query->callback(query, status, mad);
+
+ memcpy(drec, srec, sizeof(*drec));
+ drec++;
+ num_prs++;
+ if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
+ break;
}
+ if (!status)
+ mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+
+ query->callback(query, status, num_prs, mad);
+ kvfree(query->resp_pr_data);
+ query->resp_pr_data = NULL;
+
+out:
mad_send_wc.send_buf = query->mad_buf;
mad_send_wc.status = IB_WC_SUCCESS;
send_handler(query->mad_buf->mad_agent, &mad_send_wc);
@@ -1411,41 +1451,90 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
return PR_IB_SUPPORTED;
}
+static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
+ int status, struct ib_sa_mad *mad)
+{
+ struct sa_path_rec rec = {};
+
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ mad->data, &rec);
+ rec.rec_type = SA_PATH_REC_TYPE_IB;
+ sa_path_set_dmac_zero(&rec);
+
+ if (query->conv_pr) {
+ struct sa_path_rec opa;
+
+ memset(&opa, 0, sizeof(struct sa_path_rec));
+ sa_convert_path_ib_to_opa(&opa, &rec);
+ query->callback(status, &opa, 1, query->context);
+ } else {
+ query->callback(status, &rec, 1, query->context);
+ }
+}
+
+/**
+ * ib_sa_pr_callback_multiple() - Parse path records then do callback.
+ *
+ * In a multiple-PR case the PRs are saved in "query->resp_pr_data"
+ * (instead of "mad->data") and with "ib_path_rec_data" structure format,
+ * so that rec->flags can be set to indicate the type of PR.
+ * This is valid only in IB fabric.
+ */
+static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
+ int status, int num_prs,
+ struct ib_path_rec_data *rec_data)
+{
+ struct sa_path_rec *rec;
+ int i;
+
+ rec = kvcalloc(num_prs, sizeof(*rec), GFP_KERNEL);
+ if (!rec) {
+ query->callback(-ENOMEM, NULL, 0, query->context);
+ return;
+ }
+
+ for (i = 0; i < num_prs; i++) {
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ rec_data[i].path_rec, rec + i);
+ rec[i].rec_type = SA_PATH_REC_TYPE_IB;
+ sa_path_set_dmac_zero(rec + i);
+ rec[i].flags = rec_data[i].flags;
+ }
+
+ query->callback(status, rec, num_prs, query->context);
+ kvfree(rec);
+}
+
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
- int status,
+ int status, int num_prs,
struct ib_sa_mad *mad)
{
struct ib_sa_path_query *query =
container_of(sa_query, struct ib_sa_path_query, sa_query);
+ struct sa_path_rec rec;
- if (mad) {
- struct sa_path_rec rec;
-
- if (sa_query->flags & IB_SA_QUERY_OPA) {
- ib_unpack(opa_path_rec_table,
- ARRAY_SIZE(opa_path_rec_table),
- mad->data, &rec);
- rec.rec_type = SA_PATH_REC_TYPE_OPA;
- query->callback(status, &rec, query->context);
- } else {
- ib_unpack(path_rec_table,
- ARRAY_SIZE(path_rec_table),
- mad->data, &rec);
- rec.rec_type = SA_PATH_REC_TYPE_IB;
- sa_path_set_dmac_zero(&rec);
-
- if (query->conv_pr) {
- struct sa_path_rec opa;
+ if (!mad || !num_prs) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
- memset(&opa, 0, sizeof(struct sa_path_rec));
- sa_convert_path_ib_to_opa(&opa, &rec);
- query->callback(status, &opa, query->context);
- } else {
- query->callback(status, &rec, query->context);
- }
+ if (sa_query->flags & IB_SA_QUERY_OPA) {
+ if (num_prs != 1) {
+ query->callback(-EINVAL, NULL, 0, query->context);
+ return;
}
- } else
- query->callback(status, NULL, query->context);
+
+ ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
+ mad->data, &rec);
+ rec.rec_type = SA_PATH_REC_TYPE_OPA;
+ query->callback(status, &rec, num_prs, query->context);
+ } else {
+ if (!sa_query->resp_pr_data)
+ ib_sa_pr_callback_single(query, status, mad);
+ else
+ ib_sa_pr_callback_multiple(query, status, num_prs,
+ sa_query->resp_pr_data);
+ }
}
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
@@ -1489,7 +1578,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct sa_path_rec *resp,
- void *context),
+ int num_paths, void *context),
void *context,
struct ib_sa_query **sa_query)
{
@@ -1588,7 +1677,7 @@ err1:
EXPORT_SYMBOL(ib_sa_path_rec_get);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
- int status,
+ int status, int num_prs,
struct ib_sa_mad *mad)
{
struct ib_sa_mcmember_query *query =
@@ -1680,7 +1769,7 @@ err1:
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
- int status,
+ int status, int num_paths,
struct ib_sa_mad *mad)
{
struct ib_sa_guidinfo_query *query =
@@ -1790,7 +1879,7 @@ static void ib_classportinfo_cb(void *context)
}
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
- int status,
+ int status, int num_prs,
struct ib_sa_mad *mad)
{
unsigned long flags;
@@ -1966,13 +2055,13 @@ static void send_handler(struct ib_mad_agent *agent,
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, NULL);
+ query->callback(query, -ETIMEDOUT, 0, NULL);
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, NULL);
+ query->callback(query, -EINTR, 0, NULL);
break;
default:
- query->callback(query, -EIO, NULL);
+ query->callback(query, -EIO, 0, NULL);
break;
}
@@ -2000,10 +2089,10 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
query->callback(query,
mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0,
+ -EINVAL : 0, 1,
(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
else
- query->callback(query, -EIO, NULL);
+ query->callback(query, -EIO, 0, NULL);
}
ib_free_recv_mad(mad_recv_wc);
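The multi-record path above keeps the raw netlink pathrecords in resp_pr_data in their UAPI wire format, so that ib_sa_pr_callback_multiple() can recover each record's flags. For reference, that layout is the existing struct ib_path_rec_data; a sketch assuming the include/uapi/rdma/ib_user_sa.h definition (the IB_PATH_GMP/PRIMARY/INBOUND/OUTBOUND flag bits come from the same header):

struct ib_path_rec_data {
	__u32	flags;		/* IB_PATH_* bits, copied into rec[i].flags above */
	__u32	reserved;
	__u32	path_rec[16];	/* 64-byte packed pathrecord, fed to ib_unpack() */
};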
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 9d6ac9dff39a..bf42650f125b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -754,8 +754,8 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
{
struct rdma_dev_addr *dev_addr;
- resp->num_paths = route->num_paths;
- switch (route->num_paths) {
+ resp->num_paths = route->num_pri_alt_paths;
+ switch (route->num_pri_alt_paths) {
case 0:
dev_addr = &route->addr.dev_addr;
rdma_addr_get_dgid(dev_addr,
@@ -781,8 +781,8 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
struct rdma_route *route)
{
- resp->num_paths = route->num_paths;
- switch (route->num_paths) {
+ resp->num_paths = route->num_pri_alt_paths;
+ switch (route->num_pri_alt_paths) {
case 0:
rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
(union ib_gid *)&resp->ib_route[0].dgid);
@@ -921,7 +921,7 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
if (!resp)
return -ENOMEM;
- resp->num_paths = ctx->cm_id->route.num_paths;
+ resp->num_paths = ctx->cm_id->route.num_pri_alt_paths;
for (i = 0, out_len -= sizeof(*resp);
i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
i++, out_len -= sizeof(struct ib_path_rec_data)) {
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index d39e16c211e8..e9fa22d31c23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -43,8 +43,6 @@
#include <linux/hmm.h>
#include <linux/pagemap.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 046376bd68e2..4796f6a8828c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -739,6 +739,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
mr->uobject = uobj;
atomic_inc(&pd->usecnt);
mr->iova = cmd.hca_va;
+ mr->length = cmd.length;
rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
rdma_restrack_set_name(&mr->res, NULL);
@@ -861,8 +862,10 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
mr->pd = new_pd;
atomic_inc(&new_pd->usecnt);
}
- if (cmd.flags & IB_MR_REREG_TRANS)
+ if (cmd.flags & IB_MR_REREG_TRANS) {
mr->iova = cmd.hca_va;
+ mr->length = cmd.length;
+ }
}
memset(&resp, 0, sizeof(resp));
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 990f0724acc6..d9799706c58e 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -337,6 +337,14 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
break;
+ case UVERBS_ATTR_TYPE_RAW_FD:
+ if (uattr->attr_data.reserved || uattr->len != 0 ||
+ uattr->data_s64 < INT_MIN || uattr->data_s64 > INT_MAX)
+ return -EINVAL;
+ /* _uverbs_get_const_signed() is the accessor */
+ e->ptr_attr.data = uattr->data_s64;
+ break;
+
case UVERBS_ATTR_TYPE_IDRS_ARRAY:
return uverbs_process_idrs_array(pbundle, attr_uapi,
&e->objs_arr_attr, uattr,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e54b3f1b730e..26b021f43ba4 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1038,7 +1038,7 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
if (ret) {
rdma_restrack_put(&srq->res);
- atomic_dec(&srq->pd->usecnt);
+ atomic_dec(&pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
atomic_dec(&srq->ext.xrc.xrcd->usecnt);
if (ib_srq_has_cq(srq->srq_type))
@@ -2149,6 +2149,8 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->pd = pd;
mr->dm = NULL;
atomic_inc(&pd->usecnt);
+ mr->iova = virt_addr;
+ mr->length = length;
rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
rdma_restrack_parent_name(&mr->res, &pd->res);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 3d6834d3d4fb..8c0c80a8d338 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -725,7 +725,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->node_type = RDMA_NODE_IB_CA;
- strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
+ strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
strlen(BNXT_RE_DESC) + 5);
ibdev->phys_port_cnt = 1;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 14392c942f49..499a425a3379 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -734,7 +734,7 @@ static int send_connect(struct c4iw_ep *ep)
&ep->com.remote_addr;
int ret;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
struct net_device *netdev;
u64 params;
@@ -2469,7 +2469,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
}
if (!is_t4(adapter_type)) {
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index f64e7e02b129..280d61466855 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,7 +54,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
if (obj < alloc->max) {
if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
- alloc->last += prandom_u32() % RANDOM_SKIP;
+ alloc->last += prandom_u32_max(RANDOM_SKIP);
else
alloc->last = obj + 1;
if (alloc->last >= alloc->max)
@@ -85,7 +85,7 @@ int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
alloc->start = start;
alloc->flags = flags;
if (flags & C4IW_ID_TABLE_F_RANDOM)
- alloc->last = prandom_u32() % RANDOM_SKIP;
+ alloc->last = prandom_u32_max(RANDOM_SKIP);
else
alloc->last = 0;
alloc->max = num;
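A note on the random-number conversions in this file and in cma.c/hns above: where the full 32-bit value is used, prandom_u32() becomes get_random_u32(); where a bounded value is needed, the open-coded modulo becomes prandom_u32_max(). A sketch of that helper, assuming the 6.1-era include/linux/prandom.h definition, which replaces the division with a multiply-and-shift:

static inline u32 prandom_u32_max(u32 ep_ro)
{
	/* Maps a 32-bit random value onto [0, ep_ro) without a modulo. */
	return (u32)(((u64)prandom_u32() * ep_ro) >> 32);
}

/* So "prandom_u32() % RANDOM_SKIP" becomes "prandom_u32_max(RANDOM_SKIP)". */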
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 0b0b93b529f3..d4b9226088bd 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -444,7 +444,10 @@ struct efa_admin_create_cq_cmd {
/*
* 4:0 : cq_entry_size_words - size of CQ entry in
* 32-bit words, valid values: 4, 8.
- * 7:5 : reserved7 - MBZ
+ * 5 : set_src_addr - If set, source address will be
+ * filled on RX completions from unknown senders.
+ * Requires 8 words CQ entry size.
+ * 7:6 : reserved7 - MBZ
*/
u8 cq_caps_2;
@@ -980,6 +983,7 @@ struct efa_admin_host_info {
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK BIT(5)
/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK BIT(0)
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index fb405da4e1db..8f8885e002ba 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -168,7 +168,10 @@ int efa_com_create_cq(struct efa_com_dev *edev,
EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1);
create_cmd.eqn = params->eqn;
}
-
+ if (params->set_src_addr) {
+ EFA_SET(&create_cmd.cq_caps_2,
+ EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR, 1);
+ }
efa_com_set_dma_addr(params->dma_addr,
&create_cmd.cq_ba.mem_addr_high,
&create_cmd.cq_ba.mem_addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index c33010bbf9e8..0898ad5bc340 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -75,7 +75,8 @@ struct efa_com_create_cq_params {
u16 uarn;
u16 eqn;
u8 entry_size_in_bytes;
- bool interrupt_mode_enabled;
+ u8 interrupt_mode_enabled : 1;
+ u8 set_src_addr : 1;
};
struct efa_com_create_cq_result {
diff --git a/drivers/infiniband/hw/efa/efa_io_defs.h b/drivers/infiniband/hw/efa/efa_io_defs.h
new file mode 100644
index 000000000000..17ba8984b11e
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_io_defs.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_IO_H_
+#define _EFA_IO_H_
+
+#define EFA_IO_TX_DESC_NUM_BUFS 2
+#define EFA_IO_TX_DESC_NUM_RDMA_BUFS 1
+#define EFA_IO_TX_DESC_INLINE_MAX_SIZE 32
+#define EFA_IO_TX_DESC_IMM_DATA_SIZE 4
+
+enum efa_io_queue_type {
+ /* send queue (of a QP) */
+ EFA_IO_SEND_QUEUE = 1,
+ /* recv queue (of a QP) */
+ EFA_IO_RECV_QUEUE = 2,
+};
+
+enum efa_io_send_op_type {
+ /* send message */
+ EFA_IO_SEND = 0,
+ /* RDMA read */
+ EFA_IO_RDMA_READ = 1,
+};
+
+enum efa_io_comp_status {
+ /* Successful completion */
+ EFA_IO_COMP_STATUS_OK = 0,
+ /* Flushed during QP destroy */
+ EFA_IO_COMP_STATUS_FLUSHED = 1,
+ /* Internal QP error */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_QP_INTERNAL_ERROR = 2,
+ /* Bad operation type */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_OP_TYPE = 3,
+ /* Bad AH */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_AH = 4,
+ /* LKEY not registered or does not match IOVA */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_LKEY = 5,
+ /* Message too long */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_BAD_LENGTH = 6,
+ /* Destination ENI is down or does not run EFA */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_ADDRESS = 7,
+ /* Connection was reset by remote side */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_ABORT = 8,
+ /* Bad dest QP number (QP does not exist or is in error state) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_DEST_QPN = 9,
+ /* Destination resource not ready (no WQEs posted on RQ) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_RNR = 10,
+ /* Receiver SGL too short */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_LENGTH = 11,
+ /* Unexpected status returned by responder */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_STATUS = 12,
+ /* Unresponsive remote - detected locally */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_UNRESP_REMOTE = 13,
+};
+
+struct efa_io_tx_meta_desc {
+ /* Verbs-generated Request ID */
+ u16 req_id;
+
+ /*
+ * control flags
+ * 3:0 : op_type - operation type: send/rdma/fast mem
+ * ops/etc
+ * 4 : has_imm - immediate_data field carries valid
+ * data.
+ * 5 : inline_msg - inline mode - inline message data
+ * follows this descriptor (no buffer descriptors).
+ * Note that it is different from immediate data
+ * 6 : meta_extension - Extended metadata. MBZ
+ * 7 : meta_desc - Indicates metadata descriptor.
+ * Must be set.
+ */
+ u8 ctrl1;
+
+ /*
+ * control flags
+ * 0 : phase
+ * 1 : reserved25 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction. Must be set.
+ * 3 : last - Indicates last descriptor in
+ * transaction. Must be set.
+ * 4 : comp_req - Indicates whether completion should
+ * be posted, after packet is transmitted. Valid only
+ * for the first descriptor
+ * 7:5 : reserved29 - MBZ
+ */
+ u8 ctrl2;
+
+ u16 dest_qp_num;
+
+ /*
+ * If inline_msg bit is set, length of inline message in bytes,
+ * otherwise length of SGL (number of buffers).
+ */
+ u16 length;
+
+ /*
+ * immediate data: if has_imm is set, then this field is included
+ * within Tx message and reported in remote Rx completion.
+ */
+ u32 immediate_data;
+
+ u16 ah;
+
+ u16 reserved;
+
+ /* Queue key */
+ u32 qkey;
+
+ u8 reserved2[12];
+};
+
+/*
+ * Tx queue buffer descriptor, for any transport type. Preceded by metadata
+ * descriptor.
+ */
+struct efa_io_tx_buf_desc {
+ /* length in bytes */
+ u32 length;
+
+ /*
+ * 23:0 : lkey - local memory translation key
+ * 31:24 : reserved - MBZ
+ */
+ u32 lkey;
+
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer address bits[63:32] */
+ u32 buf_addr_hi;
+};
+
+struct efa_io_remote_mem_addr {
+ /* length in bytes */
+ u32 length;
+
+ /* remote memory translation key */
+ u32 rkey;
+
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer address bits[63:32] */
+ u32 buf_addr_hi;
+};
+
+struct efa_io_rdma_req {
+ /* Remote memory address */
+ struct efa_io_remote_mem_addr remote_mem;
+
+ /* Local memory address */
+ struct efa_io_tx_buf_desc local_mem[1];
+};
+
+/*
+ * Tx WQE, composed of tx meta descriptors followed by either tx buffer
+ * descriptors or inline data
+ */
+struct efa_io_tx_wqe {
+ /* TX meta */
+ struct efa_io_tx_meta_desc meta;
+
+ union {
+ /* Send buffer descriptors */
+ struct efa_io_tx_buf_desc sgl[2];
+
+ u8 inline_data[32];
+
+ /* RDMA local and remote memory addresses */
+ struct efa_io_rdma_req rdma_req;
+ } data;
+};
+
+/*
+ * Rx buffer descriptor; RX WQE is composed of one or more RX buffer
+ * descriptors.
+ */
+struct efa_io_rx_desc {
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer Pointer[63:32] */
+ u32 buf_addr_hi;
+
+ /* Verbs-generated request id. */
+ u16 req_id;
+
+ /* Length in bytes. */
+ u16 length;
+
+ /*
+ * LKey and control flags
+ * 23:0 : lkey
+ * 29:24 : reserved - MBZ
+ * 30 : first - Indicates first descriptor in WQE
+ * 31 : last - Indicates last descriptor in WQE
+ */
+ u32 lkey_ctrl;
+};
+
+/* Common IO completion descriptor */
+struct efa_io_cdesc_common {
+ /*
+ * verbs-generated request ID, as provided in the completed tx or rx
+ * descriptor.
+ */
+ u16 req_id;
+
+ u8 status;
+
+ /*
+ * flags
+ * 0 : phase - Phase bit
+ * 2:1 : q_type - enum efa_io_queue_type: send/recv
+ * 3 : has_imm - indicates that immediate data is
+ * present - for RX completions only
+ * 7:4 : reserved28 - MBZ
+ */
+ u8 flags;
+
+ /* local QP number */
+ u16 qp_num;
+
+ /* Transferred length */
+ u16 length;
+};
+
+/* Tx completion descriptor */
+struct efa_io_tx_cdesc {
+ /* Common completion info */
+ struct efa_io_cdesc_common common;
+};
+
+/* Rx Completion Descriptor */
+struct efa_io_rx_cdesc {
+ /* Common completion info */
+ struct efa_io_cdesc_common common;
+
+ /* Remote Address Handle FW index, 0xFFFF indicates invalid ah */
+ u16 ah;
+
+ u16 src_qp_num;
+
+ /* Immediate data */
+ u32 imm;
+};
+
+/* Extended Rx Completion Descriptor */
+struct efa_io_rx_cdesc_ex {
+ /* Base RX completion info */
+ struct efa_io_rx_cdesc rx_cdesc_base;
+
+ /*
+ * Valid only in case of unknown AH (0xFFFF) and CQ set_src_addr is
+ * enabled.
+ */
+ u8 src_addr[16];
+};
+
+/* tx_meta_desc */
+#define EFA_IO_TX_META_DESC_OP_TYPE_MASK GENMASK(3, 0)
+#define EFA_IO_TX_META_DESC_HAS_IMM_MASK BIT(4)
+#define EFA_IO_TX_META_DESC_INLINE_MSG_MASK BIT(5)
+#define EFA_IO_TX_META_DESC_META_EXTENSION_MASK BIT(6)
+#define EFA_IO_TX_META_DESC_META_DESC_MASK BIT(7)
+#define EFA_IO_TX_META_DESC_PHASE_MASK BIT(0)
+#define EFA_IO_TX_META_DESC_FIRST_MASK BIT(2)
+#define EFA_IO_TX_META_DESC_LAST_MASK BIT(3)
+#define EFA_IO_TX_META_DESC_COMP_REQ_MASK BIT(4)
+
+/* tx_buf_desc */
+#define EFA_IO_TX_BUF_DESC_LKEY_MASK GENMASK(23, 0)
+
+/* rx_desc */
+#define EFA_IO_RX_DESC_LKEY_MASK GENMASK(23, 0)
+#define EFA_IO_RX_DESC_FIRST_MASK BIT(30)
+#define EFA_IO_RX_DESC_LAST_MASK BIT(31)
+
+/* cdesc_common */
+#define EFA_IO_CDESC_COMMON_PHASE_MASK BIT(0)
+#define EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1)
+#define EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3)
+
+#endif /* _EFA_IO_H_ */
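The mask definitions at the end of this new header are what the EFA data path uses to pack and unpack descriptor control bytes. The driver itself goes through its EFA_SET()/EFA_GET() helpers (as in the efa_com_cmd.c hunk earlier); the sketch below shows equivalent generic usage with FIELD_PREP() from <linux/bitfield.h>, purely for illustration (build_tx_meta_ctrl1() is a hypothetical helper, not part of the patch):

#include <linux/bitfield.h>

static u8 build_tx_meta_ctrl1(u8 op_type, bool has_imm, bool inline_msg)
{
	/* Compose the ctrl1 byte of struct efa_io_tx_meta_desc. */
	return FIELD_PREP(EFA_IO_TX_META_DESC_OP_TYPE_MASK, op_type) |
	       FIELD_PREP(EFA_IO_TX_META_DESC_HAS_IMM_MASK, has_imm) |
	       FIELD_PREP(EFA_IO_TX_META_DESC_INLINE_MSG_MASK, inline_msg) |
	       FIELD_PREP(EFA_IO_TX_META_DESC_META_DESC_MASK, 1);	/* meta_desc: must be set */
}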
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index ecfe70eb5efb..31454643f8c5 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/dma-buf.h>
@@ -15,6 +15,7 @@
#include <rdma/uverbs_ioctl.h>
#include "efa.h"
+#include "efa_io_defs.h"
enum {
EFA_MMAP_DMA_PAGE = 0,
@@ -242,6 +243,7 @@ int efa_query_device(struct ib_device *ibdev,
resp.max_rq_wr = dev_attr->max_rq_depth;
resp.max_rdma_size = dev_attr->max_rdma_size;
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
if (EFA_DEV_CAP(dev, RDMA_READ))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
@@ -1064,6 +1066,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct efa_ibv_create_cq cmd = {};
struct efa_cq *cq = to_ecq(ibcq);
int entries = attr->cqe;
+ bool set_src_addr;
int err;
ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
@@ -1109,7 +1112,10 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_out;
}
- if (!cmd.cq_entry_size) {
+ set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
+ if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
+ (set_src_addr ||
+ cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
ibdev_dbg(ibdev,
"Invalid entry size [%u]\n", cmd.cq_entry_size);
err = -EINVAL;
@@ -1138,6 +1144,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
params.dma_addr = cq->dma_addr;
params.entry_size_in_bytes = cmd.cq_entry_size;
params.num_sub_cqs = cmd.num_sub_cqs;
+ params.set_src_addr = set_src_addr;
if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
cq->eq = efa_vec2eq(dev, attr->comp_vector);
params.eqn = cq->eq->eeq.eqn;
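The entry-size check added in the efa_create_cq() hunk above is easiest to read as a small truth table; an equivalent, purely illustrative reformulation ("valid" is a hypothetical local, not in the patch):

	/*
	 *   cq_entry_size == sizeof(struct efa_io_rx_cdesc_ex)  -> always valid
	 *   cq_entry_size == sizeof(struct efa_io_rx_cdesc)     -> valid only if !set_src_addr
	 *   anything else                                        -> -EINVAL
	 */
	bool valid = cmd.cq_entry_size == sizeof(struct efa_io_rx_cdesc_ex) ||
		     (!set_src_addr &&
		      cmd.cq_entry_size == sizeof(struct efa_io_rx_cdesc));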
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 2aae635c1c8d..730783fbc894 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/netdevice.h>
+#include <linux/pci.h>
#include <linux/xarray.h>
#include <rdma/ib_verbs.h>
@@ -196,6 +197,7 @@ struct erdma_dev {
struct erdma_devattr attrs;
/* physical port state (only one port per device) */
enum ib_port_state state;
+ u32 mtu;
/* cmdq and aeq use the same msix vector */
struct erdma_irq comm_irq;
@@ -269,7 +271,7 @@ void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
-int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index f13f16479eca..74f6348f240a 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -10,15 +10,7 @@
/* Copyright (c) 2008-2019, IBM Corporation */
/* Copyright (c) 2017, Open Grid Computing, Inc. */
-#include <linux/errno.h>
-#include <linux/inetdevice.h>
-#include <linux/net.h>
-#include <linux/types.h>
#include <linux/workqueue.h>
-#include <net/addrconf.h>
-
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
#include "erdma.h"
#include "erdma_cm.h"
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index 57da0c670472..6ebfa6989b11 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -4,13 +4,7 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
#include "erdma.h"
-#include "erdma_hw.h"
-#include "erdma_verbs.h"
static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
{
@@ -441,7 +435,7 @@ void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
}
-int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
u64 *resp0, u64 *resp1)
{
struct erdma_comp_wait *comp_wait;
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index 751c7f9f0de7..58e0dc5c75d1 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -4,9 +4,6 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <rdma/ib_verbs.h>
-
-#include "erdma_hw.h"
#include "erdma_verbs.h"
static void *get_next_valid_cqe(struct erdma_cq *cq)
@@ -62,7 +59,6 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
[ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
[ERDMA_OP_RECV_INV] = IB_WC_RECV,
[ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
[ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
[ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
[ERDMA_OP_REG_MR] = IB_WC_REG_MR,
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index 8f2d094e0227..ed54130d924b 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -4,12 +4,6 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
-#include "erdma.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
#define MAX_POLL_CHUNK_SIZE 16
@@ -229,9 +223,7 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
- sizeof(struct erdma_cmdq_create_eq_req),
- NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
@@ -281,8 +273,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
req.qtype = ERDMA_EQ_TYPE_CEQ;
req.vector_idx = ceqn + 1;
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return;
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index b210c49c669f..e788887732e1 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -153,6 +153,7 @@ enum CMDQ_COMMON_OPCODE {
CMDQ_OPCODE_CREATE_EQ = 0,
CMDQ_OPCODE_DESTROY_EQ = 1,
CMDQ_OPCODE_QUERY_FW_INFO = 2,
+ CMDQ_OPCODE_CONF_MTU = 3,
};
/* cmdq-SQE HDR */
@@ -190,6 +191,11 @@ struct erdma_cmdq_destroy_eq_req {
u8 qtype;
};
+struct erdma_cmdq_config_mtu_req {
+ u64 hdr;
+ u32 mtu;
+};
+
/* create_cq cfg0 */
#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
@@ -450,13 +456,13 @@ enum erdma_opcode {
ERDMA_OP_RECV_IMM = 5,
ERDMA_OP_RECV_INV = 6,
- ERDMA_OP_REQ_ERR = 7,
- ERDMA_OP_READ_RESPONSE = 8,
+ ERDMA_OP_RSVD0 = 7,
+ ERDMA_OP_RSVD1 = 8,
ERDMA_OP_WRITE_WITH_IMM = 9,
- ERDMA_OP_RECV_ERR = 10,
+ ERDMA_OP_RSVD2 = 10,
+ ERDMA_OP_RSVD3 = 11,
- ERDMA_OP_INVALIDATE = 11,
ERDMA_OP_RSP_SEND_IMM = 12,
ERDMA_OP_SEND_WITH_INV = 13,
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 07e743d24847..49778bb294ae 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -4,21 +4,12 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
#include "erdma.h"
#include "erdma_cm.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");
@@ -43,10 +34,15 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
dev->state = IB_PORT_DOWN;
erdma_port_event(dev, IB_EVENT_PORT_ERR);
break;
+ case NETDEV_CHANGEMTU:
+ if (dev->mtu != netdev->mtu) {
+ erdma_set_mtu(dev, netdev->mtu);
+ dev->mtu = netdev->mtu;
+ }
+ break;
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
case NETDEV_CHANGEADDR:
- case NETDEV_CHANGEMTU:
case NETDEV_GOING_DOWN:
case NETDEV_CHANGE:
default:
@@ -104,6 +100,7 @@ static int erdma_device_register(struct erdma_dev *dev)
if (ret)
return ret;
+ dev->mtu = dev->netdev->mtu;
addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);
ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index bc3ec22a62c5..5fe1a339a435 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -6,15 +6,6 @@
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/scatterlist.h>
-#include <linux/types.h>
-
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
-
-#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"
@@ -105,8 +96,7 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
req.recv_nxt = tp->rcv_nxt;
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
@@ -124,8 +114,7 @@ static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 699bd3f59cd3..62be98e2b941 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -9,21 +9,14 @@
/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>
#include "erdma.h"
#include "erdma_cm.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
@@ -102,7 +95,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
}
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), &resp0,
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
&resp1);
if (!err)
qp->attrs.cookie =
@@ -151,8 +144,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
}
post_cmd:
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
@@ -202,8 +194,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
}
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
@@ -976,8 +967,7 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
- ret = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (ret)
return ret;
@@ -1002,8 +992,7 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_CQ);
req.cqn = cq->cqn;
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return err;
@@ -1040,8 +1029,7 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_QP);
req.qpn = QP_ID(qp);
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return err;
@@ -1448,6 +1436,17 @@ err_out_xa:
return ret;
}
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
+{
+ struct erdma_cmdq_config_mtu_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CONF_MTU);
+ req.mtu = mtu;
+
+ erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+}
+
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
{
struct ib_event event;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index c7baddb1f292..ab6380635e9e 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -7,15 +7,7 @@
#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__
-#include <linux/errno.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/iw_cm.h>
-
#include "erdma.h"
-#include "erdma_cm.h"
-#include "erdma_hw.h"
/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
@@ -338,5 +330,6 @@ struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);
#endif
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index f1245c94ae26..ebe970f76232 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8753,7 +8753,7 @@ static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
/*
* When writing a LCB CSR, out_data contains the full value to
- * to be written, while in_data contains the relative LCB
+ * be written, while in_data contains the relative LCB
* address in 7:0. Do the work here, rather than the caller,
 * of distributing the write data to where it needs to go:
*
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 629beff053ad..f5f9269fdc16 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -965,7 +965,7 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
uctxt->userversion = uinfo->userversion;
uctxt->flags = hfi1_cap_mask; /* save current flag state */
init_waitqueue_head(&uctxt->wait);
- strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
+ strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
uctxt->jkey = generate_jkey(current_uid());
hfi1_stats.sps_ctxts++;
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index aa15a5cc7cf3..1d77514ebbee 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1114,7 +1114,7 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
* Reset all of the fabric serdes for this HFI in preparation to take the
* link to Polling.
*
- * To do a reset, we need to write to to the serdes registers. Unfortunately,
+ * To do a reset, we need to write to the serdes registers. Unfortunately,
* the fabric serdes download to the other HFI on the ASIC will have turned
* off the firmware validation on this HFI. This means we can't write to the
* registers to reset the serdes. Work around this by performing a complete
diff --git a/drivers/infiniband/hw/hfi1/ipoib_rx.c b/drivers/infiniband/hw/hfi1/ipoib_rx.c
index 3afa7545242c..629691a572ef 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_rx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_rx.c
@@ -11,13 +11,10 @@
static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
{
- void *dst_data;
-
skb_checksum_none_assert(skb);
skb->protocol = *((__be16 *)data);
- dst_data = skb_put(skb, size);
- memcpy(dst_data, data, size);
+ skb_put_data(skb, data, size);
skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
}
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 2a7abf7a1f7f..18b05ffb415a 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -850,7 +850,7 @@ void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
int i;
for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
- rcd->flows[i].generation = mask_generation(prandom_u32());
+ rcd->flows[i].generation = mask_generation(get_random_u32());
kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
}
}
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 6988f6f21bde..e6e17984553c 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1447,12 +1447,10 @@ static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
- int ret;
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
OPA_LINKDOWN_REASON_UNKNOWN);
- ret = set_link_state(ppd, HLS_DN_DOWNDEF);
- return ret;
+ return set_link_state(ppd, HLS_DN_DOWNDEF);
}
static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
@@ -1801,7 +1799,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
ib_set_device_ops(ibdev, &hfi1_dev_ops);
- strlcpy(ibdev->node_desc, init_utsname()->nodename,
+ strscpy(ibdev->node_desc, init_utsname()->nodename,
sizeof(ibdev->node_desc));
/*
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 38565532d654..7f30f32b34dc 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -391,9 +391,6 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
bool *call_send);
-extern const u32 rc_only_opcode;
-extern const u32 uc_only_opcode;
-
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet);
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 9f04f25d9631..a7d259238305 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -10,6 +10,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
ifdef CONFIG_INFINIBAND_HNS_HIP08
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 492b122d0521..480c062dd04f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -41,9 +41,8 @@ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
u16 sport;
if (!fl)
- sport = get_random_u32() %
- (IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
- IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
+ sport = prandom_u32_max(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
else
sport = rdma_flow_label_to_udp_sport(fl);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 8acd599ffac1..736dc2f993b4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -454,7 +454,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
hr_cq = xa_load(&hr_dev->cq_table.array,
cqn & (hr_dev->caps.num_cqs - 1));
if (!hr_cq) {
- dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+ dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
cqn);
return;
}
@@ -475,14 +475,14 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
hr_cq = xa_load(&hr_dev->cq_table.array,
cqn & (hr_dev->caps.num_cqs - 1));
if (!hr_cq) {
- dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+ dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
return;
}
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+ dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
event_type, cqn);
return;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index d24996526c4d..723e55a7de8d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -240,7 +240,6 @@ struct hns_roce_hem_table {
/* Single obj size */
unsigned long obj_size;
unsigned long table_chunk_size;
- int lowmem;
struct mutex mutex;
struct hns_roce_hem **hem;
u64 **bt_l1;
@@ -599,7 +598,6 @@ struct hns_roce_qp {
struct hns_roce_db rdb;
struct hns_roce_db sdb;
unsigned long en_flags;
- u32 doorbell_qpn;
enum ib_sig_type sq_signal_bits;
struct hns_roce_wq sq;
@@ -726,7 +724,7 @@ struct hns_roce_caps {
u32 max_sq_sg;
u32 max_sq_inline;
u32 max_rq_sg;
- u32 max_extend_sg;
+ u32 rsv0;
u32 num_qps;
u32 num_pi_qps;
u32 reserved_qps;
@@ -736,7 +734,7 @@ struct hns_roce_caps {
u32 max_srq_sges;
u32 max_sq_desc_sz;
u32 max_rq_desc_sz;
- u32 max_srq_desc_sz;
+ u32 rsv2;
int max_qp_init_rdma;
int max_qp_dest_rdma;
u32 num_cqs;
@@ -749,7 +747,7 @@ struct hns_roce_caps {
int num_comp_vectors;
int num_other_vectors;
u32 num_mtpts;
- u32 num_mtt_segs;
+ u32 rsv1;
u32 num_srqwqe_segs;
u32 num_idx_segs;
int reserved_mrws;
@@ -848,11 +846,6 @@ struct hns_roce_caps {
enum cong_type cong_type;
};
-struct hns_roce_dfx_hw {
- int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer);
-};
-
enum hns_roce_device_state {
HNS_ROCE_DEVICE_STATE_INITED,
HNS_ROCE_DEVICE_STATE_RST_DOWN,
@@ -898,6 +891,9 @@ struct hns_roce_hw {
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
+ int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
+ int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
+ int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
@@ -959,7 +955,6 @@ struct hns_roce_dev {
void *priv;
struct workqueue_struct *irq_workq;
struct work_struct ecc_work;
- const struct hns_roce_dfx_hw *dfx;
u32 func_num;
u32 is_vf;
u32 cong_algo_tmpl_id;
@@ -1227,8 +1222,12 @@ u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
- struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index ce1a0d2792a3..aa8a08d1c014 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -455,7 +455,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
* alloc bt space chunk for MTT/CQE.
*/
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
- flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
+ flag = GFP_KERNEL | __GFP_NOWARN;
table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
size, flag);
if (!table->hem[index->buf]) {
@@ -588,8 +588,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
table->hem[i] = hns_roce_alloc_hem(hr_dev,
table->table_chunk_size >> PAGE_SHIFT,
table->table_chunk_size,
- (table->lowmem ? GFP_KERNEL :
- GFP_HIGHUSER) | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!table->hem[i]) {
ret = -ENOMEM;
goto out;
@@ -725,9 +724,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
int length;
int i, j;
- if (!table->lowmem)
- return NULL;
-
mutex_lock(&table->mutex);
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
@@ -783,8 +779,7 @@ out:
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
- unsigned long obj_size, unsigned long nobj,
- int use_lowmem)
+ unsigned long obj_size, unsigned long nobj)
{
unsigned long obj_per_chunk;
unsigned long num_hem;
@@ -861,7 +856,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
table->type = type;
table->num_hem = num_hem;
table->obj_size = obj_size;
- table->lowmem = use_lowmem;
mutex_init(&table->mutex);
return 0;
@@ -932,7 +926,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
if (table->hem[i]) {
if (hr_dev->hw->clear_hem(hr_dev, table,
i * table->table_chunk_size / table->obj_size, 0))
- dev_err(dev, "Clear HEM base address failed.\n");
+ dev_err(dev, "clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
}
@@ -986,7 +980,7 @@ struct hns_roce_hem_head {
static struct hns_roce_hem_item *
hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
- bool exist_bt, int bt_level)
+ bool exist_bt)
{
struct hns_roce_hem_item *hem;
@@ -1195,7 +1189,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
start_aligned = (distance / step) * step + r->offset;
end = min_t(int, start_aligned + step - 1, max_ofs);
cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
- true, level);
+ true);
if (!cur) {
ret = -ENOMEM;
goto err_exit;
@@ -1247,7 +1241,7 @@ alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
/* indicate to last region */
r = &regions[region_cnt - 1];
hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
- ba_num, true, 0);
+ ba_num, true);
if (!hem)
return ERR_PTR(-ENOMEM);
@@ -1264,7 +1258,7 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
struct hns_roce_hem_item *hem;
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
- r->count, false, 0);
+ r->count, false);
if (!hem)
return -ENOMEM;
@@ -1421,7 +1415,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
&hem_list->btm_bt);
if (ret) {
dev_err(hr_dev->dev,
- "alloc hem trunk fail ret=%d!\n", ret);
+ "alloc hem trunk fail ret = %d!\n", ret);
goto err_alloc;
}
}
@@ -1430,7 +1424,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
region_cnt);
if (ret)
- dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret);
+ dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret);
else
return 0;
@@ -1468,19 +1462,17 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
- int offset, int *mtt_cnt, u64 *phy_addr)
+ int offset, int *mtt_cnt)
{
struct list_head *head = &hem_list->btm_bt;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
- u64 phy_base = 0;
int nr = 0;
list_for_each_entry_safe(hem, temp_hem, head, sibling) {
if (hem_list_page_is_in_range(hem, offset)) {
nr = offset - hem->start;
cpu_base = hem->addr + nr * BA_BYTE_LEN;
- phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
nr = hem->end + 1 - offset;
break;
}
@@ -1489,8 +1481,5 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
if (mtt_cnt)
*mtt_cnt = nr;
- if (phy_addr)
- *phy_addr = phy_base;
-
return cpu_base;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 2d84a6b3f05d..7d23d3c51da4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -111,8 +111,7 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
dma_addr_t *dma_handle);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
- unsigned long obj_size, unsigned long nobj,
- int use_lowmem);
+ unsigned long obj_size, unsigned long nobj);
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
@@ -132,7 +131,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
- int offset, int *mtt_cnt, u64 *phy_addr);
+ int offset, int *mtt_cnt);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c780646bd60a..1ead35fb031b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -193,8 +193,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
unsigned int *sge_idx, u32 msg_len)
{
struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
- unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
- unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
+ unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE;
unsigned int left_len_in_pg;
unsigned int idx = *sge_idx;
unsigned int i = 0;
@@ -222,7 +221,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
if (len <= left_len_in_pg) {
memcpy(dseg, addr, len);
- idx += len / dseg_len;
+ idx += len / HNS_ROCE_SGE_SIZE;
i++;
if (i >= wr->num_sge)
@@ -237,7 +236,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
len -= left_len_in_pg;
addr += left_len_in_pg;
- idx += left_len_in_pg / dseg_len;
+ idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
dseg = hns_roce_get_extend_sge(qp,
idx & (qp->sge.sge_cnt - 1));
left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
@@ -381,7 +380,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
if (unlikely(ibqp->qp_type != IB_QPT_RC &&
ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_UD)) {
- ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
+ ibdev_err(ibdev, "not supported QP(0x%x)type!\n",
ibqp->qp_type);
return -EOPNOTSUPP;
} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
@@ -637,7 +636,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
} else {
struct hns_roce_v2_db sq_db = {};
- hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
+ hr_reg_write(&sq_db, DB_TAG, qp->qpn);
hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
hr_reg_write(&sq_db, DB_PI, qp->sq.head);
hr_reg_write(&sq_db, DB_SL, qp->sl);
@@ -1406,20 +1405,20 @@ static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
hr_dev->dis_db = true;
dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
+ "func clear is pending, device in resetting state.\n");
end = HNS_ROCE_V2_HW_RST_TIMEOUT;
while (end) {
if (!ops->get_hw_reset_stat(handle)) {
hr_dev->is_reset = true;
dev_info(hr_dev->dev,
- "Func clear success after reset.\n");
+ "func clear success after reset.\n");
return;
}
msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
}
- dev_warn(hr_dev->dev, "Func clear failed.\n");
+ dev_warn(hr_dev->dev, "func clear failed.\n");
}
static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
@@ -1431,21 +1430,21 @@ static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
hr_dev->dis_db = true;
dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
+ "func clear is pending, device in resetting state.\n");
end = HNS_ROCE_V2_HW_RST_TIMEOUT;
while (end) {
if (ops->ae_dev_reset_cnt(handle) !=
hr_dev->reset_cnt) {
hr_dev->is_reset = true;
dev_info(hr_dev->dev,
- "Func clear success after sw reset\n");
+ "func clear success after sw reset\n");
return;
}
msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
}
- dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
+ dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n");
}
static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
@@ -1458,7 +1457,7 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
hr_dev->dis_db = true;
hr_dev->is_reset = true;
- dev_info(hr_dev->dev, "Func clear success after reset.\n");
+ dev_info(hr_dev->dev, "func clear success after reset.\n");
return;
}
@@ -1475,9 +1474,9 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
if (retval && !flag)
dev_warn(hr_dev->dev,
- "Func clear read failed, ret = %d.\n", retval);
+ "func clear read failed, ret = %d.\n", retval);
- dev_warn(hr_dev->dev, "Func clear failed.\n");
+ dev_warn(hr_dev->dev, "func clear failed.\n");
}
static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
@@ -1498,7 +1497,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
fclr_write_fail_flag = true;
- dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
+ dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
ret);
goto out;
}
@@ -1966,7 +1965,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
- caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
@@ -1984,7 +1982,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
- caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
@@ -2185,13 +2182,14 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
- caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
if (!caps->num_comp_vectors)
- caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
- (u32)priv->handle->rinfo.num_vectors - 2);
+ caps->num_comp_vectors =
+ min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
+ (u32)priv->handle->rinfo.num_vectors -
+ (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
@@ -2272,14 +2270,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
- caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
caps->num_other_vectors = resp_a->num_other_vectors;
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
- caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
caps->cqe_sz = resp_a->cqe_sz;
caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
@@ -4299,7 +4295,6 @@ static inline int get_pdn(struct ib_pd *ib_pd)
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
- int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4363,7 +4358,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
}
static void modify_qp_init_to_init(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
+ const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4612,7 +4607,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_DQPN);
}
- memcpy(&(context->dmac), dmac, sizeof(u32));
+ memcpy(&context->dmac, dmac, sizeof(u32));
hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
qpc_mask->dmac = 0;
hr_reg_clear(qpc_mask, QPC_DMAC_H);
@@ -5014,11 +5009,9 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
- qpc_mask);
+ modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- modify_qp_init_to_init(ibqp, attr, attr_mask, context,
- qpc_mask);
+ modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -5039,14 +5032,14 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
ibdev_warn(&hr_dev->ib_dev,
- "Local ACK timeout shall be 0 to 20.\n");
+ "local ACK timeout shall be 0 to 20.\n");
return false;
}
*timeout += QP_ACK_TIMEOUT_OFFSET;
} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX) {
ibdev_warn(&hr_dev->ib_dev,
- "Local ACK timeout shall be 0 to 31.\n");
+ "local ACK timeout shall be 0 to 31.\n");
return false;
}
}
@@ -5306,9 +5299,8 @@ static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}
-static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_v2_qp_context *hr_context)
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
+ void *buffer)
{
struct hns_roce_cmd_mailbox *mailbox;
int ret;
@@ -5318,11 +5310,11 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
- hr_qp->qpn);
+ qpn);
if (ret)
goto out;
- memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
+ memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz);
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -5352,7 +5344,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto done;
}
- ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
+ ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
if (ret) {
ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
ret = -EINVAL;
@@ -5550,7 +5542,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
msleep(20);
}
- ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
+ ibdev_err(ibdev, "query SCC clr done flag overtime.\n");
ret = -ETIMEDOUT;
out:
@@ -5773,6 +5765,64 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
+static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
+ void *buffer)
+{
+ struct hns_roce_v2_cq_context *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+ HNS_ROCE_CMD_QUERY_CQC, cqn);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying CQ, ret = %d.\n",
+ ret);
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
+ void *buffer)
+{
+ struct hns_roce_v2_mpt_entry *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
+ key_to_hw_index(key));
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying MPT, ret = %d.\n",
+ ret);
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
@@ -5781,26 +5831,26 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
switch (irq_work->event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- ibdev_info(ibdev, "Path migrated succeeded.\n");
+ ibdev_info(ibdev, "path migrated succeeded.\n");
break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- ibdev_warn(ibdev, "Path migration failed.\n");
+ ibdev_warn(ibdev, "path migration failed.\n");
break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- ibdev_warn(ibdev, "Send queue drained.\n");
+ ibdev_warn(ibdev, "send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
+ ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
+ ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n",
irq_work->queue_num);
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
+ ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n",
irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
@@ -5822,7 +5872,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
ibdev_warn(ibdev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
- ibdev_warn(ibdev, "Function level reset.\n");
+ ibdev_warn(ibdev, "function level reset.\n");
break;
case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
ibdev_err(ibdev, "xrc domain violation error.\n");
@@ -5846,12 +5896,12 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
if (!irq_work)
return;
- INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
+ INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
irq_work->hr_dev = hr_dev;
irq_work->event_type = eq->event_type;
irq_work->sub_type = eq->sub_type;
irq_work->queue_num = queue_num;
- queue_work(hr_dev->irq_workq, &(irq_work->work));
+ queue_work(hr_dev->irq_workq, &irq_work->work);
}
static void update_eq_db(struct hns_roce_eq *eq)
@@ -5941,7 +5991,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_FLR:
break;
default:
- dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
event_type, eq->eqn, eq->cons_index);
break;
}
@@ -6011,7 +6061,7 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
/* Completion event interrupt */
int_work = hns_roce_v2_ceq_int(hr_dev, eq);
else
- /* Asychronous event interrupt */
+ /* Asynchronous event interrupt */
int_work = hns_roce_v2_aeq_int(hr_dev, eq);
return IRQ_RETVAL(int_work);
@@ -6332,7 +6382,7 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
0);
if (err)
- dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
+ dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);
return err;
}
@@ -6421,7 +6471,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
0, hr_dev->irq_names[j - comp_num],
&eq_table->eq[j - other_num]);
if (ret) {
- dev_err(hr_dev->dev, "Request irq error!\n");
+ dev_err(hr_dev->dev, "request irq error!\n");
goto err_request_failed;
}
}
@@ -6574,10 +6624,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
kfree(eq_table->eq);
}
-static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
- .query_cqc_info = hns_roce_v2_query_cqc_info,
-};
-
static const struct ib_device_ops hns_roce_v2_dev_ops = {
.destroy_qp = hns_roce_v2_destroy_qp,
.modify_cq = hns_roce_v2_modify_cq,
@@ -6618,6 +6664,9 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.init_eq = hns_roce_v2_init_eq_table,
.cleanup_eq = hns_roce_v2_cleanup_eq_table,
.write_srqc = hns_roce_v2_write_srqc,
+ .query_cqc = hns_roce_v2_query_cqc,
+ .query_qpc = hns_roce_v2_query_qpc,
+ .query_mpt = hns_roce_v2_query_mpt,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
@@ -6649,7 +6698,6 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->is_vf = id->driver_data;
hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
- hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = hr_dev->sdb_offset;
@@ -6845,7 +6893,7 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
} else {
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
- dev_info(dev, "Reset done, RoCE client reinit finished.\n");
+ dev_info(dev, "reset done, RoCE client reinit finished.\n");
}
return ret;
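
The additions in this file replace the old dfx query_cqc_info path with generic .query_cqc, .query_qpc and .query_mpt hooks in struct hns_roce_hw, each copying the raw hardware context out of a command mailbox. A minimal sketch (illustration only) of how a consumer probes one of these optional hooks before use, mirroring the restrack callbacks later in this diff:

static int dump_raw_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
			struct hns_roce_v2_cq_context *ctx)
{
	/* the hook is optional, so check it before calling */
	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	return hr_dev->hw->query_cqc(hr_dev, cqn, ctx);
}
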
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 64797109bab6..b11579027e82 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -46,7 +46,6 @@
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
-#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V3_MAX_SQ_INLINE 0x400
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
@@ -55,7 +54,6 @@
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
-#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
@@ -65,7 +63,6 @@
#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
-#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
@@ -406,6 +403,7 @@ enum hns_roce_v2_qp_state {
struct hns_roce_v2_qp_context_ex {
__le32 data[64];
};
+
struct hns_roce_v2_qp_context {
__le32 byte_4_sqpn_tst;
__le32 wqe_sge_ba;
@@ -758,7 +756,8 @@ struct hns_roce_v2_mpt_entry {
#define MPT_INNER_PA_VLD MPT_FIELD_LOC(71, 71)
#define MPT_MW_BIND_QPN MPT_FIELD_LOC(95, 72)
#define MPT_BOUND_LKEY MPT_FIELD_LOC(127, 96)
-#define MPT_LEN MPT_FIELD_LOC(191, 128)
+#define MPT_LEN_L MPT_FIELD_LOC(159, 128)
+#define MPT_LEN_H MPT_FIELD_LOC(191, 160)
#define MPT_LKEY MPT_FIELD_LOC(223, 192)
#define MPT_VA MPT_FIELD_LOC(287, 224)
#define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288)
@@ -1173,7 +1172,7 @@ struct hns_roce_query_pf_caps_a {
__le16 max_sq_sg;
__le16 max_sq_inline;
__le16 max_rq_sg;
- __le32 max_extend_sg;
+ __le32 rsv0;
__le16 num_qpc_timer;
__le16 num_cqc_timer;
__le16 max_srq_sges;
@@ -1181,7 +1180,7 @@ struct hns_roce_query_pf_caps_a {
u8 num_other_vectors;
u8 max_sq_desc_sz;
u8 max_rq_desc_sz;
- u8 max_srq_desc_sz;
+ u8 rsv1;
u8 cqe_sz;
};
@@ -1462,9 +1461,6 @@ struct hns_roce_sccc_clr_done {
__le32 rsv[5];
};
-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer);
-
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
deleted file mode 100644
index f7a75a7cda74..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-// Copyright (c) 2019 Hisilicon Limited.
-
-#include "hnae3.h"
-#include "hns_roce_device.h"
-#include "hns_roce_cmd.h"
-#include "hns_roce_hw_v2.h"
-
-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer)
-{
- struct hns_roce_v2_cq_context *cq_context;
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- cq_context = mailbox->buf;
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC,
- cqn);
- if (ret) {
- dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
- goto err_mailbox;
- }
-
- memcpy(buffer, cq_context, sizeof(*cq_context));
-
-err_mailbox:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return ret;
-}
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 4ccb217b2841..dcf89689a4c6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -97,7 +97,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
- dev_err(dev, "Can't find netdev on port(%u)!\n", port);
+ dev_err(dev, "can't find netdev on port(%u)!\n", port);
return -ENODEV;
}
@@ -239,7 +239,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
net_dev = hr_dev->iboe.netdevs[port];
if (!net_dev) {
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_err(dev, "Find netdev %u failed!\n", port);
+ dev_err(dev, "find netdev %u failed!\n", port);
return -EINVAL;
}
@@ -515,7 +515,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.destroy_ah = hns_roce_destroy_ah,
.destroy_cq = hns_roce_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
- .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
.get_dma_mr = hns_roce_get_dma_mr,
.get_link_layer = hns_roce_get_link_layer,
.get_port_immutable = hns_roce_port_immutable,
@@ -566,6 +565,15 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
};
+static const struct ib_device_ops hns_roce_dev_restrack_ops = {
+ .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+ .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
+ .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
+ .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
+ .fill_res_mr_entry = hns_roce_fill_res_mr_entry,
+ .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
+};
+
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
int ret;
@@ -605,6 +613,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
for (i = 0; i < hr_dev->caps.num_ports; i++) {
if (!hr_dev->iboe.netdevs[i])
continue;
@@ -650,17 +659,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
- hr_dev->caps.num_mtpts, 1);
+ hr_dev->caps.num_mtpts);
if (ret) {
- dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
+ dev_err(dev, "failed to init MTPT context memory, aborting.\n");
return ret;
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
- dev_err(dev, "Failed to init QP context memory, aborting.\n");
+ dev_err(dev, "failed to init QP context memory, aborting.\n");
goto err_unmap_dmpt;
}
@@ -668,9 +677,9 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_IRRL,
hr_dev->caps.irrl_entry_sz *
hr_dev->caps.max_qp_init_rdma,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
- dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
+ dev_err(dev, "failed to init irrl_table memory, aborting.\n");
goto err_unmap_qp;
}
@@ -680,19 +689,19 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_TRRL,
hr_dev->caps.trrl_entry_sz *
hr_dev->caps.max_qp_dest_rdma,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
dev_err(dev,
- "Failed to init trrl_table memory, aborting.\n");
+ "failed to init trrl_table memory, aborting.\n");
goto err_unmap_irrl;
}
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
- hr_dev->caps.num_cqs, 1);
+ hr_dev->caps.num_cqs);
if (ret) {
- dev_err(dev, "Failed to init CQ context memory, aborting.\n");
+ dev_err(dev, "failed to init CQ context memory, aborting.\n");
goto err_unmap_trrl;
}
@@ -700,10 +709,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
HEM_TYPE_SRQC,
hr_dev->caps.srqc_entry_sz,
- hr_dev->caps.num_srqs, 1);
+ hr_dev->caps.num_srqs);
if (ret) {
dev_err(dev,
- "Failed to init SRQ context memory, aborting.\n");
+ "failed to init SRQ context memory, aborting.\n");
goto err_unmap_cq;
}
}
@@ -713,10 +722,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
&hr_dev->qp_table.sccc_table,
HEM_TYPE_SCCC,
hr_dev->caps.sccc_sz,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
dev_err(dev,
- "Failed to init SCC context memory, aborting.\n");
+ "failed to init SCC context memory, aborting.\n");
goto err_unmap_srq;
}
}
@@ -725,10 +734,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
- hr_dev->caps.qpc_timer_bt_num, 1);
+ hr_dev->caps.qpc_timer_bt_num);
if (ret) {
dev_err(dev,
- "Failed to init QPC timer memory, aborting.\n");
+ "failed to init QPC timer memory, aborting.\n");
goto err_unmap_ctx;
}
}
@@ -737,10 +746,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
HEM_TYPE_CQC_TIMER,
hr_dev->caps.cqc_timer_entry_sz,
- hr_dev->caps.cqc_timer_bt_num, 1);
+ hr_dev->caps.cqc_timer_bt_num);
if (ret) {
dev_err(dev,
- "Failed to init CQC timer memory, aborting.\n");
+ "failed to init CQC timer memory, aborting.\n");
goto err_unmap_qpc_timer;
}
}
@@ -749,7 +758,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
HEM_TYPE_GMV,
hr_dev->caps.gmv_entry_sz,
- hr_dev->caps.gmv_entry_num, 1);
+ hr_dev->caps.gmv_entry_num);
if (ret) {
dev_err(dev,
"failed to init gmv table memory, ret = %d\n",
@@ -818,13 +827,13 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
if (ret) {
- dev_err(dev, "Failed to allocate priv_uar.\n");
+ dev_err(dev, "failed to allocate priv_uar.\n");
goto err_uar_table_free;
}
ret = hns_roce_init_qp_table(hr_dev);
if (ret) {
- dev_err(dev, "Failed to init qp_table.\n");
+ dev_err(dev, "failed to init qp_table.\n");
goto err_uar_table_free;
}
@@ -837,9 +846,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
hns_roce_init_cq_table(hr_dev);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_init_srq_table(hr_dev);
- }
return 0;
@@ -902,14 +910,14 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
- dev_err(dev, "Init RoCE Command Queue failed!\n");
+ dev_err(dev, "init RoCE Command Queue failed!\n");
return ret;
}
}
ret = hr_dev->hw->hw_profile(hr_dev);
if (ret) {
- dev_err(dev, "Get RoCE engine profile failed!\n");
+ dev_err(dev, "get RoCE engine profile failed!\n");
goto error_failed_cmd_init;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 867972c2a894..845ac7d3831f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -190,7 +190,7 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
int ret;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (mr == NULL)
+ if (!mr)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
@@ -249,7 +249,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_alloc_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->ibmr.length = length;
return &mr->ibmr;
@@ -586,7 +585,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
while (offset < end && npage < max_count) {
count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
- offset, &count, NULL);
+ offset, &count);
if (!mtts)
return -ENOBUFS;
@@ -835,7 +834,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total,
- &mtt_count, NULL);
+ &mtt_count);
if (!mtts || !mtt_count)
goto done;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 7bee7f6c5e70..f0bd82a18069 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -56,7 +56,7 @@ static void flush_work_handle(struct work_struct *work)
if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
if (ret)
- dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
+ dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
ret);
}
@@ -105,7 +105,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
xa_unlock(&hr_dev->qp_table_xa);
if (!qp) {
- dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ dev_warn(dev, "async event for bogus QP %08x\n", qpn);
return;
}
@@ -218,7 +218,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
num = 1;
- hr_qp->doorbell_qpn = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank);
@@ -234,8 +233,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
qp_table->bank[bankid].inuse++;
mutex_unlock(&qp_table->bank_mutex);
-
- hr_qp->doorbell_qpn = (u32)num;
}
hr_qp->qpn = num;
@@ -278,7 +275,7 @@ static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
if (ret)
- dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
+ dev_err(hr_dev->dev, "failed to xa store for QPC\n");
else
/* add QP to device's QP list for softwc */
add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
@@ -299,14 +296,14 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get QPC table\n");
+ dev_err(dev, "failed to get QPC table\n");
goto err_out;
}
/* Alloc memory for IRRL */
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get IRRL table\n");
+ dev_err(dev, "failed to get IRRL table\n");
goto err_put_qp;
}
@@ -315,7 +312,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get TRRL table\n");
+ dev_err(dev, "failed to get TRRL table\n");
goto err_put_irrl;
}
}
@@ -325,7 +322,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get SCC CTX table\n");
+ dev_err(dev, "failed to get SCC CTX table\n");
goto err_put_trrl;
}
}
@@ -1206,7 +1203,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
if (ret)
- ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
+ ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret);
return ret;
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 24a154d64630..989a2af2e938 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -9,91 +9,223 @@
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
-static int hns_roce_fill_cq(struct sk_buff *msg,
- struct hns_roce_v2_cq_context *context)
+#define MAX_ENTRY_NUM 256
+
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
- if (rdma_nl_put_driver_u32(msg, "state",
- hr_reg_read(context, CQC_ARM_ST)))
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+ if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
goto err;
- if (rdma_nl_put_driver_u32(msg, "ceqn",
- hr_reg_read(context, CQC_CEQN)))
+ if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
goto err;
- if (rdma_nl_put_driver_u32(msg, "cqn",
- hr_reg_read(context, CQC_CQN)))
+ if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
goto err;
- if (rdma_nl_put_driver_u32(msg, "hopnum",
- hr_reg_read(context, CQC_CQE_HOP_NUM)))
+ if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
goto err;
- if (rdma_nl_put_driver_u32(msg, "pi",
- hr_reg_read(context, CQC_CQ_PRODUCER_IDX)))
+ nla_nest_end(msg, table_attr);
+
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct hns_roce_v2_cq_context context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
+ int ret;
+
+ if (!hr_dev->hw->query_cqc)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
+ if (ret)
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
+ data[offset++] = hr_reg_read(&context, CQC_SHIFT);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
+ data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
+ data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
+ data[offset++] = hr_reg_read(&context, CQC_CEQN);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
+ return ret;
+}
+
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
goto err;
- if (rdma_nl_put_driver_u32(msg, "ci",
- hr_reg_read(context, CQC_CQ_CONSUMER_IDX)))
+ if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
goto err;
- if (rdma_nl_put_driver_u32(msg, "coalesce",
- hr_reg_read(context, CQC_CQ_MAX_CNT)))
+ if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
goto err;
- if (rdma_nl_put_driver_u32(msg, "period",
- hr_reg_read(context, CQC_CQ_PERIOD)))
+ if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
goto err;
- if (rdma_nl_put_driver_u32(msg, "cnt",
- hr_reg_read(context, CQC_CQE_CNT)))
+ if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
goto err;
+ nla_nest_end(msg, table_attr);
+
return 0;
err:
+ nla_nest_cancel(msg, table_attr);
+
return -EMSGSIZE;
}
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
- struct ib_cq *ib_cq)
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
- struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- struct hns_roce_v2_cq_context *context;
- struct nlattr *table_attr;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+ struct hns_roce_v2_qp_context context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
int ret;
- if (!hr_dev->dfx->query_cqc_info)
+ if (!hr_dev->hw->query_qpc)
return -EINVAL;
- context = kzalloc(sizeof(struct hns_roce_v2_cq_context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);
+ ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
if (ret)
- goto err;
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, QPC_QP_ST);
+ data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
+ data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
+ data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
+ data[offset++] = hr_reg_read(&context, QPC_SRQN);
+ data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
+ data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
+ data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_RQWS);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
+ data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
+ data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
+ data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
+ return ret;
+}
+
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
+ struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
- if (!table_attr) {
- ret = -EMSGSIZE;
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
goto err;
- }
- if (hns_roce_fill_cq(msg, context)) {
- ret = -EMSGSIZE;
- goto err_cancel_table;
- }
+ if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
+ hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
+ hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
+ goto err;
nla_nest_end(msg, table_attr);
- kfree(context);
return 0;
-err_cancel_table:
- nla_nest_cancel(msg, table_attr);
err:
- kfree(context);
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
+ struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
+ struct hns_roce_v2_mpt_entry context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
+ int ret;
+
+ if (!hr_dev->hw->query_mpt)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
+ if (ret)
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, MPT_ST);
+ data[offset++] = hr_reg_read(&context, MPT_PD);
+ data[offset++] = hr_reg_read(&context, MPT_LKEY);
+ data[offset++] = hr_reg_read(&context, MPT_LEN_L);
+ data[offset++] = hr_reg_read(&context, MPT_LEN_H);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
return ret;
}
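
The rewritten restrack callbacks above dump each object's hardware context as a flat array of u32 field values carried in RDMA_NLDEV_ATTR_RES_RAW, capped by MAX_ENTRY_NUM. A minimal sketch of the packing step they share (hypothetical helper, not part of the patch):

static int put_raw_ctx(struct sk_buff *msg, const u32 *data, int nr)
{
	if (nr > MAX_ENTRY_NUM)
		return -EINVAL;

	/* length is in bytes; the payload is just the collected field values */
	return nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, nr * sizeof(u32), data);
}
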
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index e03e03082a5f..c1906cab5c8a 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -314,6 +314,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_INVALID_REQUEST 0x0223
#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 4f132c6fb653..ab246447520b 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -138,59 +138,68 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
switch (info->ae_id) {
- case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BOUNDS_VIOLATION:
case IRDMA_AE_AMP_INVALID_STAG:
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- fallthrough;
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BAD_PD:
- case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
qp->flush_code = FLUSH_PROT_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
- case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->flush_code = FLUSH_LOC_LEN_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
- case IRDMA_AE_AMP_BAD_STAG_KEY:
- case IRDMA_AE_AMP_BAD_STAG_INDEX:
- case IRDMA_AE_AMP_TO_WRAP:
- case IRDMA_AE_AMP_RIGHTS_VIOLATION:
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case IRDMA_AE_PRIV_OPERATION_DENIED:
- case IRDMA_AE_IB_INVALID_REQUEST:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
qp->flush_code = FLUSH_REM_ACCESS_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
- case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
- qp->flush_code = FLUSH_LOC_LEN_ERR;
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp->flush_code = FLUSH_REM_OP_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp->flush_code = FLUSH_FATAL_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
- case IRDMA_AE_DDP_UBE_INVALID_MO:
case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
- case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
qp->flush_code = FLUSH_GENERAL_ERR;
break;
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
qp->flush_code = FLUSH_RETRY_EXC_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
qp->flush_code = FLUSH_MW_BIND_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
- qp->flush_code = FLUSH_REM_OP_ERR;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp->flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
break;
default:
- qp->flush_code = FLUSH_FATAL_ERR;
+ qp->flush_code = FLUSH_GENERAL_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
}
}
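
The reworked switch above now pairs every asynchronous error class with both a flush code and a QP event type, and routes IRDMA_AE_IB_INVALID_REQUEST to the new remote-invalid-request handling. A minimal sketch of that single path end to end (illustration only, not driver code):

static void example_inv_req_flow(struct irdma_sc_qp *qp)
{
	/* irdma_set_flush_fields() for IRDMA_AE_IB_INVALID_REQUEST (0x0223) */
	qp->flush_code = FLUSH_REM_INV_REQ_ERR;  /* flushed CQEs -> IB_WC_REM_INV_REQ_ERR */
	qp->event_type = IRDMA_QP_EVENT_REQ_ERR; /* async event -> IB_EVENT_QP_REQ_ERR */
}
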
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 9e7b8ecb137a..517d41a1c289 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -98,6 +98,7 @@ enum irdma_term_mpa_errors {
enum irdma_qp_event_type {
IRDMA_QP_EVENT_CATASTROPHIC,
IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_hw_stats_index_32b {
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index ddd0ebbdd7d5..2ef61923c926 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -103,6 +103,7 @@ enum irdma_flush_opcode {
FLUSH_FATAL_ERR,
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
+ FLUSH_REM_INV_REQ_ERR,
};
enum irdma_cmpl_status {
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 075defaabee5..8dfc9e154d73 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2479,6 +2479,9 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
case IRDMA_QP_EVENT_ACCESS_ERR:
ibevent.event = IB_EVENT_QP_ACCESS_ERR;
break;
+ case IRDMA_QP_EVENT_REQ_ERR:
+ ibevent.event = IB_EVENT_QP_REQ_ERR;
+ break;
}
ibevent.device = iwqp->ibqp.device;
ibevent.element.qp = &iwqp->ibqp;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9b207f5084eb..a22afbb25bc5 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -299,13 +299,19 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
struct ib_device *ibdev = uctx->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
- struct irdma_alloc_ucontext_req req;
+ struct irdma_alloc_ucontext_req req = {};
struct irdma_alloc_ucontext_resp uresp = {};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs;
+ if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+ return -EINVAL;
+
if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
return -EINVAL;
@@ -317,7 +323,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
/* GEN_1 legacy support with libi40iw */
- if (udata->outlen < sizeof(uresp)) {
+ if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
return -EOPNOTSUPP;
@@ -389,6 +395,7 @@ static void irdma_dealloc_ucontext(struct ib_ucontext *context)
*/
static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -398,6 +405,9 @@ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
u32 pd_id = 0;
int err;
+ if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+ return -EINVAL;
+
err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
&rf->next_pd);
if (err)
@@ -814,12 +824,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
struct ib_pd *ibpd = ibqp->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp = to_iwqp(ibqp);
- struct irdma_create_qp_req req;
+ struct irdma_create_qp_req req = {};
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
int err_code;
@@ -836,6 +848,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
if (err_code)
return err_code;
+ if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+ return -EINVAL;
+
sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr;
@@ -1120,6 +1136,8 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
@@ -1138,6 +1156,13 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info = &iwqp->roce_info;
udp_info = &iwqp->udp_info;
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -1374,7 +1399,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (udata) {
+ if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
@@ -1426,7 +1451,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} else {
iwqp->ibqp_state = attr->qp_state;
}
- if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata,
@@ -1466,6 +1491,8 @@ exit:
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -1480,6 +1507,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
int err;
unsigned long flags;
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -1565,7 +1599,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
case IB_QPS_RESET:
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (udata) {
+ if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
@@ -1662,7 +1696,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
}
}
}
- if (attr_mask & IB_QP_STATE && udata &&
+ if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
@@ -1797,6 +1831,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct ib_udata *udata)
{
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
struct irdma_cq *iwcq = to_iwcq(ibcq);
struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
struct irdma_cqp_request *cqp_request;
@@ -1819,6 +1854,9 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
IRDMA_FEATURE_CQ_RESIZE))
return -EOPNOTSUPP;
+ if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+ return -EINVAL;
+
if (entries > rf->max_cqe)
return -EINVAL;
@@ -1951,6 +1989,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
struct ib_device *ibdev = ibcq->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
@@ -1969,6 +2009,11 @@ static int irdma_create_cq(struct ib_cq *ibcq,
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return err_code;
+
+ if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+ return -EINVAL;
+
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq);
if (err_code)
@@ -2746,6 +2791,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
u64 virt, int access,
struct ib_udata *udata)
{
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_ucontext *ucontext;
struct irdma_pble_alloc *palloc;
@@ -2763,6 +2809,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
+ if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+ return ERR_PTR(-EINVAL);
+
region = ib_umem_get(pd->device, start, len, access);
if (IS_ERR(region)) {
@@ -3315,6 +3364,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_RETRY_EXC_ERR;
case FLUSH_MW_BIND_ERR:
return IB_WC_MW_BIND_ERR;
+ case FLUSH_REM_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
@@ -4296,12 +4347,16 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
struct rdma_ah_init_attr *attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
struct irdma_create_ah_resp uresp;
struct irdma_ah *parent_ah;
int err;
+ if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+ return -EINVAL;
+
err = irdma_setup_ah(ibah, attr);
if (err)
return err;
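
Throughout this file the patch adds minimum-length checks for the user ABI structures, computed with offsetofend() so that older but still complete layouts keep working while truncated buffers are rejected up front. A minimal sketch of the pattern, reusing one of the request structs from the hunks above:

#define EXAMPLE_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)

static int example_check_udata(struct ib_udata *udata)
{
	/* reject a request buffer too short to hold the fields we will read */
	if (udata && udata->inlen < EXAMPLE_MIN_REQ_LEN)
		return -EINVAL;

	return 0;
}
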
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d13ecbdd4391..a37cfac5e23f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -96,7 +96,7 @@ static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
- return cpu_to_be64(NODE_GUID_HI | prandom_u32());
+ return cpu_to_be64(NODE_GUID_HI | get_random_u32());
}
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 04a67b481608..a40bf58bcdd3 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.length = length;
mr->ibmr.page_size = 1U << shift;
return &mr->ibmr;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 2a2a9e9afc9d..2211a0be16f3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -907,6 +907,7 @@ static bool devx_is_whitelist_cmd(void *in)
case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return true;
default:
return false;
@@ -962,6 +963,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
case MLX5_CMD_OP_QUERY_CONG_PARAMS:
case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return true;
default:
return false;
@@ -2158,32 +2160,39 @@ err:
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
struct uverbs_attr_bundle *attrs,
- struct devx_umem *obj)
+ struct devx_umem *obj, u32 access_flags)
{
u64 addr;
size_t size;
- u32 access;
int err;
if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
return -EFAULT;
- err = uverbs_get_flags32(&access, attrs,
- MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
+ err = ib_check_mr_access(&dev->ib_dev, access_flags);
if (err)
return err;
- err = ib_check_mr_access(&dev->ib_dev, access);
- if (err)
- return err;
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int dmabuf_fd;
+
+ err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
+ if (err)
+ return -EFAULT;
- obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
- if (IS_ERR(obj->umem))
- return PTR_ERR(obj->umem);
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(
+ &dev->ib_dev, addr, size, dmabuf_fd, access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return PTR_ERR(umem_dmabuf);
+ obj->umem = &umem_dmabuf->umem;
+ } else {
+ obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
+ if (IS_ERR(obj->umem))
+ return PTR_ERR(obj->umem);
+ }
return 0;
}
@@ -2222,7 +2231,8 @@ static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
struct uverbs_attr_bundle *attrs,
struct devx_umem *obj,
- struct devx_umem_reg_cmd *cmd)
+ struct devx_umem_reg_cmd *cmd,
+ int access)
{
unsigned long pgsz_bitmap;
unsigned int page_size;
@@ -2271,6 +2281,9 @@ static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
MLX5_SET(umem, umem, page_offset,
ib_umem_dma_offset(obj->umem, page_size));
+ if (mlx5_umem_needs_ats(dev, obj->umem, access))
+ MLX5_SET(umem, umem, ats, 1);
+
mlx5_ib_populate_pas(obj->umem, page_size, mtt,
(obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
MLX5_IB_MTT_READ);
@@ -2288,20 +2301,30 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ int access_flags;
int err;
if (!c->devx_uid)
return -EINVAL;
+ err = uverbs_get_flags32(&access_flags, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_RELAXED_ORDERING);
+ if (err)
+ return err;
+
obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
if (!obj)
return -ENOMEM;
- err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
+ err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
if (err)
goto err_obj_free;
- err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
+ err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
if (err)
goto err_umem_release;
@@ -2833,6 +2856,8 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
UVERBS_ATTR_TYPE(u64),
UA_MANDATORY),
+ UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
+ UA_OPTIONAL),
UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
enum ib_access_flags),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 883d7c60143e..c669ef6e47e7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -26,7 +26,7 @@
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
#include <rdma/lag.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
@@ -46,7 +46,6 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
-#include <rdma/ib_umem_odp.h>
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -1826,6 +1825,9 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
+
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 6191aa833ac2..96ffbbaf0a73 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>
@@ -152,6 +151,7 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
for (i = 0; i < 8; i++)
mlx5_write64(&mmio_wqe[i * 2],
bf->bfreg->map + bf->offset + i * 8);
+ io_stop_wc();
bf->offset ^= bf->buf_size;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 62338f44a30e..4a7f7064bd0e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1563,4 +1563,40 @@ static inline bool rt_supported(int ts_cap)
return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}
+
+/*
+ * PCI Peer to Peer is a trainwreck. If no switch is present then things
+ * sometimes work, depending on the pci_distance_p2p logic for excluding broken
+ * root complexes. However if a switch is present in the path, then things get
+ * really ugly depending on how the switch is setup. This table assumes that the
+ * root complex is strict and is validating that all req/reps are matches
+ * perfectly - so any scenario where it sees only half the transaction is a
+ * failure.
+ *
+ * CR/RR/DT ATS RO P2P
+ * 00X X X OK
+ * 010 X X fails (request is routed to root but root never sees comp)
+ * 011 0 X fails (request is routed to root but root never sees comp)
+ * 011 1 X OK
+ * 10X X 1 OK
+ * 101 X 0 fails (completion is routed to root but root didn't see req)
+ * 110 X 0 SLOW
+ * 111 0 0 SLOW
+ * 111 1 0 fails (completion is routed to root but root didn't see req)
+ * 111 1 1 OK
+ *
+ * Unfortunately we cannot reliably know if a switch is present or what the
+ * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
+ * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
+ *
+ * For now assume if the umem is a dma_buf then it is P2P.
+ */
+static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
+ struct ib_umem *umem, int access_flags)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
+ return false;
+ return access_flags & IB_ACCESS_RELAXED_ORDERING;
+}
+
#endif /* MLX5_IB_H */
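
The comment and helper above gate the new ATS handling on a dma-buf backed umem that was registered with relaxed ordering. As a rough illustration of the kind of registration this is aimed at, here is a hedged userspace sketch; it assumes rdma-core's ibv_reg_dmabuf_mr() and IBV_ACCESS_RELAXED_ORDERING are available, that dmabuf_fd comes from some exporting device such as a GPU allocator, and the wrapper function name is made up.

#include <infiniband/verbs.h>

/* Register a whole dma-buf as an MR with relaxed ordering (hypothetical wrapper). */
static struct ibv_mr *register_p2p_buffer(struct ibv_pd *pd, int dmabuf_fd, size_t len)
{
	int access = IBV_ACCESS_LOCAL_WRITE |
		     IBV_ACCESS_REMOTE_READ |
		     IBV_ACCESS_REMOTE_WRITE |
		     IBV_ACCESS_RELAXED_ORDERING;

	/* offset = 0, iova = 0: map the buffer from its start. */
	return ibv_reg_dmabuf_mr(pd, 0, len, 0, dmabuf_fd, access);
}
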
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 129d531bd01b..410cc5fd2523 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -39,9 +39,7 @@
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
-#include <rdma/ib_verbs.h>
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"
@@ -937,7 +935,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
* cache then synchronously create an uncached one.
*/
if (!ent || ent->limit == 0 ||
- !mlx5r_umr_can_reconfig(dev, 0, access_flags)) {
+ !mlx5r_umr_can_reconfig(dev, 0, access_flags) ||
+ mlx5_umem_needs_ats(dev, umem, access_flags)) {
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(pd, umem, iova, access_flags, page_size, false);
mutex_unlock(&dev->slow_path_mutex);
@@ -1018,6 +1017,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
MLX5_SET(mkc, mkc, translations_octword_size,
get_octo_len(iova, umem->length, mr->page_shift));
MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
+ if (mlx5_umem_needs_ats(dev, umem, access_flags))
+ MLX5_SET(mkc, mkc, ma_translation_mode, 1);
if (populate) {
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(iova, umem->length, mr->page_shift));
@@ -1402,7 +1403,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
}
- mr->ibmr.length = new_umem->length;
mr->ibmr.iova = iova;
mr->ibmr.length = new_umem->length;
mr->page_shift = order_base_2(page_size);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e305bf1dc6c2..bc97958818bb 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
@@ -795,7 +794,8 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
if (!mmkey)
return false;
- if (mmkey->type == MLX5_MKEY_MW)
+ if (mmkey->type == MLX5_MKEY_MW ||
+ mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
return mmkey->key == key;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index bdf5ed38de22..f330ce895d88 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1252,7 +1252,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
- strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
+ strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 265a581133dc..56f06c68f31a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1363,7 +1363,7 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
- strlcpy(dev->model_number,
+ strscpy(dev->model_number,
hba_attribs->controller_model_number,
sizeof(dev->model_number));
}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 6861c6384f18..9d2dd135b784 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2124,7 +2124,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
isfatal = 1;
- strlcpy(msg,
+ strscpy(msg,
"[Memory BIST test failed, InfiniPath hardware unusable]",
msgl);
/* ignore from now on, so disable until driver reloaded */
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index cb2a02d671e2..692b64efad97 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -295,7 +295,7 @@ void qib_free_irq(struct qib_devdata *dd)
* Setup pcie interrupt stuff again after a reset. I'd like to just call
* pci_enable_msi() again for msi, but when I do that,
* the MSI enable bit doesn't get set in the command word, and
- * we switch to to a different interrupt vector, which is confusing,
+ * we switch to a different interrupt vector, which is confusing,
* so I instead just do it all inline. Perhaps somehow can tie this
* into the PCIe hotplug support at some point
*/
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 67a1b4562dc2..67923ced6e2d 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -95,7 +95,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int ret;
int off;
int i;
- int flags;
dma_addr_t pa;
unsigned int gup_flags;
struct mm_struct *mm;
@@ -132,8 +131,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
goto out;
}
- flags = IOMMU_READ | IOMMU_CACHE;
- flags |= (writable) ? IOMMU_WRITE : 0;
gup_flags = FOLL_WRITE;
gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 59481ae39505..d61f8de7f21c 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -15,7 +15,7 @@
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("RDMA Verbs Transport Library");
-static int rvt_init(void)
+static int __init rvt_init(void)
{
int ret = rvt_driver_cq_init();
@@ -26,7 +26,7 @@ static int rvt_init(void)
}
module_init(rvt_init);
-static void rvt_cleanup(void)
+static void __exit rvt_cleanup(void)
{
rvt_cq_exit();
}
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
index e03af3012590..46bb07c5c4df 100644
--- a/drivers/infiniband/sw/rxe/rxe_icrc.c
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -151,18 +151,8 @@ int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt)
payload_size(pkt) + bth_pad(pkt));
icrc = ~icrc;
- if (unlikely(icrc != pkt_icrc)) {
- if (skb->protocol == htons(ETH_P_IPV6))
- pr_warn_ratelimited("bad ICRC from %pI6c\n",
- &ipv6_hdr(skb)->saddr);
- else if (skb->protocol == htons(ETH_P_IP))
- pr_warn_ratelimited("bad ICRC from %pI4\n",
- &ip_hdr(skb)->saddr);
- else
- pr_warn_ratelimited("bad ICRC from unknown\n");
-
+ if (unlikely(icrc != pkt_icrc))
return -EINVAL;
- }
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 22f6cc31d1d6..c2a5c8814a48 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,10 +64,10 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+void rxe_mr_init_dma(int access, struct rxe_mr *mr);
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr);
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 850b80f5ad8b..502e9ada99b3 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -32,8 +32,8 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG:
- if (iova < mr->iova || length > mr->length ||
- iova > mr->iova + mr->length - length)
+ if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
+ iova > mr->ibmr.iova + mr->ibmr.length - length)
return -EFAULT;
return 0;
@@ -103,17 +103,16 @@ err1:
return -ENOMEM;
}
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
+void rxe_mr_init_dma(int access, struct rxe_mr *mr)
{
rxe_mr_init(access, mr);
- mr->ibmr.pd = &pd->ibpd;
mr->access = access;
mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_DMA;
}
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
struct rxe_map **map;
@@ -125,7 +124,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int err;
int i;
- umem = ib_umem_get(pd->ibpd.device, start, length, access);
+ umem = ib_umem_get(&rxe->ib_dev, start, length, access);
if (IS_ERR(umem)) {
pr_warn("%s: Unable to pin memory region err = %d\n",
__func__, (int)PTR_ERR(umem));
@@ -175,12 +174,8 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
}
}
- mr->ibmr.pd = &pd->ibpd;
mr->umem = umem;
mr->access = access;
- mr->length = length;
- mr->iova = iova;
- mr->va = start;
mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_USER;
@@ -197,7 +192,7 @@ err_out:
return err;
}
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
{
int err;
@@ -208,7 +203,6 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
if (err)
goto err1;
- mr->ibmr.pd = &pd->ibpd;
mr->max_buf = max_pages;
mr->state = RXE_MR_STATE_FREE;
mr->type = IB_MR_TYPE_MEM_REG;
@@ -222,7 +216,7 @@ err1:
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out)
{
- size_t offset = iova - mr->iova + mr->offset;
+ size_t offset = iova - mr->ibmr.iova + mr->offset;
int map_index;
int buf_index;
u64 length;
@@ -605,7 +599,7 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
mr->access = access;
mr->lkey = key;
mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
- mr->iova = wqe->wr.wr.reg.mr->iova;
+ mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
mr->state = RXE_MR_STATE_VALID;
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 104993801a80..902b7df7aaed 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -114,15 +114,15 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
/* C10-75 */
if (mw->access & IB_ZERO_BASED) {
- if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
pr_err_once(
"attempt to bind a ZB MW outside of the MR\n");
return -EINVAL;
}
} else {
- if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
+ if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
- (mr->iova + mr->length)))) {
+ (mr->ibmr.iova + mr->ibmr.length)))) {
pr_err_once(
"attempt to bind a VA MW outside of the MR\n");
return -EINVAL;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c53f4529f098..35f327b9d4b8 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -145,7 +145,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop;
if (skb_linearize(skb)) {
- pr_err("skb_linearize failed\n");
ib_device_put(&rxe->ib_dev);
goto drop;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 516bf9b95e48..a62bab88415c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -19,34 +19,34 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
int has_srq)
{
if (cap->max_send_wr > rxe->attr.max_qp_wr) {
- pr_warn("invalid send wr = %d > %d\n",
- cap->max_send_wr, rxe->attr.max_qp_wr);
+ pr_debug("invalid send wr = %u > %d\n",
+ cap->max_send_wr, rxe->attr.max_qp_wr);
goto err1;
}
if (cap->max_send_sge > rxe->attr.max_send_sge) {
- pr_warn("invalid send sge = %d > %d\n",
- cap->max_send_sge, rxe->attr.max_send_sge);
+ pr_debug("invalid send sge = %u > %d\n",
+ cap->max_send_sge, rxe->attr.max_send_sge);
goto err1;
}
if (!has_srq) {
if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
- pr_warn("invalid recv wr = %d > %d\n",
- cap->max_recv_wr, rxe->attr.max_qp_wr);
+ pr_debug("invalid recv wr = %u > %d\n",
+ cap->max_recv_wr, rxe->attr.max_qp_wr);
goto err1;
}
if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
- pr_warn("invalid recv sge = %d > %d\n",
- cap->max_recv_sge, rxe->attr.max_recv_sge);
+ pr_debug("invalid recv sge = %u > %d\n",
+ cap->max_recv_sge, rxe->attr.max_recv_sge);
goto err1;
}
}
if (cap->max_inline_data > rxe->max_inline_data) {
- pr_warn("invalid max inline data = %d > %d\n",
- cap->max_inline_data, rxe->max_inline_data);
+ pr_debug("invalid max inline data = %u > %d\n",
+ cap->max_inline_data, rxe->max_inline_data);
goto err1;
}
@@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
}
if (!init->recv_cq || !init->send_cq) {
- pr_warn("missing cq\n");
+ pr_debug("missing cq\n");
goto err1;
}
@@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
if (init->qp_type == IB_QPT_GSI) {
if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
- pr_warn("invalid port = %d\n", port_num);
+ pr_debug("invalid port = %d\n", port_num);
goto err1;
}
port = &rxe->port;
if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
- pr_warn("GSI QP exists for port %d\n", port_num);
+ pr_debug("GSI QP exists for port %d\n", port_num);
goto err1;
}
}
@@ -242,9 +242,9 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
skb_queue_head_init(&qp->req_pkts);
- rxe_init_task(rxe, &qp->req.task, qp,
+ rxe_init_task(&qp->req.task, qp,
rxe_requester, "req");
- rxe_init_task(rxe, &qp->comp.task, qp,
+ rxe_init_task(&qp->comp.task, qp,
rxe_completer, "comp");
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
@@ -292,7 +292,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
skb_queue_head_init(&qp->resp_pkts);
- rxe_init_task(rxe, &qp->resp.task, qp,
+ rxe_init_task(&qp->resp.task, qp,
rxe_responder, "resp");
qp->resp.opcode = OPCODE_NONE;
@@ -402,7 +402,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
- pr_warn("invalid mask or state for qp\n");
+ pr_debug("invalid mask or state for qp\n");
goto err1;
}
@@ -416,7 +416,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (mask & IB_QP_PORT) {
if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
- pr_warn("invalid port %d\n", attr->port_num);
+ pr_debug("invalid port %d\n", attr->port_num);
goto err1;
}
}
@@ -431,12 +431,12 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
goto err1;
if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
- pr_warn("invalid alt port %d\n", attr->alt_port_num);
+ pr_debug("invalid alt port %d\n", attr->alt_port_num);
goto err1;
}
if (attr->alt_timeout > 31) {
- pr_warn("invalid QP alt timeout %d > 31\n",
- attr->alt_timeout);
+ pr_debug("invalid QP alt timeout %d > 31\n",
+ attr->alt_timeout);
goto err1;
}
}
@@ -457,17 +457,16 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
- pr_warn("invalid max_rd_atomic %d > %d\n",
- attr->max_rd_atomic,
- rxe->attr.max_qp_rd_atom);
+ pr_debug("invalid max_rd_atomic %d > %d\n",
+ attr->max_rd_atomic,
+ rxe->attr.max_qp_rd_atom);
goto err1;
}
}
if (mask & IB_QP_TIMEOUT) {
if (attr->timeout > 31) {
- pr_warn("invalid QP timeout %d > 31\n",
- attr->timeout);
+ pr_debug("invalid QP timeout %d > 31\n", attr->timeout);
goto err1;
}
}
@@ -797,7 +796,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */
- __rxe_do_task(&qp->req.task);
+ if (qp->req.task.func)
+ __rxe_do_task(&qp->req.task);
+
if (qp->sq.queue) {
__rxe_do_task(&qp->comp.task);
__rxe_do_task(&qp->req.task);
@@ -833,8 +834,10 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
free_rd_atomic_resources(qp);
- kernel_sock_shutdown(qp->sk, SHUT_RDWR);
- sock_release(qp->sk);
+ if (qp->sk) {
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
+ }
}
/* called when the last reference to the qp is dropped */
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index dbd4971039c0..d6dbf5a0058d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -112,23 +112,25 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
unsigned int num_elem)
{
enum queue_type type = q->type;
+ u32 new_prod;
u32 prod;
u32 cons;
if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))
return -EINVAL;
- prod = queue_get_producer(new_q, type);
+ new_prod = queue_get_producer(new_q, type);
+ prod = queue_get_producer(q, type);
cons = queue_get_consumer(q, type);
- while (!queue_empty(q, type)) {
- memcpy(queue_addr_from_index(new_q, prod),
+ while ((prod - cons) & q->index_mask) {
+ memcpy(queue_addr_from_index(new_q, new_prod),
queue_addr_from_index(q, cons), new_q->elem_size);
- prod = queue_next_index(new_q, prod);
+ new_prod = queue_next_index(new_q, new_prod);
cons = queue_next_index(q, cons);
}
- new_q->buf->producer_index = prod;
+ new_q->buf->producer_index = new_prod;
q->buf->consumer_index = cons;
/* update private index copies */
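
The resize fix above keeps two sets of indices: the old ring is drained using a snapshot of its own producer/consumer, while only the new ring's producer advances, so the loop terminates based on that stable snapshot rather than re-reading shared queue state each pass. A simplified, generic sketch of that copy loop follows; the ring structure and all names are invented for illustration, and a power-of-two ring size is assumed.

#include <string.h>

struct ring {
	unsigned int prod;		/* producer index */
	unsigned int cons;		/* consumer index */
	unsigned int index_mask;	/* number of slots minus one (power of two) */
	size_t elem_size;
	unsigned char *buf;
};

static void *ring_addr(struct ring *r, unsigned int index)
{
	return r->buf + (size_t)(index & r->index_mask) * r->elem_size;
}

/* Move every element still queued in 'src' into 'dst' (dst assumed large enough). */
static void ring_resize_copy(struct ring *src, struct ring *dst)
{
	unsigned int prod = src->prod;	/* snapshot the source ring's own indices */
	unsigned int cons = src->cons;
	unsigned int count = (prod - cons) & src->index_mask;

	while (count--) {
		memcpy(ring_addr(dst, dst->prod), ring_addr(src, cons),
		       src->elem_size);
		dst->prod = (dst->prod + 1) & dst->index_mask;
		cons = (cons + 1) & src->index_mask;
	}
	src->cons = cons;
}
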
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index f3ad7b6dbd97..434a693cd4a5 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -16,47 +16,36 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
unsigned int pkt_type;
if (unlikely(!qp->valid))
- goto err1;
+ return -EINVAL;
pkt_type = pkt->opcode & 0xe0;
switch (qp_type(qp)) {
case IB_QPT_RC:
- if (unlikely(pkt_type != IB_OPCODE_RC)) {
- pr_warn_ratelimited("bad qp type\n");
- goto err1;
- }
+ if (unlikely(pkt_type != IB_OPCODE_RC))
+ return -EINVAL;
break;
case IB_QPT_UC:
- if (unlikely(pkt_type != IB_OPCODE_UC)) {
- pr_warn_ratelimited("bad qp type\n");
- goto err1;
- }
+ if (unlikely(pkt_type != IB_OPCODE_UC))
+ return -EINVAL;
break;
case IB_QPT_UD:
case IB_QPT_GSI:
- if (unlikely(pkt_type != IB_OPCODE_UD)) {
- pr_warn_ratelimited("bad qp type\n");
- goto err1;
- }
+ if (unlikely(pkt_type != IB_OPCODE_UD))
+ return -EINVAL;
break;
default:
- pr_warn_ratelimited("unsupported qp type\n");
- goto err1;
+ return -EINVAL;
}
if (pkt->mask & RXE_REQ_MASK) {
if (unlikely(qp->resp.state != QP_STATE_READY))
- goto err1;
+ return -EINVAL;
} else if (unlikely(qp->req.state < QP_STATE_READY ||
- qp->req.state > QP_STATE_DRAINED)) {
- goto err1;
- }
+ qp->req.state > QP_STATE_DRAINED))
+ return -EINVAL;
return 0;
-
-err1:
- return -EINVAL;
}
static void set_bad_pkey_cntr(struct rxe_port *port)
@@ -84,26 +73,20 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
pkt->pkey_index = 0;
if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
- pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
set_bad_pkey_cntr(port);
- goto err1;
+ return -EINVAL;
}
if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
if (unlikely(deth_qkey(pkt) != qkey)) {
- pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
- deth_qkey(pkt), qkey, qpn);
set_qkey_viol_cntr(port);
- goto err1;
+ return -EINVAL;
}
}
return 0;
-
-err1:
- return -EINVAL;
}
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
@@ -112,13 +95,10 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct sk_buff *skb = PKT_TO_SKB(pkt);
if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
- goto done;
+ return 0;
- if (unlikely(pkt->port_num != qp->attr.port_num)) {
- pr_warn_ratelimited("port %d != qp port %d\n",
- pkt->port_num, qp->attr.port_num);
- goto err1;
- }
+ if (unlikely(pkt->port_num != qp->attr.port_num))
+ return -EINVAL;
if (skb->protocol == htons(ETH_P_IP)) {
struct in_addr *saddr =
@@ -126,19 +106,9 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in_addr *daddr =
&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;
- if (ip_hdr(skb)->daddr != saddr->s_addr) {
- pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
- &ip_hdr(skb)->daddr,
- &saddr->s_addr);
- goto err1;
- }
-
- if (ip_hdr(skb)->saddr != daddr->s_addr) {
- pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
- &ip_hdr(skb)->saddr,
- &daddr->s_addr);
- goto err1;
- }
+ if ((ip_hdr(skb)->daddr != saddr->s_addr) ||
+ (ip_hdr(skb)->saddr != daddr->s_addr))
+ return -EINVAL;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct in6_addr *saddr =
@@ -146,24 +116,12 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in6_addr *daddr =
&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;
- if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
- pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
- &ipv6_hdr(skb)->daddr, saddr);
- goto err1;
- }
-
- if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
- pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
- &ipv6_hdr(skb)->saddr, daddr);
- goto err1;
- }
+ if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) ||
+ memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr)))
+ return -EINVAL;
}
-done:
return 0;
-
-err1:
- return -EINVAL;
}
static int hdr_check(struct rxe_pkt_info *pkt)
@@ -175,24 +133,18 @@ static int hdr_check(struct rxe_pkt_info *pkt)
int index;
int err;
- if (unlikely(bth_tver(pkt) != BTH_TVER)) {
- pr_warn_ratelimited("bad tver\n");
+ if (unlikely(bth_tver(pkt) != BTH_TVER))
goto err1;
- }
- if (unlikely(qpn == 0)) {
- pr_warn_once("QP 0 not supported");
+ if (unlikely(qpn == 0))
goto err1;
- }
if (qpn != IB_MULTICAST_QPN) {
index = (qpn == 1) ? port->qp_gsi_index : qpn;
qp = rxe_pool_get_index(&rxe->qp_pool, index);
- if (unlikely(!qp)) {
- pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
+ if (unlikely(!qp))
goto err1;
- }
err = check_type_state(rxe, pkt, qp);
if (unlikely(err))
@@ -206,10 +158,8 @@ static int hdr_check(struct rxe_pkt_info *pkt)
if (unlikely(err))
goto err2;
} else {
- if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
- pr_warn_ratelimited("no grh for mcast qpn\n");
+ if (unlikely((pkt->mask & RXE_GRH_MASK) == 0))
goto err1;
- }
}
pkt->qp = qp;
@@ -364,10 +314,8 @@ void rxe_rcv(struct sk_buff *skb)
if (unlikely(skb->len < RXE_BTH_BYTES))
goto drop;
- if (rxe_chk_dgid(rxe, skb) < 0) {
- pr_warn_ratelimited("failed checking dgid\n");
+ if (rxe_chk_dgid(rxe, skb) < 0)
goto drop;
- }
pkt->opcode = bth_opcode(pkt);
pkt->psn = bth_psn(pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index b36ec5c4d5e0..ed5a09e86417 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -809,10 +809,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (!skb)
return RESPST_ERR_RNR;
- err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
- payload, RXE_FROM_MR_OBJ);
- if (err)
- pr_err("Failed copying memory\n");
+ rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
+ payload, RXE_FROM_MR_OBJ);
if (mr)
rxe_put(mr);
@@ -823,10 +821,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
}
err = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (err) {
- pr_err("Failed sending RDMA reply.\n");
+ if (err)
return RESPST_ERR_RNR;
- }
res->read.va += payload;
res->read.resid -= payload;
@@ -1028,50 +1024,41 @@ finish:
return RESPST_CLEANUP;
}
-static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+
+static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
+ int opcode, const char *msg)
{
- int err = 0;
+ int err;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
- 0, psn, syndrome);
- if (!skb) {
- err = -ENOMEM;
- goto err1;
- }
+ skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome);
+ if (!skb)
+ return -ENOMEM;
err = rxe_xmit_packet(qp, &ack_pkt, skb);
if (err)
- pr_err_ratelimited("Failed sending ack\n");
+ pr_err_ratelimited("Failed sending %s\n", msg);
-err1:
return err;
}
-static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
- int err = 0;
- struct rxe_pkt_info ack_pkt;
- struct sk_buff *skb;
-
- skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
- 0, psn, syndrome);
- if (!skb) {
- err = -ENOMEM;
- goto out;
- }
+ return send_common_ack(qp, syndrome, psn,
+ IB_OPCODE_RC_ACKNOWLEDGE, "ACK");
+}
- err = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (err)
- pr_err_ratelimited("Failed sending atomic ack\n");
+static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+{
+ int ret = send_common_ack(qp, syndrome, psn,
+ IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, "ATOMIC ACK");
/* have to clear this since it is used to trigger
* long read replies
*/
qp->resp.res = NULL;
-out:
- return err;
+ return ret;
}
static enum resp_states acknowledge(struct rxe_qp *qp,
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 2248cf33d776..ec2b7de1c497 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -94,10 +94,9 @@ void rxe_do_task(struct tasklet_struct *t)
task->ret = ret;
}
-int rxe_init_task(void *obj, struct rxe_task *task,
+int rxe_init_task(struct rxe_task *task,
void *arg, int (*func)(void *), char *name)
{
- task->obj = obj;
task->arg = arg;
task->func = func;
snprintf(task->name, sizeof(task->name), "%s", name);
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index 11d183fd3338..7f612a1c68a7 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -19,7 +19,6 @@ enum {
* called again.
*/
struct rxe_task {
- void *obj;
struct tasklet_struct tasklet;
int state;
spinlock_t state_lock; /* spinlock for task state */
@@ -35,7 +34,7 @@ struct rxe_task {
* arg => parameter to pass to fcn
* func => function to call until it returns != 0
*/
-int rxe_init_task(void *obj, struct rxe_task *task,
+int rxe_init_task(struct rxe_task *task,
void *arg, int (*func)(void *), char *name);
/* cleanup task */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index e264cf69bf55..88825edc7dce 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -262,7 +262,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
recv_wqe->wr_id = ibwr->wr_id;
- recv_wqe->num_sge = num_sge;
memcpy(recv_wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
@@ -526,7 +525,6 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
const struct ib_send_wr *ibwr)
{
wr->wr_id = ibwr->wr_id;
- wr->num_sge = ibwr->num_sge;
wr->opcode = ibwr->opcode;
wr->send_flags = ibwr->send_flags;
@@ -903,7 +901,9 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
return ERR_PTR(-ENOMEM);
rxe_get(pd);
- rxe_mr_init_dma(pd, access, mr);
+ mr->ibmr.pd = ibpd;
+
+ rxe_mr_init_dma(access, mr);
rxe_finalize(mr);
return &mr->ibmr;
@@ -928,8 +928,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_get(pd);
+ mr->ibmr.pd = ibpd;
- err = rxe_mr_init_user(pd, start, length, iova, access, mr);
+ err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
if (err)
goto err3;
@@ -938,7 +939,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
return &mr->ibmr;
err3:
- rxe_put(pd);
rxe_cleanup(mr);
err2:
return ERR_PTR(err);
@@ -962,8 +962,9 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
}
rxe_get(pd);
+ mr->ibmr.pd = ibpd;
- err = rxe_mr_init_fast(pd, max_num_sg, mr);
+ err = rxe_mr_init_fast(max_num_sg, mr);
if (err)
goto err2;
@@ -972,7 +973,6 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
return &mr->ibmr;
err2:
- rxe_put(pd);
rxe_cleanup(mr);
err1:
return ERR_PTR(err);
@@ -1007,12 +1007,9 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
- mr->va = ibmr->iova;
- mr->iova = ibmr->iova;
- mr->length = ibmr->length;
mr->page_shift = ilog2(ibmr->page_size);
mr->page_mask = ibmr->page_size - 1;
- mr->offset = mr->iova & mr->page_mask;
+ mr->offset = ibmr->iova & mr->page_mask;
return n;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 96af3e054f4d..5f5cbfcb3569 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -305,9 +305,6 @@ struct rxe_mr {
u32 rkey;
enum rxe_mr_state state;
enum ib_mr_type type;
- u64 va;
- u64 iova;
- size_t length;
u32 offset;
int access;
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index 1b5105cbabae..81b70a3eeb87 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,7 +1,10 @@
config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
- depends on INET && INFINIBAND && LIBCRC32C
+ depends on INET && INFINIBAND
depends on INFINIBAND_VIRT_DMA
+ select LIBCRC32C
+ select CRYPTO
+ select CRYPTO_CRC32C
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index df03d84c6868..2f3a9cda3850 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -418,6 +418,7 @@ struct siw_qp {
struct ib_qp base_qp;
struct siw_device *sdev;
struct kref ref;
+ struct completion qp_free;
struct list_head devq;
int tx_cpu;
struct siw_qp_attrs attrs;
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 7e01f2438afc..e6f634971228 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1342,6 +1342,6 @@ void siw_free_qp(struct kref *ref)
vfree(qp->orq);
siw_put_tx_cpu(qp->tx_cpu);
-
+ complete(&qp->qp_free);
atomic_dec(&sdev->num_qp);
}
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 875ea6f1b04a..fd721cc19682 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -961,27 +961,28 @@ out:
static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
{
struct sk_buff *skb = srx->skb;
+ int avail = min(srx->skb_new, srx->fpdu_part_rem);
u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
__wsum crc_in, crc_own = 0;
siw_dbg_qp(qp, "expected %d, available %d, pad %u\n",
srx->fpdu_part_rem, srx->skb_new, srx->pad);
- if (srx->skb_new < srx->fpdu_part_rem)
- return -EAGAIN;
-
- skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
+ skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
- if (srx->mpa_crc_hd && srx->pad)
- crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
+ srx->skb_new -= avail;
+ srx->skb_offset += avail;
+ srx->skb_copied += avail;
+ srx->fpdu_part_rem -= avail;
- srx->skb_new -= srx->fpdu_part_rem;
- srx->skb_offset += srx->fpdu_part_rem;
- srx->skb_copied += srx->fpdu_part_rem;
+ if (srx->fpdu_part_rem)
+ return -EAGAIN;
if (!srx->mpa_crc_hd)
return 0;
+ if (srx->pad)
+ crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
/*
* CRC32 is computed, transmitted and received directly in NBO,
* so there's never a reason to convert byte order.
@@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
* completely received.
*/
if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) {
- bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR;
+ int hdrlen = iwarp_pktinfo[opcode].hdr_len;
- if (srx->skb_new < bytes)
- return -EAGAIN;
+ bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
skb_copy_bits(skb, srx->skb_offset,
(char *)c_hdr + srx->fpdu_part_rcvd, bytes);
@@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
srx->skb_new -= bytes;
srx->skb_offset += bytes;
srx->skb_copied += bytes;
+
+ if (srx->fpdu_part_rcvd < hdrlen)
+ return -EAGAIN;
}
/*
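
Both siw_qp_rx.c hunks above move to the same incremental-consume pattern: copy however many bytes are currently available, account for them, and return -EAGAIN so processing resumes when more data arrives, instead of insisting that the whole trailer or header fragment is already present. A minimal, self-contained sketch of that pattern (the stream struct and field names are invented):

#include <errno.h>
#include <string.h>

struct rx_stream {
	const unsigned char *data;	/* bytes currently available */
	size_t avail;
	unsigned char *part_buf;	/* where the current part is being assembled */
	size_t part_rem;		/* bytes still missing from the current part */
};

/* Consume up to part_rem bytes; -EAGAIN means "call again once more data arrives". */
static int consume_part(struct rx_stream *srx)
{
	size_t n = srx->avail < srx->part_rem ? srx->avail : srx->part_rem;

	memcpy(srx->part_buf, srx->data, n);
	srx->part_buf += n;
	srx->data += n;
	srx->avail -= n;
	srx->part_rem -= n;

	return srx->part_rem ? -EAGAIN : 0;
}
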
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 8dedae7ae79e..3e814cfb298c 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -480,6 +480,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
list_add_tail(&qp->devq, &sdev->qp_list);
spin_unlock_irqrestore(&sdev->lock, flags);
+ init_completion(&qp->qp_free);
+
return 0;
err_out_xa:
@@ -624,6 +626,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
qp->scq = qp->rcq = NULL;
siw_qp_put(qp);
+ wait_for_completion(&qp->qp_free);
return 0;
}
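
The siw hunks above (the new qp_free completion, completed at the end of siw_free_qp() and waited on in siw_destroy_qp()) are an instance of the usual "wait for the last reference" teardown idiom: the kref release callback fires a completion that the destroy path blocks on, so destroy cannot return while another context still holds a reference. A condensed sketch of the pattern, using the generic kref/completion APIs with made-up object names; the object's memory is assumed to be freed by the caller only after destroy returns.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>

struct obj {
	struct kref ref;
	struct completion freed;	/* init_completion() when the object is created */
};

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);

	/* tear down per-object resources here ... */
	complete(&o->freed);		/* last step: wake up the destroyer */
}

static void obj_destroy(struct obj *o)
{
	kref_put(&o->ref, obj_release);
	/* Block until whoever holds the last reference drops it. */
	wait_for_completion(&o->freed);
	/* The caller releases o's memory only after this returns. */
}
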
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index fd9d7f2c4d64..b610d36295bb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -465,7 +465,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
goto err_qp;
}
- psn = prandom_u32() & 0xffffff;
+ psn = get_random_u32() & 0xffffff;
ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
if (ret)
goto err_modify;
@@ -884,8 +884,8 @@ int ipoib_cm_dev_open(struct net_device *dev)
goto err_cm;
}
- ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
- 0);
+ ret = ib_cm_listen(priv->cm.id,
+ cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num));
if (ret) {
pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
IPOIB_CM_IETF_ID | priv->qp->qp_num);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index a09ca21f7dff..8af99b18d361 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -65,10 +65,10 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
- strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
+ strscpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
}
static int ipoib_get_coalesce(struct net_device *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index a4904371e2db..ac25fc80fb33 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -742,7 +742,7 @@ void ipoib_flush_paths(struct net_device *dev)
static void path_rec_completion(int status,
struct sa_path_rec *pathrec,
- void *path_ptr)
+ int num_prs, void *path_ptr)
{
struct ipoib_path *path = path_ptr;
struct net_device *dev = path->dev;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
index 42d557dff19d..29b3d8fce3f5 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -124,8 +124,8 @@ static struct vnic_stats vnic_gstrings_stats[] = {
static void vnic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
+ strscpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/infiniband/ulp/rtrs/Makefile b/drivers/infiniband/ulp/rtrs/Makefile
index 3898509be270..5227e7788e1f 100644
--- a/drivers/infiniband/ulp/rtrs/Makefile
+++ b/drivers/infiniband/ulp/rtrs/Makefile
@@ -1,12 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-or-later
+CFLAGS_rtrs-clt-trace.o = -I$(src)
+
rtrs-client-y := rtrs-clt.o \
rtrs-clt-stats.o \
- rtrs-clt-sysfs.o
+ rtrs-clt-sysfs.o \
+ rtrs-clt-trace.o
+
+CFLAGS_rtrs-srv-trace.o = -I$(src)
rtrs-server-y := rtrs-srv.o \
rtrs-srv-stats.o \
- rtrs-srv-sysfs.o
+ rtrs-srv-sysfs.o \
+ rtrs-srv-trace.o
rtrs-core-y := rtrs.o
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c
new file mode 100644
index 000000000000..f14fa1f36ce8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-clt.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rtrs-clt-trace.h"
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h
new file mode 100644
index 000000000000..7738e2676855
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtrs_clt
+
+#if !defined(_TRACE_RTRS_CLT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTRS_CLT_H
+
+#include <linux/tracepoint.h>
+
+struct rtrs_clt_path;
+struct rtrs_clt_sess;
+
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING_ERR);
+TRACE_DEFINE_ENUM(RTRS_CLT_RECONNECTING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTED);
+TRACE_DEFINE_ENUM(RTRS_CLT_CLOSING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CLOSED);
+TRACE_DEFINE_ENUM(RTRS_CLT_DEAD);
+
+#define show_rtrs_clt_state(x) \
+ __print_symbolic(x, \
+ { RTRS_CLT_CONNECTING, "CONNECTING" }, \
+ { RTRS_CLT_CONNECTING_ERR, "CONNECTING_ERR" }, \
+ { RTRS_CLT_RECONNECTING, "RECONNECTING" }, \
+ { RTRS_CLT_CONNECTED, "CONNECTED" }, \
+ { RTRS_CLT_CLOSING, "CLOSING" }, \
+ { RTRS_CLT_CLOSED, "CLOSED" }, \
+ { RTRS_CLT_DEAD, "DEAD" })
+
+DECLARE_EVENT_CLASS(rtrs_clt_conn_class,
+ TP_PROTO(struct rtrs_clt_path *clt_path),
+
+ TP_ARGS(clt_path),
+
+ TP_STRUCT__entry(
+ __field(int, state)
+ __field(int, reconnect_attempts)
+ __field(int, max_reconnect_attempts)
+ __field(int, fail_cnt)
+ __field(int, success_cnt)
+ __array(char, sessname, NAME_MAX)
+ ),
+
+ TP_fast_assign(
+ struct rtrs_clt_sess *clt = clt_path->clt;
+
+ __entry->state = clt_path->state;
+ __entry->reconnect_attempts = clt_path->reconnect_attempts;
+ __entry->max_reconnect_attempts = clt->max_reconnect_attempts;
+ __entry->fail_cnt = clt_path->stats->reconnects.fail_cnt;
+ __entry->success_cnt = clt_path->stats->reconnects.successful_cnt;
+ memcpy(__entry->sessname, kobject_name(&clt_path->kobj), NAME_MAX);
+ ),
+
+ TP_printk("RTRS-CLT: sess='%s' state=%s attempts='%d' max-attempts='%d' fail='%d' success='%d'",
+ __entry->sessname,
+ show_rtrs_clt_state(__entry->state),
+ __entry->reconnect_attempts,
+ __entry->max_reconnect_attempts,
+ __entry->fail_cnt,
+ __entry->success_cnt
+ )
+);
+
+#define DEFINE_CLT_CONN_EVENT(name) \
+DEFINE_EVENT(rtrs_clt_conn_class, rtrs_##name, \
+ TP_PROTO(struct rtrs_clt_path *clt_path), \
+ TP_ARGS(clt_path))
+
+DEFINE_CLT_CONN_EVENT(clt_reconnect_work);
+DEFINE_CLT_CONN_EVENT(clt_close_conns);
+DEFINE_CLT_CONN_EVENT(rdma_error_recovery);
+
+#endif /* _TRACE_RTRS_CLT_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rtrs-clt-trace
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 449904dac0a9..8546b8816524 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -16,6 +16,7 @@
#include "rtrs-clt.h"
#include "rtrs-log.h"
+#include "rtrs-clt-trace.h"
#define RTRS_CONNECT_TIMEOUT_MS 30000
/*
@@ -53,7 +54,10 @@ static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
rcu_read_lock();
list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
- connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED;
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
+ connected = true;
+ break;
+ }
rcu_read_unlock();
return connected;
@@ -302,6 +306,8 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ trace_rtrs_rdma_error_recovery(clt_path);
+
if (rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTED,
RTRS_CLT_RECONNECTING)) {
@@ -1511,8 +1517,7 @@ static void rtrs_clt_err_recovery_work(struct work_struct *work)
rtrs_clt_stop_and_destroy_conns(clt_path);
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
- prandom_u32() %
- RTRS_RECONNECT_SEED));
+ prandom_u32_max(RTRS_RECONNECT_SEED)));
}
static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
@@ -1943,6 +1948,8 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
{
+ trace_rtrs_clt_close_conns(clt_path);
+
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
queue_work(rtrs_wq, &clt_path->close_work);
if (wait)
@@ -2213,17 +2220,6 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
}
}
-static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path,
- struct rtrs_clt_path *clt_path,
- struct rtrs_clt_path *next)
-{
- struct rtrs_clt_path **ppcpu_path;
-
- /* Call cmpxchg() without sparse warnings */
- ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
- return clt_path == cmpxchg(ppcpu_path, clt_path, next);
-}
-
static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_sess *clt = clt_path->clt;
@@ -2298,7 +2294,8 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
* We race with IO code path, which also changes pointer,
* thus we have to be careful not to overwrite it.
*/
- if (xchg_paths(ppcpu_path, clt_path, next))
+ if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
+ next))
/*
* @ppcpu_path was successfully replaced with @next,
* that means that someone could also pick up the
@@ -2649,6 +2646,8 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
reconnect_dwork);
clt = clt_path->clt;
+ trace_rtrs_clt_reconnect_work(clt_path);
+
if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
return;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index ac0df734eba8..a2420eecaf5a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -26,11 +26,10 @@
/*
* Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
* and the minimum chunk size is 4096 (2^12).
- * So the maximum sess_queue_depth is 65536 (2^16) in theory.
- * But mempool_create, create_qp and ib_post_send fail with
- * "cannot allocate memory" error if sess_queue_depth is too big.
+ * So the maximum sess_queue_depth is 65535 (2^16 - 1) in theory
+ * since queue_depth in rtrs_msg_conn_rsp is defined as le16.
 * Therefore the practical max value of sess_queue_depth is
- * somewhere between 1 and 65534 and it depends on the system.
+ * somewhere between 1 and 65535 and it depends on the system.
*/
#define MAX_SESS_QUEUE_DEPTH 65535
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c
new file mode 100644
index 000000000000..29ca59ceb0dd
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rtrs-srv-trace.h"
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h
new file mode 100644
index 000000000000..587d3e033081
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtrs_srv
+
+#if !defined(_TRACE_RTRS_SRV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTRS_SRV_H
+
+#include <linux/tracepoint.h>
+
+struct rtrs_srv_op;
+struct rtrs_srv_con;
+struct rtrs_srv_path;
+
+TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTING);
+TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTED);
+TRACE_DEFINE_ENUM(RTRS_SRV_CLOSING);
+TRACE_DEFINE_ENUM(RTRS_SRV_CLOSED);
+
+#define show_rtrs_srv_state(x) \
+ __print_symbolic(x, \
+ { RTRS_SRV_CONNECTING, "CONNECTING" }, \
+ { RTRS_SRV_CONNECTED, "CONNECTED" }, \
+ { RTRS_SRV_CLOSING, "CLOSING" }, \
+ { RTRS_SRV_CLOSED, "CLOSED" })
+
+TRACE_EVENT(send_io_resp_imm,
+ TP_PROTO(struct rtrs_srv_op *id,
+ bool need_inval,
+ bool always_invalidate,
+ int errno),
+
+ TP_ARGS(id, need_inval, always_invalidate, errno),
+
+ TP_STRUCT__entry(
+ __field(u8, dir)
+ __field(bool, need_inval)
+ __field(bool, always_invalidate)
+ __field(u32, msg_id)
+ __field(int, wr_cnt)
+ __field(u32, signal_interval)
+ __field(int, state)
+ __field(int, errno)
+ __array(char, sessname, NAME_MAX)
+ ),
+
+ TP_fast_assign(
+ struct rtrs_srv_con *con = id->con;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+
+ __entry->dir = id->dir;
+ __entry->state = srv_path->state;
+ __entry->errno = errno;
+ __entry->need_inval = need_inval;
+ __entry->always_invalidate = always_invalidate;
+ __entry->msg_id = id->msg_id;
+ __entry->wr_cnt = atomic_read(&con->c.wr_cnt);
+ __entry->signal_interval = s->signal_interval;
+ memcpy(__entry->sessname, kobject_name(&srv_path->kobj), NAME_MAX);
+ ),
+
+ TP_printk("sess='%s' state='%s' dir=%s err='%d' inval='%d' glob-inval='%d' msgid='%u' wrcnt='%d' sig-interval='%u'",
+ __entry->sessname,
+ show_rtrs_srv_state(__entry->state),
+ __print_symbolic(__entry->dir,
+ { READ, "READ" },
+ { WRITE, "WRITE" }),
+ __entry->errno,
+ __entry->need_inval,
+ __entry->always_invalidate,
+ __entry->msg_id,
+ __entry->wr_cnt,
+ __entry->signal_interval
+ )
+);
+
+#endif /* _TRACE_RTRS_SRV_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rtrs-srv-trace
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 4894e7329d88..22d7ba05e9fe 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -16,6 +16,7 @@
#include "rtrs-log.h"
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
+#include "rtrs-srv-trace.h"
MODULE_DESCRIPTION("RDMA Transport Server");
MODULE_LICENSE("GPL");
@@ -57,11 +58,6 @@ static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
return container_of(c, struct rtrs_srv_con, c);
}
-static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
-{
- return container_of(s, struct rtrs_srv_path, s);
-}
-
static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
enum rtrs_srv_state new_state)
{
@@ -375,6 +371,8 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
}
}
+ trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
+
if (need_inval && always_invalidate) {
wr = &inv_wr;
inv_wr.next = &rwr.wr;
@@ -1024,7 +1022,7 @@ static void process_read(struct rtrs_srv_con *con,
usr_len = le16_to_cpu(msg->usr_len);
data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]);
- ret = ctx->ops.rdma_ev(srv->priv, id, READ, data, data_len,
+ ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
data + data_len, usr_len);
if (ret) {
@@ -1077,7 +1075,7 @@ static void process_write(struct rtrs_srv_con *con,
usr_len = le16_to_cpu(req->usr_len);
data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]);
- ret = ctx->ops.rdma_ev(srv->priv, id, WRITE, data, data_len,
+ ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
data + data_len, usr_len);
if (ret) {
rtrs_err_rl(s,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 186a63c217df..2f8a638e36fa 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -91,6 +91,11 @@ struct rtrs_srv_path {
struct rtrs_srv_stats *stats;
};
+static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
+{
+ return container_of(s, struct rtrs_srv_path, s);
+}
+
struct rtrs_srv_sess {
struct list_head paths_list;
int paths_up;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 60fa0b0160f4..ed324b47d93a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -175,7 +175,7 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
* length error
*/
for (i = 0; i < num_sge; i++)
- if (WARN_ON(sge[i].length == 0))
+ if (WARN_ONCE(sge[i].length == 0, "sg %d is zero length\n", i))
return -EINVAL;
return rtrs_post_send(con->qp, head, &wr.wr, tail);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index 5e57a7ccc7fb..b48b53a7c143 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -139,7 +139,6 @@ struct rtrs_srv_ops {
* @priv: Private data set by rtrs_srv_set_sess_priv()
* @id: internal RTRS operation id
- * @dir: READ/WRITE
* @data: Pointer to (bidirectional) rdma memory area:
* - in case of %RTRS_SRV_RDMA_EV_RECV contains
* data sent by the client
@@ -151,7 +150,7 @@ struct rtrs_srv_ops {
* @usrlen: Size of the user message
*/
int (*rdma_ev)(void *priv,
- struct rtrs_srv_op *id, int dir,
+ struct rtrs_srv_op *id,
void *data, size_t datalen, const void *usr,
size_t usrlen);
/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d7f69e593a63..1075c2ac8fe2 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -699,7 +699,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
static void srp_path_rec_completion(int status,
struct sa_path_rec *pathrec,
- void *ch_ptr)
+ int num_paths, void *ch_ptr)
{
struct srp_rdma_ch *ch = ch_ptr;
struct srp_target_port *target = ch->target;
@@ -2789,7 +2789,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
static int srp_abort(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
- struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
+ struct srp_request *req = scsi_cmd_priv(scmnd);
u32 tag;
u16 ch_idx;
struct srp_rdma_ch *ch;
@@ -2797,8 +2797,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (!req)
- return SUCCESS;
tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
ch_idx = blk_mq_unique_tag_to_hwq(tag);
if (WARN_ON_ONCE(ch_idx >= target->ch_count))
@@ -2991,7 +2989,7 @@ static ssize_t local_ib_port_show(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sysfs_emit(buf, "%d\n", target->srp_host->port);
+ return sysfs_emit(buf, "%u\n", target->srp_host->port);
}
static DEVICE_ATTR_RO(local_ib_port);
@@ -3179,11 +3177,16 @@ static void srp_release_dev(struct device *dev)
struct srp_host *host =
container_of(dev, struct srp_host, dev);
- complete(&host->released);
+ kfree(host);
}
+static struct attribute *srp_class_attrs[];
+
+ATTRIBUTE_GROUPS(srp_class);
+
static struct class srp_class = {
.name = "infiniband_srp",
+ .dev_groups = srp_class_groups,
.dev_release = srp_release_dev
};
@@ -3884,12 +3887,19 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr,
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
- return sysfs_emit(buf, "%d\n", host->port);
+ return sysfs_emit(buf, "%u\n", host->port);
}
static DEVICE_ATTR_RO(port);
-static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
+static struct attribute *srp_class_attrs[] = {
+ &dev_attr_add_target.attr,
+ &dev_attr_ibdev.attr,
+ &dev_attr_port.attr,
+ NULL
+};
+
+static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
{
struct srp_host *host;
@@ -3899,33 +3909,24 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
INIT_LIST_HEAD(&host->target_list);
spin_lock_init(&host->target_lock);
- init_completion(&host->released);
mutex_init(&host->add_target_mutex);
host->srp_dev = device;
host->port = port;
+ device_initialize(&host->dev);
host->dev.class = &srp_class;
host->dev.parent = device->dev->dev.parent;
- dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
- port);
-
- if (device_register(&host->dev))
- goto free_host;
- if (device_create_file(&host->dev, &dev_attr_add_target))
- goto err_class;
- if (device_create_file(&host->dev, &dev_attr_ibdev))
- goto err_class;
- if (device_create_file(&host->dev, &dev_attr_port))
- goto err_class;
+ if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
+ port))
+ goto put_host;
+ if (device_add(&host->dev))
+ goto put_host;
return host;
-err_class:
- device_unregister(&host->dev);
-
-free_host:
- kfree(host);
-
+put_host:
+ device_del(&host->dev);
+ put_device(&host->dev);
return NULL;
}
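
Editorial aside (not part of the patch): the hunk above converts srp_add_port() from device_register() plus per-attribute device_create_file() calls to the two-step device_initialize()/device_add() pattern with a dev_groups attribute array. Once device_initialize() has run, the embedded struct device owns the allocation, so failure paths must drop the reference with put_device() -- which ends up in srp_release_dev() and therefore kfree() -- rather than freeing the host directly. A minimal sketch of that lifetime pattern, with hypothetical "foo" names, for orientation only:

#include <linux/device.h>
#include <linux/slab.h>

struct foo {
        struct device dev;
};

static void foo_release(struct device *dev)
{
        kfree(container_of(dev, struct foo, dev));
}

static struct foo *foo_add(struct device *parent)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;

        device_initialize(&f->dev);
        f->dev.parent = parent;
        f->dev.release = foo_release;

        if (dev_set_name(&f->dev, "foo%d", 0) || device_add(&f->dev)) {
                /* foo_release() frees f when the last reference drops */
                put_device(&f->dev);
                return NULL;
        }

        return f;
}
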
@@ -3937,7 +3938,7 @@ static void srp_rename_dev(struct ib_device *device, void *client_data)
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
char name[IB_DEVICE_NAME_MAX + 8];
- snprintf(name, sizeof(name), "srp-%s-%d",
+ snprintf(name, sizeof(name), "srp-%s-%u",
dev_name(&device->dev), host->port);
device_rename(&host->dev, name);
}
@@ -3949,7 +3950,7 @@ static int srp_add_one(struct ib_device *device)
struct ib_device_attr *attr = &device->attrs;
struct srp_host *host;
int mr_page_shift;
- unsigned int p;
+ u32 p;
u64 max_pages_per_mr;
unsigned int flags = 0;
@@ -4031,12 +4032,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
srp_dev = client_data;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
- device_unregister(&host->dev);
/*
- * Wait for the sysfs entry to go away, so that no new
- * target ports can be created.
+ * Remove the add_target sysfs entry so that no new target ports
+ * can be created.
*/
- wait_for_completion(&host->released);
+ device_del(&host->dev);
/*
* Remove all target ports.
@@ -4054,7 +4054,7 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
*/
flush_workqueue(srp_remove_wq);
- kfree(host);
+ put_device(&host->dev);
}
ib_dealloc_pd(srp_dev->pd);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 55a575e2cace..00b0068fda20 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -120,11 +120,10 @@ struct srp_device {
*/
struct srp_host {
struct srp_device *srp_dev;
- u8 port;
+ u32 port;
struct device dev;
struct list_head target_list;
spinlock_t target_lock;
- struct completion released;
struct list_head list;
struct mutex add_target_mutex;
};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 21cbe30d526f..3c3fae738c3e 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1421,7 +1421,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
- memcpy(srp_rsp + 1, sense_data, sense_data_len);
+ memcpy(srp_rsp->data, sense_data, sense_data_len);
}
return sizeof(*srp_rsp) + sense_data_len;
@@ -2300,7 +2300,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
goto free_recv_ring;
}
- strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
+ strscpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
be64_to_cpu(*(__be64 *)nexus->i_port_id),
be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
@@ -3191,7 +3191,7 @@ static int srpt_add_one(struct ib_device *device)
* if this HCA is gone bad and replaced by different HCA
*/
ret = sdev->cm_id ?
- ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
+ ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid)) :
0;
if (ret < 0) {
pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index fa8d1a466014..16231fe080b0 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -6,9 +6,6 @@
* Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru>
*/
-/*
- */
-
/* #define DEBUG */
#include <linux/input.h>
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 8229a9006917..c321cdabd214 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -6,9 +6,6 @@
* Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru>
*/
-/*
- */
-
/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index 11bbd1edfdb4..76ce41e58df0 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -7,9 +7,6 @@
* EMU10k1 - SB Live / Audigy - gameport driver for Linux
*/
-/*
- */
-
#include <asm/io.h>
#include <linux/module.h>
diff --git a/drivers/input/gameport/lightning.c b/drivers/input/gameport/lightning.c
index 87eeb4b5b5b5..2ce717b25a84 100644
--- a/drivers/input/gameport/lightning.c
+++ b/drivers/input/gameport/lightning.c
@@ -7,9 +7,6 @@
* PDPI Lightning 4 gamecard driver for Linux.
*/
-/*
- */
-
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index 2f80b7f1b736..91a8cd346e9b 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -8,9 +8,6 @@
* NS558 based standard IBM game port driver for Linux
*/
-/*
- */
-
#include <asm/io.h>
#include <linux/module.h>
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index b45ddb457002..5824bca02e5a 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -746,7 +746,7 @@ static void joydev_cleanup(struct joydev *joydev)
}
/*
- * These codes are copied from from hid-ids.h, unfortunately there is no common
+ * These codes are copied from hid-ids.h, unfortunately there is no common
* usb_ids/bt_ids.h header.
*/
#define USB_VENDOR_ID_SONY 0x054c
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 68475fad177c..fd1827baf27c 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -7,9 +7,6 @@
* FP-Gaming Assassin 3D joystick driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
index e0cfdc84763f..c0deff5d4282 100644
--- a/drivers/input/joystick/adc-joystick.c
+++ b/drivers/input/joystick/adc-joystick.c
@@ -26,8 +26,23 @@ struct adc_joystick {
struct adc_joystick_axis *axes;
struct iio_channel *chans;
int num_chans;
+ bool polled;
};
+static void adc_joystick_poll(struct input_dev *input)
+{
+ struct adc_joystick *joy = input_get_drvdata(input);
+ int i, val, ret;
+
+ for (i = 0; i < joy->num_chans; i++) {
+ ret = iio_read_channel_raw(&joy->chans[i], &val);
+ if (ret < 0)
+ return;
+ input_report_abs(input, joy->axes[i].code, val);
+ }
+ input_sync(input);
+}
+
static int adc_joystick_handle(const void *data, void *private)
{
struct adc_joystick *joy = private;
@@ -179,6 +194,7 @@ static int adc_joystick_probe(struct platform_device *pdev)
int error;
int bits;
int i;
+ unsigned int poll_interval;
joy = devm_kzalloc(dev, sizeof(*joy), GFP_KERNEL);
if (!joy)
@@ -192,8 +208,25 @@ static int adc_joystick_probe(struct platform_device *pdev)
return error;
}
- /* Count how many channels we got. NULL terminated. */
+ error = device_property_read_u32(dev, "poll-interval", &poll_interval);
+ if (error) {
+ /* -EINVAL means the property is absent. */
+ if (error != -EINVAL)
+ return error;
+ } else if (poll_interval == 0) {
+ dev_err(dev, "Unable to get poll-interval\n");
+ return -EINVAL;
+ } else {
+ joy->polled = true;
+ }
+
+ /*
+ * Count how many channels we got. NULL terminated.
+ * Do not check the storage size if using polling.
+ */
for (i = 0; joy->chans[i].indio_dev; i++) {
+ if (joy->polled)
+ continue;
bits = joy->chans[i].channel->scan_type.storagebits;
if (!bits || bits > 16) {
dev_err(dev, "Unsupported channel storage size\n");
@@ -215,23 +248,31 @@ static int adc_joystick_probe(struct platform_device *pdev)
joy->input = input;
input->name = pdev->name;
input->id.bustype = BUS_HOST;
- input->open = adc_joystick_open;
- input->close = adc_joystick_close;
error = adc_joystick_set_axes(dev, joy);
if (error)
return error;
- joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle, joy);
- if (IS_ERR(joy->buffer)) {
- dev_err(dev, "Unable to allocate callback buffer\n");
- return PTR_ERR(joy->buffer);
- }
+ if (joy->polled) {
+ input_setup_polling(input, adc_joystick_poll);
+ input_set_poll_interval(input, poll_interval);
+ } else {
+ input->open = adc_joystick_open;
+ input->close = adc_joystick_close;
+
+ joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle,
+ joy);
+ if (IS_ERR(joy->buffer)) {
+ dev_err(dev, "Unable to allocate callback buffer\n");
+ return PTR_ERR(joy->buffer);
+ }
- error = devm_add_action_or_reset(dev, adc_joystick_cleanup, joy->buffer);
- if (error) {
- dev_err(dev, "Unable to add action\n");
- return error;
+ error = devm_add_action_or_reset(dev, adc_joystick_cleanup,
+ joy->buffer);
+ if (error) {
+ dev_err(dev, "Unable to add action\n");
+ return error;
+ }
}
input_set_drvdata(input, joy);
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index e10d57bf1180..f1a720be458b 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -7,9 +7,6 @@
* Logitech ADI joystick family driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 12456a196dc7..3752dc2a2086 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -7,9 +7,6 @@
* Driver for Amiga joysticks for Linux/m68k
*/
-/*
- */
-
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3088c5b829f0..0c9e172a9818 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -7,9 +7,6 @@
* Analog joystick and gamepad driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 41e1936a847b..7ff78c9388bd 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -7,9 +7,6 @@
* Creative Labs Blaster GamePad Cobra driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index 434d265fa2e8..4fba28b1a1e7 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -10,9 +10,6 @@
* Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index d37645e496ff..41d5dac05448 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -11,9 +11,6 @@
* Raphael Assenat
*/
-/*
- */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 920feba967f6..abefbd1484df 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -7,9 +7,6 @@
* Genius Flight 2000 joystick driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index fe798bc87950..0e86b269a90e 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -7,9 +7,6 @@
* Gravis/Kensington GrIP protocol joystick and gamepad driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index 8eeacdb007c1..205eb6f8b84d 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -7,9 +7,6 @@
* Guillemot Digital Interface Protocol driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index ca22d84e5c84..03a9f0829f7e 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -10,9 +10,6 @@
* InterAct digital gamepad/joystick driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index 70f63f9550e7..865652a7821d 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -8,9 +8,6 @@
* out of the joystick port into the syslog ...
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/gameport.h>
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index edb8e1982e26..017ef8c6170b 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -7,9 +7,6 @@
* Magellan and Space Mouse 6dof controller driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 8e9672deb1eb..7282301c3ae7 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -7,9 +7,6 @@
* Microsoft SideWinder joystick family driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index a85a4f33aea8..fa8ec533cd69 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -11,9 +11,6 @@
* SpaceTec SpaceBall 2003/3003/4000 FLX driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index 557171483256..dbbc69f17c89 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -10,9 +10,6 @@
* SpaceTec SpaceOrb 360 and Avenger 6dof controller driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index c20425f52bd8..530de468cb61 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -8,9 +8,6 @@
* Gravis Stinger gamepad driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 7416de84b955..93562ecc0ca1 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -10,9 +10,6 @@
* ThrustMaster DirectConnect (BSP) joystick family driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index dfe7a2cacce2..dfb9c684651f 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -10,9 +10,6 @@
* TurboGraFX parallel port interface driver for Linux.
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/parport.h>
#include <linux/input.h>
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 174c69a188fb..9b6792ac27f1 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -32,9 +32,6 @@
* Arndt Schoenewald <arndt@quelltext.com>
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index 42bdbc28d95d..f66bddf145c2 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -7,9 +7,6 @@
* Logitech WingMan Warrior joystick driver for Linux
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 18190b529bca..2959d80f7fdb 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -61,6 +61,7 @@
* Later changes can be tracked in SCM.
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/rcupdate.h>
@@ -80,6 +81,9 @@
#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
#define MAP_STICKS_TO_NULL (1 << 2)
#define MAP_SELECT_BUTTON (1 << 3)
+#define MAP_PADDLES (1 << 4)
+#define MAP_PROFILE_BUTTON (1 << 5)
+
#define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \
MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
@@ -89,6 +93,17 @@
#define XTYPE_XBOXONE 3
#define XTYPE_UNKNOWN 4
+/* Send power-off packet to xpad360w after holding the mode button for this many
+ * seconds
+ */
+#define XPAD360W_POWEROFF_TIMEOUT 5
+
+#define PKT_XB 0
+#define PKT_XBE1 1
+#define PKT_XBE2_FW_OLD 2
+#define PKT_XBE2_FW_5_EARLY 3
+#define PKT_XBE2_FW_5_11 4
+
static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
@@ -111,8 +126,11 @@ static const struct xpad_device {
char *name;
u8 mapping;
u8 xtype;
+ u8 packet_type;
} xpad_device[] = {
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -128,9 +146,11 @@ static const struct xpad_device {
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
- { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE },
+ { 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
{ 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ { 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE },
{ 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
@@ -244,6 +264,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
@@ -260,6 +281,7 @@ static const struct xpad_device {
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
@@ -325,6 +347,7 @@ static const struct xpad_device {
{ 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
@@ -334,6 +357,14 @@ static const struct xpad_device {
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE },
+ { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -390,6 +421,13 @@ static const signed short xpad_abs_triggers[] = {
-1
};
+/* used when the controller has extra paddle buttons */
+static const signed short xpad_btn_paddles[] = {
+ BTN_TRIGGER_HAPPY5, BTN_TRIGGER_HAPPY6, /* paddle upper right, lower right */
+ BTN_TRIGGER_HAPPY7, BTN_TRIGGER_HAPPY8, /* paddle upper left, lower left */
+ -1 /* terminating entry */
+};
+
/*
* Xbox 360 has a vendor-specific class, so we cannot match it with only
* USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we
@@ -419,6 +457,7 @@ static const signed short xpad_abs_triggers[] = {
static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
+ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
@@ -429,6 +468,7 @@ static const struct usb_device_id xpad_table[] = {
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */
+ XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
@@ -450,8 +490,12 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
+ XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
{ }
};
@@ -473,13 +517,52 @@ struct xboxone_init_packet {
.len = ARRAY_SIZE(_data), \
}
+/*
+ * starting with xbox one, the game input protocol is used
+ * magic numbers are taken from
+ * - https://github.com/xpadneo/gip-dissector/blob/main/src/gip-dissector.lua
+ * - https://github.com/medusalix/xone/blob/master/bus/protocol.c
+ */
+#define GIP_CMD_ACK 0x01
+#define GIP_CMD_IDENTIFY 0x04
+#define GIP_CMD_POWER 0x05
+#define GIP_CMD_AUTHENTICATE 0x06
+#define GIP_CMD_VIRTUAL_KEY 0x07
+#define GIP_CMD_RUMBLE 0x09
+#define GIP_CMD_LED 0x0a
+#define GIP_CMD_FIRMWARE 0x0c
+#define GIP_CMD_INPUT 0x20
+
+#define GIP_SEQ0 0x00
+
+#define GIP_OPT_ACK 0x10
+#define GIP_OPT_INTERNAL 0x20
+
+/*
+ * length of the command payload encoded with
+ * https://en.wikipedia.org/wiki/LEB128
+ * which is a no-op for N < 128
+ */
+#define GIP_PL_LEN(N) (N)
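
Editorial aside (not part of the patch): GIP encodes the payload length as an unsigned LEB128 value, which is why GIP_PL_LEN() can be a plain identity macro for the short payloads this driver sends -- any value below 128 encodes as a single byte equal to itself. A sketch of a general encoder, with the hypothetical name leb128_encode(), would look like:

static inline size_t leb128_encode(u32 val, u8 *out)
{
        size_t len = 0;

        do {
                u8 byte = val & 0x7f;           /* low seven bits */

                val >>= 7;
                if (val)
                        byte |= 0x80;           /* continuation bit */
                out[len++] = byte;              /* values < 128 encode as themselves */
        } while (val);

        return len;
}
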
+
+/*
+ * payload specific defines
+ */
+#define GIP_PWR_ON 0x00
+#define GIP_LED_ON 0x01
+
+#define GIP_MOTOR_R BIT(0)
+#define GIP_MOTOR_L BIT(1)
+#define GIP_MOTOR_RT BIT(2)
+#define GIP_MOTOR_LT BIT(3)
+#define GIP_MOTOR_ALL (GIP_MOTOR_R | GIP_MOTOR_L | GIP_MOTOR_RT | GIP_MOTOR_LT)
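
Editorial aside (not part of the patch): every GIP message the driver builds below uses the same four-byte header -- command, options, sequence number, LEB128 payload length -- followed by the payload bytes. A hedged sketch using the defines above (the helper names gip_fill_header() and gip_build_power_on() are hypothetical, shown only to make the packet arrays below easier to read):

static void gip_fill_header(u8 *buf, u8 cmd, u8 options, u8 seq, u8 pl_len)
{
        buf[0] = cmd;           /* e.g. GIP_CMD_POWER */
        buf[1] = options;       /* e.g. GIP_OPT_INTERNAL */
        buf[2] = seq;           /* sequence number, GIP_SEQ0 for init packets */
        buf[3] = pl_len;        /* GIP_PL_LEN(n); n payload bytes follow */
}

/* Reconstructs the xboxone_power_on packet defined just below. */
static void gip_build_power_on(u8 buf[5])
{
        gip_fill_header(buf, GIP_CMD_POWER, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(1));
        buf[4] = GIP_PWR_ON;
}
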
/*
* This packet is required for all Xbox One pads with 2015
* or later firmware installed (or present from the factory).
*/
-static const u8 xboxone_fw2015_init[] = {
- 0x05, 0x20, 0x00, 0x01, 0x00
+static const u8 xboxone_power_on[] = {
+ GIP_CMD_POWER, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(1), GIP_PWR_ON
};
/*
@@ -489,7 +572,16 @@ static const u8 xboxone_fw2015_init[] = {
* Bluetooth mode.
*/
static const u8 xboxone_s_init[] = {
- 0x05, 0x20, 0x00, 0x0f, 0x06
+ GIP_CMD_POWER, GIP_OPT_INTERNAL, GIP_SEQ0, 0x0f, 0x06
+};
+
+/*
+ * This packet is required to get additional input data
+ * from Xbox One Elite Series 2 (0x045e:0x0b00) pads.
+ * We mostly do this right now to get paddle data
+ */
+static const u8 extra_input_packet_init[] = {
+ 0x4d, 0x10, 0x01, 0x02, 0x07, 0x00
};
/*
@@ -497,9 +589,9 @@ static const u8 xboxone_s_init[] = {
* (0x0e6f:0x0165) to finish initialization and for Hori pads
* (0x0f0d:0x0067) to make the analog sticks work.
*/
-static const u8 xboxone_hori_init[] = {
- 0x01, 0x20, 0x00, 0x09, 0x00, 0x04, 0x20, 0x3a,
- 0x00, 0x00, 0x00, 0x80, 0x00
+static const u8 xboxone_hori_ack_id[] = {
+ GIP_CMD_ACK, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(9),
+ 0x00, GIP_CMD_IDENTIFY, GIP_OPT_INTERNAL, 0x3a, 0x00, 0x00, 0x00, 0x80, 0x00
};
/*
@@ -507,8 +599,8 @@ static const u8 xboxone_hori_init[] = {
* sending input reports. These pads include: (0x0e6f:0x02ab),
* (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_init1[] = {
- 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+static const u8 xboxone_pdp_led_on[] = {
+ GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
};
/*
@@ -516,8 +608,8 @@ static const u8 xboxone_pdp_init1[] = {
* sending input reports. These pads include: (0x0e6f:0x02ab),
* (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_init2[] = {
- 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+static const u8 xboxone_pdp_auth[] = {
+ GIP_CMD_AUTHENTICATE, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(2), 0x01, 0x00
};
/*
@@ -525,8 +617,8 @@ static const u8 xboxone_pdp_init2[] = {
* sending input reports. One of those pads is (0x24c6:0x543a).
*/
static const u8 xboxone_rumblebegin_init[] = {
- 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
- 0x1D, 0x1D, 0xFF, 0x00, 0x00
+ GIP_CMD_RUMBLE, 0x00, GIP_SEQ0, GIP_PL_LEN(9),
+ 0x00, GIP_MOTOR_ALL, 0x00, 0x00, 0x1D, 0x1D, 0xFF, 0x00, 0x00
};
/*
@@ -536,8 +628,8 @@ static const u8 xboxone_rumblebegin_init[] = {
* spin up to enough speed to actually vibrate the gamepad.
*/
static const u8 xboxone_rumbleend_init[] = {
- 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00
+ GIP_CMD_RUMBLE, 0x00, GIP_SEQ0, GIP_PL_LEN(9),
+ 0x00, GIP_MOTOR_ALL, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/*
@@ -547,13 +639,14 @@ static const u8 xboxone_rumbleend_init[] = {
* packet is going to be sent.
*/
static const struct xboxone_init_packet xboxone_init_packets[] = {
- XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
- XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
- XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_ack_id),
+ XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_ack_id),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_power_on),
XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
+ XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
@@ -608,14 +701,17 @@ struct usb_xpad {
int mapping; /* map d-pad to buttons or to axes */
int xtype; /* type of xbox device */
+ int packet_type; /* type of the extended packet */
int pad_nr; /* the order x360 pads were attached */
const char *name; /* name of the device */
struct work_struct work; /* init/remove device from callback */
+ time64_t mode_btn_down_ts;
};
static int xpad_init_input(struct usb_xpad *xpad);
static void xpad_deinit_input(struct usb_xpad *xpad);
static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num);
+static void xpad360w_poweroff_controller(struct usb_xpad *xpad);
/*
* xpad_process_packet
@@ -656,10 +752,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & 0x04);
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & 0x08);
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & 0x01);
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & 0x02);
+ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & BIT(3));
+ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & BIT(1));
} else {
input_report_abs(dev, ABS_HAT0X,
!!(data[2] & 0x08) - !!(data[2] & 0x04));
@@ -668,10 +764,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
}
/* start/back buttons and stick press left/right */
- input_report_key(dev, BTN_START, data[2] & 0x10);
- input_report_key(dev, BTN_SELECT, data[2] & 0x20);
- input_report_key(dev, BTN_THUMBL, data[2] & 0x40);
- input_report_key(dev, BTN_THUMBR, data[2] & 0x80);
+ input_report_key(dev, BTN_START, data[2] & BIT(4));
+ input_report_key(dev, BTN_SELECT, data[2] & BIT(5));
+ input_report_key(dev, BTN_THUMBL, data[2] & BIT(6));
+ input_report_key(dev, BTN_THUMBR, data[2] & BIT(7));
/* "analog" buttons A, B, X, Y */
input_report_key(dev, BTN_A, data[4]);
@@ -683,6 +779,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
input_report_key(dev, BTN_C, data[8]);
input_report_key(dev, BTN_Z, data[9]);
+ /* Profile button has a value of 0-3, so it is reported as an axis */
+ if (xpad->mapping & MAP_PROFILE_BUTTON)
+ input_report_abs(dev, ABS_PROFILE, data[34]);
+
input_sync(dev);
}
@@ -706,10 +806,10 @@ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & 0x04);
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & 0x08);
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & 0x01);
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & 0x02);
+ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & BIT(3));
+ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & BIT(1));
}
/*
@@ -727,21 +827,21 @@ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
}
/* start/back buttons */
- input_report_key(dev, BTN_START, data[2] & 0x10);
- input_report_key(dev, BTN_SELECT, data[2] & 0x20);
+ input_report_key(dev, BTN_START, data[2] & BIT(4));
+ input_report_key(dev, BTN_SELECT, data[2] & BIT(5));
/* stick press left/right */
- input_report_key(dev, BTN_THUMBL, data[2] & 0x40);
- input_report_key(dev, BTN_THUMBR, data[2] & 0x80);
+ input_report_key(dev, BTN_THUMBL, data[2] & BIT(6));
+ input_report_key(dev, BTN_THUMBR, data[2] & BIT(7));
/* buttons A,B,X,Y,TL,TR and MODE */
- input_report_key(dev, BTN_A, data[3] & 0x10);
- input_report_key(dev, BTN_B, data[3] & 0x20);
- input_report_key(dev, BTN_X, data[3] & 0x40);
- input_report_key(dev, BTN_Y, data[3] & 0x80);
- input_report_key(dev, BTN_TL, data[3] & 0x01);
- input_report_key(dev, BTN_TR, data[3] & 0x02);
- input_report_key(dev, BTN_MODE, data[3] & 0x04);
+ input_report_key(dev, BTN_A, data[3] & BIT(4));
+ input_report_key(dev, BTN_B, data[3] & BIT(5));
+ input_report_key(dev, BTN_X, data[3] & BIT(6));
+ input_report_key(dev, BTN_Y, data[3] & BIT(7));
+ input_report_key(dev, BTN_TL, data[3] & BIT(0));
+ input_report_key(dev, BTN_TR, data[3] & BIT(1));
+ input_report_key(dev, BTN_MODE, data[3] & BIT(2));
if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
/* left stick */
@@ -767,6 +867,23 @@ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
}
input_sync(dev);
+
+ /* XBOX360W controllers can't be turned off without driver assistance */
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ if (xpad->mode_btn_down_ts > 0 && xpad->pad_present &&
+ ((ktime_get_seconds() - xpad->mode_btn_down_ts) >=
+ XPAD360W_POWEROFF_TIMEOUT)) {
+ xpad360w_poweroff_controller(xpad);
+ xpad->mode_btn_down_ts = 0;
+ return;
+ }
+
+ /* mode button down/up */
+ if (data[3] & BIT(2))
+ xpad->mode_btn_down_ts = ktime_get_seconds();
+ else
+ xpad->mode_btn_down_ts = 0;
+ }
}
static void xpad_presence_work(struct work_struct *work)
@@ -846,87 +963,154 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
{
struct input_dev *dev = xpad->dev;
+ bool do_sync = false;
/* the xbox button has its own special report */
- if (data[0] == 0X07) {
+ if (data[0] == GIP_CMD_VIRTUAL_KEY) {
/*
* The Xbox One S controller requires these reports to be
* acked otherwise it continues sending them forever and
* won't report further mode button events.
*/
- if (data[1] == 0x30)
+ if (data[1] == (GIP_OPT_ACK | GIP_OPT_INTERNAL))
xpadone_ack_mode_report(xpad, data[2]);
- input_report_key(dev, BTN_MODE, data[4] & 0x01);
+ input_report_key(dev, BTN_MODE, data[4] & GENMASK(1, 0));
input_sync(dev);
- return;
- }
- /* check invalid packet */
- else if (data[0] != 0X20)
- return;
- /* menu/view buttons */
- input_report_key(dev, BTN_START, data[4] & 0x04);
- input_report_key(dev, BTN_SELECT, data[4] & 0x08);
- if (xpad->mapping & MAP_SELECT_BUTTON)
- input_report_key(dev, KEY_RECORD, data[22] & 0x01);
+ do_sync = true;
+ } else if (data[0] == GIP_CMD_FIRMWARE) {
+ /* Some packet formats force us to use this separate packet to poll paddle inputs */
+ if (xpad->packet_type == PKT_XBE2_FW_5_11) {
+ /* Mute paddles if controller is in a custom profile slot
+ * Checked by looking at the active profile slot to
+ * verify it's the default slot
+ */
+ if (data[19] != 0)
+ data[18] = 0;
- /* buttons A,B,X,Y */
- input_report_key(dev, BTN_A, data[4] & 0x10);
- input_report_key(dev, BTN_B, data[4] & 0x20);
- input_report_key(dev, BTN_X, data[4] & 0x40);
- input_report_key(dev, BTN_Y, data[4] & 0x80);
+ /* Elite Series 2 split packet paddle bits */
+ input_report_key(dev, BTN_TRIGGER_HAPPY5, data[18] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY6, data[18] & BIT(1));
+ input_report_key(dev, BTN_TRIGGER_HAPPY7, data[18] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY8, data[18] & BIT(3));
- /* digital pad */
- if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
- /* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & 0x04);
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & 0x08);
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & 0x01);
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & 0x02);
- } else {
- input_report_abs(dev, ABS_HAT0X,
- !!(data[5] & 0x08) - !!(data[5] & 0x04));
- input_report_abs(dev, ABS_HAT0Y,
- !!(data[5] & 0x02) - !!(data[5] & 0x01));
- }
-
- /* TL/TR */
- input_report_key(dev, BTN_TL, data[5] & 0x10);
- input_report_key(dev, BTN_TR, data[5] & 0x20);
+ do_sync = true;
+ }
+ } else if (data[0] == GIP_CMD_INPUT) { /* The main valid packet type for inputs */
+ /* menu/view buttons */
+ input_report_key(dev, BTN_START, data[4] & BIT(2));
+ input_report_key(dev, BTN_SELECT, data[4] & BIT(3));
+ if (xpad->mapping & MAP_SELECT_BUTTON)
+ input_report_key(dev, KEY_RECORD, data[22] & BIT(0));
+
+ /* buttons A,B,X,Y */
+ input_report_key(dev, BTN_A, data[4] & BIT(4));
+ input_report_key(dev, BTN_B, data[4] & BIT(5));
+ input_report_key(dev, BTN_X, data[4] & BIT(6));
+ input_report_key(dev, BTN_Y, data[4] & BIT(7));
+
+ /* digital pad */
+ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
+ /* dpad as buttons (left, right, up, down) */
+ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & BIT(3));
+ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & BIT(1));
+ } else {
+ input_report_abs(dev, ABS_HAT0X,
+ !!(data[5] & 0x08) - !!(data[5] & 0x04));
+ input_report_abs(dev, ABS_HAT0Y,
+ !!(data[5] & 0x02) - !!(data[5] & 0x01));
+ }
- /* stick press left/right */
- input_report_key(dev, BTN_THUMBL, data[5] & 0x40);
- input_report_key(dev, BTN_THUMBR, data[5] & 0x80);
+ /* TL/TR */
+ input_report_key(dev, BTN_TL, data[5] & BIT(4));
+ input_report_key(dev, BTN_TR, data[5] & BIT(5));
+
+ /* stick press left/right */
+ input_report_key(dev, BTN_THUMBL, data[5] & BIT(6));
+ input_report_key(dev, BTN_THUMBR, data[5] & BIT(7));
+
+ if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
+ /* left stick */
+ input_report_abs(dev, ABS_X,
+ (__s16) le16_to_cpup((__le16 *)(data + 10)));
+ input_report_abs(dev, ABS_Y,
+ ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
+
+ /* right stick */
+ input_report_abs(dev, ABS_RX,
+ (__s16) le16_to_cpup((__le16 *)(data + 14)));
+ input_report_abs(dev, ABS_RY,
+ ~(__s16) le16_to_cpup((__le16 *)(data + 16)));
+ }
- if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
- /* left stick */
- input_report_abs(dev, ABS_X,
- (__s16) le16_to_cpup((__le16 *)(data + 10)));
- input_report_abs(dev, ABS_Y,
- ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
+ /* triggers left/right */
+ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+ input_report_key(dev, BTN_TL2,
+ (__u16) le16_to_cpup((__le16 *)(data + 6)));
+ input_report_key(dev, BTN_TR2,
+ (__u16) le16_to_cpup((__le16 *)(data + 8)));
+ } else {
+ input_report_abs(dev, ABS_Z,
+ (__u16) le16_to_cpup((__le16 *)(data + 6)));
+ input_report_abs(dev, ABS_RZ,
+ (__u16) le16_to_cpup((__le16 *)(data + 8)));
+ }
- /* right stick */
- input_report_abs(dev, ABS_RX,
- (__s16) le16_to_cpup((__le16 *)(data + 14)));
- input_report_abs(dev, ABS_RY,
- ~(__s16) le16_to_cpup((__le16 *)(data + 16)));
- }
+ /* paddle handling */
+ /* based on SDL's SDL_hidapi_xboxone.c */
+ if (xpad->mapping & MAP_PADDLES) {
+ if (xpad->packet_type == PKT_XBE1) {
+ /* Mute paddles if controller has a custom mapping applied.
+ * Checked by comparing the current mapping
+ * config against the factory mapping config
+ */
+ if (memcmp(&data[4], &data[18], 2) != 0)
+ data[32] = 0;
+
+ /* OG Elite Series Controller paddle bits */
+ input_report_key(dev, BTN_TRIGGER_HAPPY5, data[32] & BIT(1));
+ input_report_key(dev, BTN_TRIGGER_HAPPY6, data[32] & BIT(3));
+ input_report_key(dev, BTN_TRIGGER_HAPPY7, data[32] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY8, data[32] & BIT(2));
+ } else if (xpad->packet_type == PKT_XBE2_FW_OLD) {
+ /* Mute paddles if controller has a custom mapping applied.
+ * Checked by comparing the current mapping
+ * config against the factory mapping config
+ */
+ if (data[19] != 0)
+ data[18] = 0;
+
+ /* Elite Series 2 4.x firmware paddle bits */
+ input_report_key(dev, BTN_TRIGGER_HAPPY5, data[18] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY6, data[18] & BIT(1));
+ input_report_key(dev, BTN_TRIGGER_HAPPY7, data[18] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY8, data[18] & BIT(3));
+ } else if (xpad->packet_type == PKT_XBE2_FW_5_EARLY) {
+ /* Mute paddles if controller has a custom mapping applied.
+ * Checked by comparing the current mapping
+ * config against the factory mapping config
+ */
+ if (data[23] != 0)
+ data[22] = 0;
+
+ /* Elite Series 2 5.x firmware paddle bits
+ * (before the packet was split)
+ */
+ input_report_key(dev, BTN_TRIGGER_HAPPY5, data[22] & BIT(0));
+ input_report_key(dev, BTN_TRIGGER_HAPPY6, data[22] & BIT(1));
+ input_report_key(dev, BTN_TRIGGER_HAPPY7, data[22] & BIT(2));
+ input_report_key(dev, BTN_TRIGGER_HAPPY8, data[22] & BIT(3));
+ }
+ }
- /* triggers left/right */
- if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
- input_report_key(dev, BTN_TL2,
- (__u16) le16_to_cpup((__le16 *)(data + 6)));
- input_report_key(dev, BTN_TR2,
- (__u16) le16_to_cpup((__le16 *)(data + 8)));
- } else {
- input_report_abs(dev, ABS_Z,
- (__u16) le16_to_cpup((__le16 *)(data + 6)));
- input_report_abs(dev, ABS_RZ,
- (__u16) le16_to_cpup((__le16 *)(data + 8)));
+ do_sync = true;
}
- input_sync(dev);
+ if (do_sync)
+ input_sync(dev);
}
static void xpad_irq_in(struct urb *urb)
@@ -1226,8 +1410,8 @@ static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num)
struct xpad_output_packet *packet =
&xpad->out_packets[XPAD_OUT_CMD_IDX];
static const u8 mode_report_ack[] = {
- 0x01, 0x20, 0x00, 0x09, 0x00, 0x07, 0x20, 0x02,
- 0x00, 0x00, 0x00, 0x00, 0x00
+ GIP_CMD_ACK, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(9),
+ 0x00, GIP_CMD_VIRTUAL_KEY, GIP_OPT_INTERNAL, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00
};
spin_lock_irqsave(&xpad->odata_lock, flags);
@@ -1305,14 +1489,14 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
break;
case XTYPE_XBOXONE:
- packet->data[0] = 0x09; /* activate rumble */
+ packet->data[0] = GIP_CMD_RUMBLE; /* activate rumble */
packet->data[1] = 0x00;
packet->data[2] = xpad->odata_serial++;
- packet->data[3] = 0x09;
+ packet->data[3] = GIP_PL_LEN(9);
packet->data[4] = 0x00;
- packet->data[5] = 0x0F;
- packet->data[6] = 0x00;
- packet->data[7] = 0x00;
+ packet->data[5] = GIP_MOTOR_ALL;
+ packet->data[6] = 0x00; /* left trigger */
+ packet->data[7] = 0x00; /* right trigger */
packet->data[8] = strong / 512; /* left actuator */
packet->data[9] = weak / 512; /* right actuator */
packet->data[10] = 0xFF; /* on period */
@@ -1622,6 +1806,9 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
case ABS_HAT0Y: /* the d-pad (only if dpad is mapped to axes */
input_set_abs_params(input_dev, abs, -1, 1, 0, 0);
break;
+ case ABS_PROFILE: /* 4 value profile button (such as on XAC) */
+ input_set_abs_params(input_dev, abs, 0, 4, 0, 0);
+ break;
default:
input_set_abs_params(input_dev, abs, 0, 0, 0, 0);
break;
@@ -1693,6 +1880,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad_btn_pad[i]);
}
+ /* set up paddles if the controller has them */
+ if (xpad->mapping & MAP_PADDLES) {
+ for (i = 0; xpad_btn_paddles[i] >= 0; i++)
+ input_set_capability(input_dev, EV_KEY, xpad_btn_paddles[i]);
+ }
+
/*
* This should be a simple else block. However historically
* xbox360w has mapped DPAD to buttons while xbox360 did not. This
@@ -1714,6 +1907,10 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
}
+ /* setup profile button as an axis with 4 possible values */
+ if (xpad->mapping & MAP_PROFILE_BUTTON)
+ xpad_set_up_abs(input_dev, ABS_PROFILE);
+
error = xpad_init_ff(xpad);
if (error)
goto err_free_input;
@@ -1779,6 +1976,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping = xpad_device[i].mapping;
xpad->xtype = xpad_device[i].xtype;
xpad->name = xpad_device[i].name;
+ xpad->packet_type = PKT_XB;
INIT_WORK(&xpad->work, xpad_presence_work);
if (xpad->xtype == XTYPE_UNKNOWN) {
@@ -1844,6 +2042,38 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
usb_set_intfdata(intf, xpad);
+ /* Packet type detection */
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x045e) { /* Microsoft controllers */
+ if (le16_to_cpu(udev->descriptor.idProduct) == 0x02e3) {
+ /* The original elite controller always uses the oldest
+ * type of extended packet
+ */
+ xpad->packet_type = PKT_XBE1;
+ } else if (le16_to_cpu(udev->descriptor.idProduct) == 0x0b00) {
+ /* The elite 2 controller has seen multiple packet
+ * revisions. These are tied to specific firmware
+ * versions
+ */
+ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x0500) {
+ /* This is the format that the Elite 2 used
+ * prior to the BLE update
+ */
+ xpad->packet_type = PKT_XBE2_FW_OLD;
+ } else if (le16_to_cpu(udev->descriptor.bcdDevice) <
+ 0x050b) {
+ /* This is the format that the Elite 2 used
+ * prior to the update that split the packet
+ */
+ xpad->packet_type = PKT_XBE2_FW_5_EARLY;
+ } else {
+ /* The split packet format that was introduced
+ * in firmware v5.11
+ */
+ xpad->packet_type = PKT_XBE2_FW_5_11;
+ }
+ }
+ }
+
if (xpad->xtype == XTYPE_XBOX360W) {
/*
* Submit the int URB immediately rather than waiting for open
@@ -1972,7 +2202,6 @@ static struct usb_driver xpad_driver = {
.disconnect = xpad_disconnect,
.suspend = xpad_suspend,
.resume = xpad_resume,
- .reset_resume = xpad_resume,
.id_table = xpad_table,
};
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index d5531179b01f..3f2460e2b095 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -28,9 +28,6 @@
* coder :-(
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a20ee693b22b..00292118b79b 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -40,6 +40,9 @@ config KEYBOARD_ADP5520
config KEYBOARD_ADP5588
tristate "ADP5588/87 I2C QWERTY Keypad and IO Expander"
depends on I2C
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use a ADP5588/87 attached to your
system I2C bus.
@@ -186,7 +189,7 @@ config KEYBOARD_QT2160
config KEYBOARD_CLPS711X
tristate "CLPS711X Keypad support"
- depends on OF_GPIO && (ARCH_CLPS711X || COMPILE_TEST)
+ depends on ARCH_CLPS711X || COMPILE_TEST
select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on the Cirrus Logic
@@ -524,6 +527,19 @@ config KEYBOARD_OPENCORES
To compile this driver as a module, choose M here; the
module will be called opencores-kbd.
+config KEYBOARD_PINEPHONE
+ tristate "Pine64 PinePhone Keyboard"
+ depends on I2C && REGULATOR
+ select CRC8
+ select INPUT_MATRIXKMAP
+ help
+ Say Y here to enable support for the keyboard in the Pine64 PinePhone
+ keyboard case. This driver supports the FLOSS firmware available at
+ https://megous.com/git/pinephone-keyboard/
+
+ To compile this driver as a module, choose M here; the
+ module will be called pinephone-keyboard.
+
config KEYBOARD_PXA27x
tristate "PXA27x/PXA3xx keypad support"
depends on PXA27x || PXA3xx || ARCH_MMP
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 721936e90290..5f67196bb2c1 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_KEYBOARD_NSPIRE) += nspire-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
+obj-$(CONFIG_KEYBOARD_PINEPHONE) += pinephone-keyboard.o
obj-$(CONFIG_KEYBOARD_PMIC8XXX) += pmic8xxx-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index e2719737360a..7cd83c8e7110 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -8,27 +8,163 @@
* Copyright (C) 2008-2010 Analog Devices Inc.
*/
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ktime.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
-#include <linux/platform_data/adp5588.h>
+#define DEV_ID 0x00 /* Device ID */
+#define CFG 0x01 /* Configuration Register1 */
+#define INT_STAT 0x02 /* Interrupt Status Register */
+#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
+#define KEY_EVENTA 0x04 /* Key Event Register A */
+#define KEY_EVENTB 0x05 /* Key Event Register B */
+#define KEY_EVENTC 0x06 /* Key Event Register C */
+#define KEY_EVENTD 0x07 /* Key Event Register D */
+#define KEY_EVENTE 0x08 /* Key Event Register E */
+#define KEY_EVENTF 0x09 /* Key Event Register F */
+#define KEY_EVENTG 0x0A /* Key Event Register G */
+#define KEY_EVENTH 0x0B /* Key Event Register H */
+#define KEY_EVENTI 0x0C /* Key Event Register I */
+#define KEY_EVENTJ 0x0D /* Key Event Register J */
+#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
+#define UNLOCK1 0x0F /* Unlock Key1 */
+#define UNLOCK2 0x10 /* Unlock Key2 */
+#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
+#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
+#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
+#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
+#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
+#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
+#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
+#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
+#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
+#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
+#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
+#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
+#define GPI_EM1 0x20 /* GPI Event Mode 1 */
+#define GPI_EM2 0x21 /* GPI Event Mode 2 */
+#define GPI_EM3 0x22 /* GPI Event Mode 3 */
+#define GPIO_DIR1 0x23 /* GPIO Data Direction */
+#define GPIO_DIR2 0x24 /* GPIO Data Direction */
+#define GPIO_DIR3 0x25 /* GPIO Data Direction */
+#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
+#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
+#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
+#define DEBOUNCE_DIS1 0x29 /* Debounce Disable */
+#define DEBOUNCE_DIS2 0x2A /* Debounce Disable */
+#define DEBOUNCE_DIS3 0x2B /* Debounce Disable */
+#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
+#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
+#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
+#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
+#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
+#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
+#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
+#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */
+#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */
+#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */
+#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
+#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
+#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
+#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
+#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
+#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
+#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
+#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
+
+#define ADP5588_DEVICE_ID_MASK 0xF
+
+ /* Configuration Register1 */
+#define ADP5588_AUTO_INC BIT(7)
+#define ADP5588_GPIEM_CFG BIT(6)
+#define ADP5588_OVR_FLOW_M BIT(5)
+#define ADP5588_INT_CFG BIT(4)
+#define ADP5588_OVR_FLOW_IEN BIT(3)
+#define ADP5588_K_LCK_IM BIT(2)
+#define ADP5588_GPI_IEN BIT(1)
+#define ADP5588_KE_IEN BIT(0)
+
+/* Interrupt Status Register */
+#define ADP5588_CMP2_INT BIT(5)
+#define ADP5588_CMP1_INT BIT(4)
+#define ADP5588_OVR_FLOW_INT BIT(3)
+#define ADP5588_K_LCK_INT BIT(2)
+#define ADP5588_GPI_INT BIT(1)
+#define ADP5588_KE_INT BIT(0)
+
+/* Key Lock and Event Counter Register */
+#define ADP5588_K_LCK_EN BIT(6)
+#define ADP5588_LCK21 0x30
+#define ADP5588_KEC GENMASK(3, 0)
+
+#define ADP5588_MAXGPIO 18
+#define ADP5588_BANK(offs) ((offs) >> 3)
+#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
+
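As a side note on the two helpers just introduced: a minimal standalone sketch of how ADP5588_BANK() and ADP5588_BIT() split a GPIO offset into one of the chip's three 8-bit register banks and a bit mask within that bank (the loop bound comes from ADP5588_MAXGPIO, everything else here is illustrative):

#include <stdio.h>

#define ADP5588_MAXGPIO		18
#define ADP5588_BANK(offs)	((offs) >> 3)
#define ADP5588_BIT(offs)	(1U << ((offs) & 0x7))

int main(void)
{
	/* Each bank covers 8 pins, so 18 GPIOs span banks 0..2. */
	for (unsigned int offs = 0; offs < ADP5588_MAXGPIO; offs++)
		printf("gpio %2u -> bank %u, bit mask 0x%02x\n",
		       offs, ADP5588_BANK(offs), ADP5588_BIT(offs));

	return 0;
}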
+/* Put one of these structures in i2c_board_info platform_data */
+
+/*
+ * 128 so it fits matrix-keymap maximum number of keys when the full
+ * 10cols * 8rows are used.
+ */
+#define ADP5588_KEYMAPSIZE 128
+
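The 128-entry figure follows from how matrix scan codes are packed rather than from 10 * 8 = 80 keys alone: the keymap is indexed by MATRIX_SCAN_CODE(row, col, row_shift) with row_shift = get_count_order(cols), so with the full 8 rows and 10 columns the largest index is (7 << 4) + 9 = 121, which a 128-entry array covers. A small check of that arithmetic, reusing the same packing formula as <linux/input/matrix_keypad.h>:

#include <stdio.h>

#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))

int main(void)
{
	unsigned int rows = 8, cols = 10;
	unsigned int row_shift = 4;	/* get_count_order(10) == 4 */

	printf("largest scan code: %u (fits in a 128-entry keymap)\n",
	       MATRIX_SCAN_CODE(rows - 1, cols - 1, row_shift));

	return 0;
}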
+#define GPI_PIN_ROW0 97
+#define GPI_PIN_ROW1 98
+#define GPI_PIN_ROW2 99
+#define GPI_PIN_ROW3 100
+#define GPI_PIN_ROW4 101
+#define GPI_PIN_ROW5 102
+#define GPI_PIN_ROW6 103
+#define GPI_PIN_ROW7 104
+#define GPI_PIN_COL0 105
+#define GPI_PIN_COL1 106
+#define GPI_PIN_COL2 107
+#define GPI_PIN_COL3 108
+#define GPI_PIN_COL4 109
+#define GPI_PIN_COL5 110
+#define GPI_PIN_COL6 111
+#define GPI_PIN_COL7 112
+#define GPI_PIN_COL8 113
+#define GPI_PIN_COL9 114
+
+#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
+#define GPI_PIN_ROW_END GPI_PIN_ROW7
+#define GPI_PIN_COL_BASE GPI_PIN_COL0
+#define GPI_PIN_COL_END GPI_PIN_COL9
+
+#define GPI_PIN_BASE GPI_PIN_ROW_BASE
+#define GPI_PIN_END GPI_PIN_COL_END
+
+#define ADP5588_ROWS_MAX (GPI_PIN_ROW7 - GPI_PIN_ROW0 + 1)
+#define ADP5588_COLS_MAX (GPI_PIN_COL9 - GPI_PIN_COL0 + 1)
+
+#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
/* Key Event Register xy */
-#define KEY_EV_PRESSED (1 << 7)
-#define KEY_EV_MASK (0x7F)
+#define KEY_EV_PRESSED BIT(7)
+#define KEY_EV_MASK GENMASK(6, 0)
-#define KP_SEL(x) (0xFFFF >> (16 - x)) /* 2^x-1 */
+#define KP_SEL(x) (BIT(x) - 1) /* 2^x-1 */
#define KEYP_MAX_EVENT 10
@@ -40,21 +176,27 @@
#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
#define WA_DELAYED_READOUT_TIME 25
+#define ADP5588_INVALID_HWIRQ (~0UL)
+
struct adp5588_kpad {
struct i2c_client *client;
struct input_dev *input;
ktime_t irq_time;
unsigned long delay;
+ u32 row_shift;
+ u32 rows;
+ u32 cols;
+ u32 unlock_keys[2];
+ int nkeys_unlock;
unsigned short keycode[ADP5588_KEYMAPSIZE];
- const struct adp5588_gpi_map *gpimap;
- unsigned short gpimapsize;
-#ifdef CONFIG_GPIOLIB
unsigned char gpiomap[ADP5588_MAXGPIO];
struct gpio_chip gc;
struct mutex gpio_lock; /* Protect cached dir, dat_out */
u8 dat_out[3];
u8 dir[3];
-#endif
+ u8 int_en[3];
+ u8 irq_mask[3];
+ u8 pull_dis[3];
};
static int adp5588_read(struct i2c_client *client, u8 reg)
@@ -72,8 +214,7 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val)
return i2c_smbus_write_byte_data(client, reg, val);
}
-#ifdef CONFIG_GPIOLIB
-static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
+static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned int off)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -93,7 +234,7 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
- unsigned off, int val)
+ unsigned int off, int val)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -106,13 +247,47 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
else
kpad->dat_out[bank] &= ~bit;
- adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
- kpad->dat_out[bank]);
+ adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, kpad->dat_out[bank]);
mutex_unlock(&kpad->gpio_lock);
}
-static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
+static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
+ unsigned long config)
+{
+ struct adp5588_kpad *kpad = gpiochip_get_data(chip);
+ unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+ unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+ bool pull_disable;
+ int ret;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pull_disable = false;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ pull_disable = true;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ mutex_lock(&kpad->gpio_lock);
+
+ if (pull_disable)
+ kpad->pull_dis[bank] |= bit;
+ else
+ kpad->pull_dis[bank] &= ~bit;
+
+ ret = adp5588_write(kpad->client, GPIO_PULL1 + bank,
+ kpad->pull_dis[bank]);
+
+ mutex_unlock(&kpad->gpio_lock);
+
+ return ret;
+}
+
+static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned int off)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -130,7 +305,7 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
}
static int adp5588_gpio_direction_output(struct gpio_chip *chip,
- unsigned off, int val)
+ unsigned int off, int val)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -147,17 +322,19 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
kpad->dat_out[bank] &= ~bit;
ret = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
- kpad->dat_out[bank]);
- ret |= adp5588_write(kpad->client, GPIO_DIR1 + bank,
- kpad->dir[bank]);
+ kpad->dat_out[bank]);
+ if (ret)
+ goto out_unlock;
+
+ ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
+out_unlock:
mutex_unlock(&kpad->gpio_lock);
return ret;
}
-static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
- const struct adp5588_kpad_platform_data *pdata)
+static int adp5588_build_gpiomap(struct adp5588_kpad *kpad)
{
bool pin_used[ADP5588_MAXGPIO];
int n_unused = 0;
@@ -165,15 +342,12 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
memset(pin_used, 0, sizeof(pin_used));
- for (i = 0; i < pdata->rows; i++)
+ for (i = 0; i < kpad->rows; i++)
pin_used[i] = true;
- for (i = 0; i < pdata->cols; i++)
+ for (i = 0; i < kpad->cols; i++)
pin_used[i + GPI_PIN_COL_BASE - GPI_PIN_BASE] = true;
- for (i = 0; i < kpad->gpimapsize; i++)
- pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true;
-
for (i = 0; i < ADP5588_MAXGPIO; i++)
if (!pin_used[i])
kpad->gpiomap[n_unused++] = i;
@@ -181,47 +355,101 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
-static void adp5588_gpio_do_teardown(void *_kpad)
+static void adp5588_irq_bus_lock(struct irq_data *d)
{
- struct adp5588_kpad *kpad = _kpad;
- struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
- int error;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5588_kpad *kpad = gpiochip_get_data(gc);
- error = gpio_data->teardown(kpad->client,
- kpad->gc.base, kpad->gc.ngpio,
- gpio_data->context);
- if (error)
- dev_warn(&kpad->client->dev, "teardown failed %d\n", error);
+ mutex_lock(&kpad->gpio_lock);
}
+static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5588_kpad *kpad = gpiochip_get_data(gc);
+ int i;
+
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+ if (kpad->int_en[i] ^ kpad->irq_mask[i]) {
+ kpad->int_en[i] = kpad->irq_mask[i];
+ adp5588_write(kpad->client, GPI_EM1 + i, kpad->int_en[i]);
+ }
+ }
+
+ mutex_unlock(&kpad->gpio_lock);
+}
+
+static void adp5588_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5588_kpad *kpad = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long real_irq = kpad->gpiomap[hwirq];
+
+ kpad->irq_mask[ADP5588_BANK(real_irq)] &= ~ADP5588_BIT(real_irq);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void adp5588_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5588_kpad *kpad = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long real_irq = kpad->gpiomap[hwirq];
+
+ gpiochip_enable_irq(gc, hwirq);
+ kpad->irq_mask[ADP5588_BANK(real_irq)] |= ADP5588_BIT(real_irq);
+}
+
+static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return 0;
+}
+
+static const struct irq_chip adp5588_irq_chip = {
+ .name = "adp5588",
+ .irq_mask = adp5588_irq_mask,
+ .irq_unmask = adp5588_irq_unmask,
+ .irq_bus_lock = adp5588_irq_bus_lock,
+ .irq_bus_sync_unlock = adp5588_irq_bus_sync_unlock,
+ .irq_set_type = adp5588_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
+ struct gpio_irq_chip *girq;
int i, error;
- if (!gpio_data)
- return 0;
-
- kpad->gc.ngpio = adp5588_build_gpiomap(kpad, pdata);
+ kpad->gc.ngpio = adp5588_build_gpiomap(kpad);
if (kpad->gc.ngpio == 0) {
dev_info(dev, "No unused gpios left to export\n");
return 0;
}
+ kpad->gc.parent = &kpad->client->dev;
kpad->gc.direction_input = adp5588_gpio_direction_input;
kpad->gc.direction_output = adp5588_gpio_direction_output;
kpad->gc.get = adp5588_gpio_get_value;
kpad->gc.set = adp5588_gpio_set_value;
+ kpad->gc.set_config = adp5588_gpio_set_config;
kpad->gc.can_sleep = 1;
- kpad->gc.base = gpio_data->gpio_start;
+ kpad->gc.base = -1;
kpad->gc.label = kpad->client->name;
kpad->gc.owner = THIS_MODULE;
- kpad->gc.names = gpio_data->names;
+
+ girq = &kpad->gc.irq;
+ gpio_irq_chip_set_chip(girq, &adp5588_irq_chip);
+ girq->handler = handle_bad_irq;
+ girq->threaded = true;
mutex_init(&kpad->gpio_lock);
@@ -235,54 +463,87 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->dat_out[i] = adp5588_read(kpad->client,
GPIO_DAT_OUT1 + i);
kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i);
+ kpad->pull_dis[i] = adp5588_read(kpad->client, GPIO_PULL1 + i);
}
- if (gpio_data->setup) {
- error = gpio_data->setup(kpad->client,
- kpad->gc.base, kpad->gc.ngpio,
- gpio_data->context);
- if (error)
- dev_warn(dev, "setup failed: %d\n", error);
- }
+ return 0;
+}
- if (gpio_data->teardown) {
- error = devm_add_action(dev, adp5588_gpio_do_teardown, kpad);
- if (error)
- dev_warn(dev, "failed to schedule teardown: %d\n",
- error);
- }
+static unsigned long adp5588_gpiomap_get_hwirq(struct device *dev,
+ const u8 *map, unsigned int gpio,
+ unsigned int ngpios)
+{
+ unsigned int hwirq;
- return 0;
+ for (hwirq = 0; hwirq < ngpios; hwirq++)
+ if (map[hwirq] == gpio)
+ return hwirq;
+
+ /* should never happen */
+ dev_warn_ratelimited(dev, "could not find the hwirq for gpio(%u)\n", gpio);
+
+ return ADP5588_INVALID_HWIRQ;
}
-#else
-static inline int adp5588_gpio_add(struct adp5588_kpad *kpad)
+static void adp5588_gpio_irq_handle(struct adp5588_kpad *kpad, int key_val,
+ int key_press)
{
- return 0;
+ unsigned int irq, gpio = key_val - GPI_PIN_BASE, irq_type;
+ struct i2c_client *client = kpad->client;
+ struct irq_data *irqd;
+ unsigned long hwirq;
+
+ hwirq = adp5588_gpiomap_get_hwirq(&client->dev, kpad->gpiomap,
+ gpio, kpad->gc.ngpio);
+ if (hwirq == ADP5588_INVALID_HWIRQ) {
+ dev_err(&client->dev, "Could not get hwirq for key(%u)\n", key_val);
+ return;
+ }
+
+ irq = irq_find_mapping(kpad->gc.irq.domain, hwirq);
+ if (!irq)
+ return;
+
+ irqd = irq_get_irq_data(irq);
+ if (!irqd) {
+ dev_err(&client->dev, "Could not get irq(%u) data\n", irq);
+ return;
+ }
+
+ irq_type = irqd_get_trigger_type(irqd);
+
+ /*
+ * Default is active low which means key_press is asserted on
+ * the falling edge.
+ */
+ if ((irq_type & IRQ_TYPE_EDGE_RISING && !key_press) ||
+ (irq_type & IRQ_TYPE_EDGE_FALLING && key_press))
+ handle_nested_irq(irq);
}
-#endif
static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
{
- int i, j;
+ int i;
for (i = 0; i < ev_cnt; i++) {
- int key = adp5588_read(kpad->client, Key_EVENTA + i);
+ int key = adp5588_read(kpad->client, KEY_EVENTA + i);
int key_val = key & KEY_EV_MASK;
+ int key_press = key & KEY_EV_PRESSED;
if (key_val >= GPI_PIN_BASE && key_val <= GPI_PIN_END) {
- for (j = 0; j < kpad->gpimapsize; j++) {
- if (key_val == kpad->gpimap[j].pin) {
- input_report_switch(kpad->input,
- kpad->gpimap[j].sw_evt,
- key & KEY_EV_PRESSED);
- break;
- }
- }
+ /* gpio line used as IRQ source */
+ adp5588_gpio_irq_handle(kpad, key_val, key_press);
} else {
+ int row = (key_val - 1) / ADP5588_COLS_MAX;
+ int col = (key_val - 1) % ADP5588_COLS_MAX;
+ int code = MATRIX_SCAN_CODE(row, col, kpad->row_shift);
+
+ dev_dbg_ratelimited(&kpad->client->dev,
+ "report key(%d) r(%d) c(%d) code(%d)\n",
+ key_val, row, col, kpad->keycode[code]);
+
input_report_key(kpad->input,
- kpad->keycode[key_val - 1],
- key & KEY_EV_PRESSED);
+ kpad->keycode[code], key_press);
}
}
}
@@ -335,176 +596,145 @@ static irqreturn_t adp5588_thread_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int adp5588_setup(struct i2c_client *client)
+static int adp5588_setup(struct adp5588_kpad *kpad)
{
- const struct adp5588_kpad_platform_data *pdata =
- dev_get_platdata(&client->dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
+ struct i2c_client *client = kpad->client;
int i, ret;
- unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
-
- ret = adp5588_write(client, KP_GPIO1, KP_SEL(pdata->rows));
- ret |= adp5588_write(client, KP_GPIO2, KP_SEL(pdata->cols) & 0xFF);
- ret |= adp5588_write(client, KP_GPIO3, KP_SEL(pdata->cols) >> 8);
- if (pdata->en_keylock) {
- ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
- ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
- ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
- }
+ ret = adp5588_write(client, KP_GPIO1, KP_SEL(kpad->rows));
+ if (ret)
+ return ret;
- for (i = 0; i < KEYP_MAX_EVENT; i++)
- ret |= adp5588_read(client, Key_EVENTA);
+ ret = adp5588_write(client, KP_GPIO2, KP_SEL(kpad->cols) & 0xFF);
+ if (ret)
+ return ret;
- for (i = 0; i < pdata->gpimapsize; i++) {
- unsigned short pin = pdata->gpimap[i].pin;
+ ret = adp5588_write(client, KP_GPIO3, KP_SEL(kpad->cols) >> 8);
+ if (ret)
+ return ret;
- if (pin <= GPI_PIN_ROW_END) {
- evt_mode1 |= (1 << (pin - GPI_PIN_ROW_BASE));
- } else {
- evt_mode2 |= ((1 << (pin - GPI_PIN_COL_BASE)) & 0xFF);
- evt_mode3 |= ((1 << (pin - GPI_PIN_COL_BASE)) >> 8);
- }
+ for (i = 0; i < kpad->nkeys_unlock; i++) {
+ ret = adp5588_write(client, UNLOCK1 + i, kpad->unlock_keys[i]);
+ if (ret)
+ return ret;
}
- if (pdata->gpimapsize) {
- ret |= adp5588_write(client, GPI_EM1, evt_mode1);
- ret |= adp5588_write(client, GPI_EM2, evt_mode2);
- ret |= adp5588_write(client, GPI_EM3, evt_mode3);
+ if (kpad->nkeys_unlock) {
+ ret = adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
+ if (ret)
+ return ret;
}
- if (gpio_data) {
- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
- int pull_mask = gpio_data->pullup_dis_mask;
-
- ret |= adp5588_write(client, GPIO_PULL1 + i,
- (pull_mask >> (8 * i)) & 0xFF);
- }
+ for (i = 0; i < KEYP_MAX_EVENT; i++) {
+ ret = adp5588_read(client, KEY_EVENTA);
+ if (ret)
+ return ret;
}
- ret |= adp5588_write(client, INT_STAT,
- ADP5588_CMP2_INT | ADP5588_CMP1_INT |
- ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
- ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
+ ret = adp5588_write(client, INT_STAT,
+ ADP5588_CMP2_INT | ADP5588_CMP1_INT |
+ ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
+ ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
+ if (ret)
+ return ret;
- ret |= adp5588_write(client, CFG, ADP5588_INT_CFG |
- ADP5588_OVR_FLOW_IEN |
- ADP5588_KE_IEN);
+ return adp5588_write(client, CFG, ADP5588_INT_CFG |
+ ADP5588_OVR_FLOW_IEN | ADP5588_KE_IEN);
+}
+
+static int adp5588_fw_parse(struct adp5588_kpad *kpad)
+{
+ struct i2c_client *client = kpad->client;
+ int ret, i;
- if (ret < 0) {
- dev_err(&client->dev, "Write Error\n");
+ ret = matrix_keypad_parse_properties(&client->dev, &kpad->rows,
+ &kpad->cols);
+ if (ret)
return ret;
+
+ if (kpad->rows > ADP5588_ROWS_MAX || kpad->cols > ADP5588_COLS_MAX) {
+ dev_err(&client->dev, "Invalid nr of rows(%u) or cols(%u)\n",
+ kpad->rows, kpad->cols);
+ return -EINVAL;
}
- return 0;
-}
+ ret = matrix_keypad_build_keymap(NULL, NULL, kpad->rows, kpad->cols,
+ kpad->keycode, kpad->input);
+ if (ret)
+ return ret;
-static void adp5588_report_switch_state(struct adp5588_kpad *kpad)
-{
- int gpi_stat1 = adp5588_read(kpad->client, GPIO_DAT_STAT1);
- int gpi_stat2 = adp5588_read(kpad->client, GPIO_DAT_STAT2);
- int gpi_stat3 = adp5588_read(kpad->client, GPIO_DAT_STAT3);
- int gpi_stat_tmp, pin_loc;
- int i;
+ kpad->row_shift = get_count_order(kpad->cols);
- for (i = 0; i < kpad->gpimapsize; i++) {
- unsigned short pin = kpad->gpimap[i].pin;
+ if (device_property_read_bool(&client->dev, "autorepeat"))
+ __set_bit(EV_REP, kpad->input->evbit);
- if (pin <= GPI_PIN_ROW_END) {
- gpi_stat_tmp = gpi_stat1;
- pin_loc = pin - GPI_PIN_ROW_BASE;
- } else if ((pin - GPI_PIN_COL_BASE) < 8) {
- gpi_stat_tmp = gpi_stat2;
- pin_loc = pin - GPI_PIN_COL_BASE;
- } else {
- gpi_stat_tmp = gpi_stat3;
- pin_loc = pin - GPI_PIN_COL_BASE - 8;
- }
+ kpad->nkeys_unlock = device_property_count_u32(&client->dev,
+ "adi,unlock-keys");
+ if (kpad->nkeys_unlock <= 0) {
+ /* so that we don't end up enabling key lock */
+ kpad->nkeys_unlock = 0;
+ return 0;
+ }
- if (gpi_stat_tmp < 0) {
- dev_err(&kpad->client->dev,
- "Can't read GPIO_DAT_STAT switch %d default to OFF\n",
- pin);
- gpi_stat_tmp = 0;
+ if (kpad->nkeys_unlock > ARRAY_SIZE(kpad->unlock_keys)) {
+ dev_err(&client->dev, "number of unlock keys(%d) > (%zu)\n",
+ kpad->nkeys_unlock, ARRAY_SIZE(kpad->unlock_keys));
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32_array(&client->dev, "adi,unlock-keys",
+ kpad->unlock_keys,
+ kpad->nkeys_unlock);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < kpad->nkeys_unlock; i++) {
+ /*
+ * Even though it should be possible (as stated in the datasheet)
+ * to use GPIs (which are part of the keys event) as unlock keys,
+ * it was not working at all and was leading to overflow events
+ * at some point. Hence, for now, only keys that are part of the
+ * keypad matrix are allowed, and if a reliable way of using GPIs
+ * is found, this condition can be removed/lightened.
+ */
+ if (kpad->unlock_keys[i] >= kpad->cols * kpad->rows) {
+ dev_err(&client->dev, "Invalid unlock key(%d)\n",
+ kpad->unlock_keys[i]);
+ return -EINVAL;
}
- input_report_switch(kpad->input,
- kpad->gpimap[i].sw_evt,
- !(gpi_stat_tmp & (1 << pin_loc)));
+ /*
+ * Firmware properties keys start from 0 but on the device they
+ * start from 1.
+ */
+ kpad->unlock_keys[i] += 1;
}
- input_sync(kpad->input);
+ return 0;
}
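For context on the adi,unlock-keys parsing above: the values are 0-based in the firmware node, bumped to the device's 1-based numbering here, and later written out in adp5588_setup(), which programs UNLOCK1/UNLOCK2 and only sets ADP5588_K_LCK_EN when at least one key was given. A rough sketch of what that amounts to for a hypothetical adi,unlock-keys = <0 11> property (the function name and key values are made up; the register defines are the ones from this file):

/* Illustrative only: program two unlock keys, then enable key lock. */
static int example_program_unlock_keys(struct i2c_client *client)
{
	const u8 unlock_keys[] = { 0 + 1, 11 + 1 };	/* device is 1-based */
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(unlock_keys); i++) {
		ret = i2c_smbus_write_byte_data(client, UNLOCK1 + i,
						unlock_keys[i]);
		if (ret)
			return ret;
	}

	return i2c_smbus_write_byte_data(client, KEY_LCK_EC_STAT,
					 ADP5588_K_LCK_EN);
}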
+static void adp5588_disable_regulator(void *reg)
+{
+ regulator_disable(reg);
+}
static int adp5588_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adp5588_kpad *kpad;
- const struct adp5588_kpad_platform_data *pdata =
- dev_get_platdata(&client->dev);
struct input_dev *input;
+ struct gpio_desc *gpio;
+ struct regulator *vcc;
unsigned int revid;
- int ret, i;
+ int ret;
int error;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
return -EIO;
}
- if (!pdata) {
- dev_err(&client->dev, "no platform data?\n");
- return -EINVAL;
- }
-
- if (!pdata->rows || !pdata->cols || !pdata->keymap) {
- dev_err(&client->dev, "no rows, cols or keymap from pdata\n");
- return -EINVAL;
- }
-
- if (pdata->keymapsize != ADP5588_KEYMAPSIZE) {
- dev_err(&client->dev, "invalid keymapsize\n");
- return -EINVAL;
- }
-
- if (!pdata->gpimap && pdata->gpimapsize) {
- dev_err(&client->dev, "invalid gpimap from pdata\n");
- return -EINVAL;
- }
-
- if (pdata->gpimapsize > ADP5588_GPIMAPSIZE_MAX) {
- dev_err(&client->dev, "invalid gpimapsize\n");
- return -EINVAL;
- }
-
- for (i = 0; i < pdata->gpimapsize; i++) {
- unsigned short pin = pdata->gpimap[i].pin;
-
- if (pin < GPI_PIN_BASE || pin > GPI_PIN_END) {
- dev_err(&client->dev, "invalid gpi pin data\n");
- return -EINVAL;
- }
-
- if (pin <= GPI_PIN_ROW_END) {
- if (pin - GPI_PIN_ROW_BASE + 1 <= pdata->rows) {
- dev_err(&client->dev, "invalid gpi row data\n");
- return -EINVAL;
- }
- } else {
- if (pin - GPI_PIN_COL_BASE + 1 <= pdata->cols) {
- dev_err(&client->dev, "invalid gpi col data\n");
- return -EINVAL;
- }
- }
- }
-
- if (!client->irq) {
- dev_err(&client->dev, "no IRQ?\n");
- return -EINVAL;
- }
-
kpad = devm_kzalloc(&client->dev, sizeof(*kpad), GFP_KERNEL);
if (!kpad)
return -ENOMEM;
@@ -516,11 +746,38 @@ static int adp5588_probe(struct i2c_client *client,
kpad->client = client;
kpad->input = input;
+ error = adp5588_fw_parse(kpad);
+ if (error)
+ return error;
+
+ vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(vcc))
+ return PTR_ERR(vcc);
+
+ error = regulator_enable(vcc);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(&client->dev,
+ adp5588_disable_regulator, vcc);
+ if (error)
+ return error;
+
+ gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ fsleep(30);
+ gpiod_set_value_cansleep(gpio, 0);
+ fsleep(60);
+ }
+
ret = adp5588_read(client, DEV_ID);
if (ret < 0)
return ret;
- revid = (u8) ret & ADP5588_DEVICE_ID_MASK;
+ revid = ret & ADP5588_DEVICE_ID_MASK;
if (WA_DELAYED_READOUT_REVID(revid))
kpad->delay = msecs_to_jiffies(WA_DELAYED_READOUT_TIME);
@@ -534,32 +791,6 @@ static int adp5588_probe(struct i2c_client *client,
input->id.product = 0x0001;
input->id.version = revid;
- input->keycodesize = sizeof(kpad->keycode[0]);
- input->keycodemax = pdata->keymapsize;
- input->keycode = kpad->keycode;
-
- memcpy(kpad->keycode, pdata->keymap,
- pdata->keymapsize * input->keycodesize);
-
- kpad->gpimap = pdata->gpimap;
- kpad->gpimapsize = pdata->gpimapsize;
-
- /* setup input device */
- __set_bit(EV_KEY, input->evbit);
-
- if (pdata->repeat)
- __set_bit(EV_REP, input->evbit);
-
- for (i = 0; i < input->keycodemax; i++)
- if (kpad->keycode[i] <= KEY_MAX)
- __set_bit(kpad->keycode[i], input->keybit);
- __clear_bit(KEY_RESERVED, input->keybit);
-
- if (kpad->gpimapsize)
- __set_bit(EV_SW, input->evbit);
- for (i = 0; i < kpad->gpimapsize; i++)
- __set_bit(kpad->gpimap[i].sw_evt, input->swbit);
-
error = input_register_device(input);
if (error) {
dev_err(&client->dev, "unable to register input device: %d\n",
@@ -567,6 +798,14 @@ static int adp5588_probe(struct i2c_client *client,
return error;
}
+ error = adp5588_setup(kpad);
+ if (error)
+ return error;
+
+ error = adp5588_gpio_add(kpad);
+ if (error)
+ return error;
+
error = devm_request_threaded_irq(&client->dev, client->irq,
adp5588_hard_irq, adp5588_thread_irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -577,17 +816,6 @@ static int adp5588_probe(struct i2c_client *client,
return error;
}
- error = adp5588_setup(client);
- if (error)
- return error;
-
- if (kpad->gpimapsize)
- adp5588_report_switch_state(kpad);
-
- error = adp5588_gpio_add(kpad);
- if (error)
- return error;
-
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
}
@@ -599,7 +827,7 @@ static void adp5588_remove(struct i2c_client *client)
/* all resources will be freed by devm */
}
-static int __maybe_unused adp5588_suspend(struct device *dev)
+static int adp5588_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -608,7 +836,7 @@ static int __maybe_unused adp5588_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused adp5588_resume(struct device *dev)
+static int adp5588_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -617,7 +845,7 @@ static int __maybe_unused adp5588_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(adp5588_dev_pm_ops, adp5588_suspend, adp5588_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(adp5588_dev_pm_ops, adp5588_suspend, adp5588_resume);
static const struct i2c_device_id adp5588_id[] = {
{ "adp5588-keys", 0 },
@@ -626,10 +854,18 @@ static const struct i2c_device_id adp5588_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adp5588_id);
+static const struct of_device_id adp5588_of_match[] = {
+ { .compatible = "adi,adp5588" },
+ { .compatible = "adi,adp5587" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adp5588_of_match);
+
static struct i2c_driver adp5588_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .pm = &adp5588_dev_pm_ops,
+ .of_match_table = adp5588_of_match,
+ .pm = pm_sleep_ptr(&adp5588_dev_pm_ops),
},
.probe = adp5588_probe,
.remove = adp5588_remove,
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 09551f64d53f..a20a4e186639 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -10,9 +10,6 @@
* Amiga keyboard driver for Linux/m68k
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index cbc6c0d4670a..91a9810f6980 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -202,7 +202,7 @@ struct command_protocol_tp_info {
};
/**
- * struct touchpad_info - touchpad info response.
+ * struct touchpad_info_protocol - touchpad info response.
* message.type = 0x1020, message.length = 0x006e
*
* @unknown1: unknown
@@ -311,7 +311,7 @@ struct message {
struct command_protocol_mt_init init_mt_command;
struct command_protocol_capsl capsl_command;
struct command_protocol_bl bl_command;
- u8 data[0];
+ DECLARE_FLEX_ARRAY(u8, data);
};
};
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index 77ed54630601..07e17e563f9b 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -21,9 +21,6 @@
* This driver only deals with handing key events off to the input layer.
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index d4131236d18c..246958795f60 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -323,11 +323,13 @@ static umode_t atkbd_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group atkbd_attribute_group = {
+static const struct attribute_group atkbd_attribute_group = {
.attrs = atkbd_attributes,
.is_visible = atkbd_attr_is_visible,
};
+__ATTRIBUTE_GROUPS(atkbd_attribute);
+
static const unsigned int xl_table[] = {
ATKBD_RET_BAT, ATKBD_RET_ERR, ATKBD_RET_ACK,
ATKBD_RET_NAK, ATKBD_RET_HANJA, ATKBD_RET_HANGEUL,
@@ -922,8 +924,6 @@ static void atkbd_disconnect(struct serio *serio)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
- sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
-
atkbd_disable(atkbd);
input_unregister_device(atkbd->dev);
@@ -1271,21 +1271,16 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd_set_keycode_table(atkbd);
atkbd_set_device_attrs(atkbd);
- err = sysfs_create_group(&serio->dev.kobj, &atkbd_attribute_group);
- if (err)
- goto fail3;
-
atkbd_enable(atkbd);
if (serio->write)
atkbd_activate(atkbd);
err = input_register_device(atkbd->dev);
if (err)
- goto fail4;
+ goto fail3;
return 0;
- fail4: sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
fail3: serio_close(serio);
fail2: serio_set_drvdata(serio, NULL);
fail1: input_free_device(dev);
@@ -1378,7 +1373,8 @@ MODULE_DEVICE_TABLE(serio, atkbd_serio_ids);
static struct serio_driver atkbd_drv = {
.driver = {
- .name = "atkbd",
+ .name = "atkbd",
+ .dev_groups = atkbd_attribute_groups,
},
.description = DRIVER_DESC,
.id_table = atkbd_serio_ids,
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index 939c88655fc0..4c1a3e611edd 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -6,9 +6,11 @@
*/
#include <linux/input.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/input/matrix_keypad.h>
@@ -86,7 +88,6 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
{
struct clps711x_keypad_data *priv;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct input_dev *input;
u32 poll_interval;
int i, err;
@@ -95,11 +96,11 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
+ priv->syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(priv->syscon))
return PTR_ERR(priv->syscon);
- priv->row_count = of_gpio_named_count(np, "row-gpios");
+ priv->row_count = gpiod_count(dev, "row");
if (priv->row_count < 1)
return -EINVAL;
@@ -119,7 +120,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
return PTR_ERR(data->desc);
}
- err = of_property_read_u32(np, "poll-interval", &poll_interval);
+ err = device_property_read_u32(dev, "poll-interval", &poll_interval);
if (err)
return err;
@@ -143,7 +144,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
return err;
input_set_capability(input, EV_MSC, MSC_SCAN);
- if (of_property_read_bool(np, "autorepeat"))
+ if (device_property_read_bool(dev, "autorepeat"))
__set_bit(EV_REP, input->evbit);
/* Set all columns to low */
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 7a3b0664ab4f..f5bf7524722a 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/soc/cirrus/ep93xx.h>
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index ae9303848571..e15a93619e82 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index e4a1839ca934..047b654b3752 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -46,9 +46,6 @@
* http://www.vt100.net/manx/details?pn=EK-104AA-TM-001;id=21;cp=1
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -359,18 +356,18 @@ static void lkkbd_detection_done(struct lkkbd *lk)
*/
switch (lk->id[4]) {
case 1:
- strlcpy(lk->name, "DEC LK201 keyboard", sizeof(lk->name));
+ strscpy(lk->name, "DEC LK201 keyboard", sizeof(lk->name));
if (lk201_compose_is_alt)
lk->keycode[0xb1] = KEY_LEFTALT;
break;
case 2:
- strlcpy(lk->name, "DEC LK401 keyboard", sizeof(lk->name));
+ strscpy(lk->name, "DEC LK401 keyboard", sizeof(lk->name));
break;
default:
- strlcpy(lk->name, "Unknown DEC keyboard", sizeof(lk->name));
+ strscpy(lk->name, "Unknown DEC keyboard", sizeof(lk->name));
printk(KERN_ERR
"lkkbd: keyboard on %s is unknown, please report to "
"Jan-Benedict Glaw <jbglaw@lug-owl.de>\n", lk->phys);
@@ -626,7 +623,7 @@ static int lkkbd_connect(struct serio *serio, struct serio_driver *drv)
lk->ctrlclick_volume = ctrlclick_volume;
memcpy(lk->keycode, lkkbd_keycode, sizeof(lk->keycode));
- strlcpy(lk->name, "DEC LK keyboard", sizeof(lk->name));
+ strscpy(lk->name, "DEC LK keyboard", sizeof(lk->name));
snprintf(lk->phys, sizeof(lk->phys), "%s/input0", serio->phys);
input_dev->name = lk->name;
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 9dac22c14125..3052cd6dedac 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -4,13 +4,13 @@
* Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
#include <linux/i2c.h>
-#include <linux/interrupt.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/input/lm8333.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#define LM8333_FIFO_READ 0x20
#define LM8333_DEBOUNCE 0x22
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 30924b57058f..7dd3f3eda834 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/irq.h>
@@ -416,9 +417,9 @@ matrix_keypad_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- pdata->num_row_gpios = nrow = of_gpio_named_count(np, "row-gpios");
- pdata->num_col_gpios = ncol = of_gpio_named_count(np, "col-gpios");
- if (nrow <= 0 || ncol <= 0) {
+ pdata->num_row_gpios = nrow = gpiod_count(dev, "row");
+ pdata->num_col_gpios = ncol = gpiod_count(dev, "col");
+ if (nrow < 0 || ncol < 0) {
dev_err(dev, "number of keypad rows/columns not specified\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index bf447bf598fb..19f69d167fbd 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -18,6 +19,7 @@
#define MTK_KPD_DEBOUNCE_MASK GENMASK(13, 0)
#define MTK_KPD_DEBOUNCE_MAX_MS 256
#define MTK_KPD_SEL 0x0020
+#define MTK_KPD_SEL_DOUBLE_KP_MODE BIT(0)
#define MTK_KPD_SEL_COL GENMASK(15, 10)
#define MTK_KPD_SEL_ROW GENMASK(9, 4)
#define MTK_KPD_SEL_COLMASK(c) GENMASK((c) + 9, 10)
@@ -31,6 +33,8 @@ struct mt6779_keypad {
struct clk *clk;
u32 n_rows;
u32 n_cols;
+ void (*calc_row_col)(unsigned int key,
+ unsigned int *row, unsigned int *col);
DECLARE_BITMAP(keymap_state, MTK_KPD_NUM_BITS);
};
@@ -67,8 +71,7 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
continue;
key = bit_nr / 32 * 16 + bit_nr % 32;
- row = key / 9;
- col = key % 9;
+ keypad->calc_row_col(key, &row, &col);
scancode = MATRIX_SCAN_CODE(row, col, row_shift);
/* 1: not pressed, 0: pressed */
@@ -94,12 +97,29 @@ static void mt6779_keypad_clk_disable(void *data)
clk_disable_unprepare(data);
}
+static void mt6779_keypad_calc_row_col_single(unsigned int key,
+ unsigned int *row,
+ unsigned int *col)
+{
+ *row = key / 9;
+ *col = key % 9;
+}
+
+static void mt6779_keypad_calc_row_col_double(unsigned int key,
+ unsigned int *row,
+ unsigned int *col)
+{
+ *row = key / 13;
+ *col = (key % 13) / 2;
+}
+
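A standalone illustration of the two mappings above: in single mode each row spans 9 key slots, while in double mode a row spans 13 slots and adjacent slots share a column (the key number itself normally comes from the hardware status registers; the value below is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int key = 40;	/* arbitrary key number for illustration */
	unsigned int row, col;

	/* one key per sensing group */
	row = key / 9;
	col = key % 9;
	printf("single mode: key %u -> row %u, col %u\n", key, row, col);

	/* two keys per sensing group */
	row = key / 13;
	col = (key % 13) / 2;
	printf("double mode: key %u -> row %u, col %u\n", key, row, col);

	return 0;
}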
static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
{
struct mt6779_keypad *keypad;
void __iomem *base;
int irq;
u32 debounce;
+ u32 keys_per_group;
bool wakeup;
int error;
@@ -148,6 +168,23 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (device_property_read_u32(&pdev->dev, "mediatek,keys-per-group",
+ &keys_per_group))
+ keys_per_group = 1;
+
+ switch (keys_per_group) {
+ case 1:
+ keypad->calc_row_col = mt6779_keypad_calc_row_col_single;
+ break;
+ case 2:
+ keypad->calc_row_col = mt6779_keypad_calc_row_col_double;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "Invalid keys-per-group: %d\n", keys_per_group);
+ return -EINVAL;
+ }
+
wakeup = device_property_read_bool(&pdev->dev, "wakeup-source");
dev_dbg(&pdev->dev, "n_row=%d n_col=%d debounce=%d\n",
@@ -166,6 +203,11 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
(debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+ if (keys_per_group == 2)
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL,
+ MTK_KPD_SEL_DOUBLE_KP_MODE,
+ MTK_KPD_SEL_DOUBLE_KP_MODE);
+
regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_ROW,
MTK_KPD_SEL_ROWMASK(keypad->n_rows));
regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL,
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 6404081253ea..9b34da0ec260 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/registers.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
@@ -22,6 +23,10 @@
#define MTK_PMIC_PWRKEY_RST BIT(6)
#define MTK_PMIC_HOMEKEY_RST BIT(5)
+#define MTK_PMIC_MT6331_RST_DU_MASK GENMASK(13, 12)
+#define MTK_PMIC_MT6331_PWRKEY_RST BIT(9)
+#define MTK_PMIC_MT6331_HOMEKEY_RST BIT(8)
+
#define MTK_PMIC_PWRKEY_INDEX 0
#define MTK_PMIC_HOMEKEY_INDEX 1
#define MTK_PMIC_MAX_KEY_COUNT 2
@@ -72,6 +77,19 @@ static const struct mtk_pmic_regs mt6323_regs = {
.rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
+static const struct mtk_pmic_regs mt6331_regs = {
+ .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6331_TOPSTATUS, 0x2,
+ MT6331_INT_MISC_CON, 0x4,
+ MTK_PMIC_MT6331_PWRKEY_RST),
+ .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6331_TOPSTATUS, 0x4,
+ MT6331_INT_MISC_CON, 0x2,
+ MTK_PMIC_MT6331_HOMEKEY_RST),
+ .pmic_rst_reg = MT6331_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_MT6331_RST_DU_MASK,
+};
+
static const struct mtk_pmic_regs mt6358_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
@@ -256,6 +274,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
.compatible = "mediatek,mt6323-keys",
.data = &mt6323_regs,
}, {
+ .compatible = "mediatek,mt6331-keys",
+ .data = &mt6331_regs,
+ }, {
.compatible = "mediatek,mt6358-keys",
.data = &mt6358_regs,
}, {
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index 9742261b2d1a..df00a119aa9a 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -7,9 +7,6 @@
* Newton keyboard driver for Linux
*/
-/*
- */
-
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/pinephone-keyboard.c b/drivers/input/keyboard/pinephone-keyboard.c
new file mode 100644
index 000000000000..5548699b8b38
--- /dev/null
+++ b/drivers/input/keyboard/pinephone-keyboard.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2021-2022 Samuel Holland <samuel@sholland.org>
+
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#define DRV_NAME "pinephone-keyboard"
+
+#define PPKB_CRC8_POLYNOMIAL 0x07
+
+#define PPKB_DEVICE_ID_HI 0x00
+#define PPKB_DEVICE_ID_HI_VALUE 'K'
+#define PPKB_DEVICE_ID_LO 0x01
+#define PPKB_DEVICE_ID_LO_VALUE 'B'
+#define PPKB_FW_REVISION 0x02
+#define PPKB_FW_FEATURES 0x03
+#define PPKB_MATRIX_SIZE 0x06
+#define PPKB_SCAN_CRC 0x07
+#define PPKB_SCAN_DATA 0x08
+#define PPKB_SYS_CONFIG 0x20
+#define PPKB_SYS_CONFIG_DISABLE_SCAN BIT(0)
+#define PPKB_SYS_SMBUS_COMMAND 0x21
+#define PPKB_SYS_SMBUS_DATA 0x22
+#define PPKB_SYS_COMMAND 0x23
+#define PPKB_SYS_COMMAND_SMBUS_READ 0x91
+#define PPKB_SYS_COMMAND_SMBUS_WRITE 0xa1
+
+#define PPKB_ROWS 6
+#define PPKB_COLS 12
+
+/* Size of the scan buffer, including the CRC byte at the beginning. */
+#define PPKB_BUF_LEN (1 + PPKB_COLS)
+
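The scan frame that ppkb_update() below reads starts at PPKB_SCAN_CRC and is PPKB_BUF_LEN bytes long: one CRC byte followed by twelve per-column bitmaps. A standalone sketch of the integrity check, using a plain bitwise CRC-8 (polynomial 0x07, MSB first) that mirrors what crc8() computes from a crc8_populate_msb() table; the frame contents are made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PPKB_COLS	12
#define PPKB_BUF_LEN	(1 + PPKB_COLS)

/* Bitwise CRC-8, polynomial 0x07, MSB first. */
static uint8_t crc8_msb(const uint8_t *data, size_t len, uint8_t crc)
{
	while (len--) {
		crc ^= *data++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : crc << 1;
	}

	return crc;
}

int main(void)
{
	/* Made-up frame: CRC slot first, then one byte per column. */
	uint8_t buf[PPKB_BUF_LEN] = { 0x00, 0x08, 0x00, 0x20 };

	buf[0] = crc8_msb(&buf[1], PPKB_COLS, 0xff);	/* CRC8_INIT_VALUE */
	printf("frame %s\n",
	       crc8_msb(&buf[1], PPKB_COLS, 0xff) == buf[0] ? "ok" : "corrupt");

	return 0;
}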
+static const uint32_t ppkb_keymap[] = {
+ KEY(0, 0, KEY_ESC),
+ KEY(0, 1, KEY_1),
+ KEY(0, 2, KEY_2),
+ KEY(0, 3, KEY_3),
+ KEY(0, 4, KEY_4),
+ KEY(0, 5, KEY_5),
+ KEY(0, 6, KEY_6),
+ KEY(0, 7, KEY_7),
+ KEY(0, 8, KEY_8),
+ KEY(0, 9, KEY_9),
+ KEY(0, 10, KEY_0),
+ KEY(0, 11, KEY_BACKSPACE),
+
+ KEY(1, 0, KEY_TAB),
+ KEY(1, 1, KEY_Q),
+ KEY(1, 2, KEY_W),
+ KEY(1, 3, KEY_E),
+ KEY(1, 4, KEY_R),
+ KEY(1, 5, KEY_T),
+ KEY(1, 6, KEY_Y),
+ KEY(1, 7, KEY_U),
+ KEY(1, 8, KEY_I),
+ KEY(1, 9, KEY_O),
+ KEY(1, 10, KEY_P),
+ KEY(1, 11, KEY_ENTER),
+
+ KEY(2, 0, KEY_LEFTMETA),
+ KEY(2, 1, KEY_A),
+ KEY(2, 2, KEY_S),
+ KEY(2, 3, KEY_D),
+ KEY(2, 4, KEY_F),
+ KEY(2, 5, KEY_G),
+ KEY(2, 6, KEY_H),
+ KEY(2, 7, KEY_J),
+ KEY(2, 8, KEY_K),
+ KEY(2, 9, KEY_L),
+ KEY(2, 10, KEY_SEMICOLON),
+
+ KEY(3, 0, KEY_LEFTSHIFT),
+ KEY(3, 1, KEY_Z),
+ KEY(3, 2, KEY_X),
+ KEY(3, 3, KEY_C),
+ KEY(3, 4, KEY_V),
+ KEY(3, 5, KEY_B),
+ KEY(3, 6, KEY_N),
+ KEY(3, 7, KEY_M),
+ KEY(3, 8, KEY_COMMA),
+ KEY(3, 9, KEY_DOT),
+ KEY(3, 10, KEY_SLASH),
+
+ KEY(4, 1, KEY_LEFTCTRL),
+ KEY(4, 4, KEY_SPACE),
+ KEY(4, 6, KEY_APOSTROPHE),
+ KEY(4, 8, KEY_RIGHTBRACE),
+ KEY(4, 9, KEY_LEFTBRACE),
+
+ KEY(5, 2, KEY_FN),
+ KEY(5, 3, KEY_LEFTALT),
+ KEY(5, 5, KEY_RIGHTALT),
+
+ /* FN layer */
+ KEY(PPKB_ROWS + 0, 0, KEY_FN_ESC),
+ KEY(PPKB_ROWS + 0, 1, KEY_F1),
+ KEY(PPKB_ROWS + 0, 2, KEY_F2),
+ KEY(PPKB_ROWS + 0, 3, KEY_F3),
+ KEY(PPKB_ROWS + 0, 4, KEY_F4),
+ KEY(PPKB_ROWS + 0, 5, KEY_F5),
+ KEY(PPKB_ROWS + 0, 6, KEY_F6),
+ KEY(PPKB_ROWS + 0, 7, KEY_F7),
+ KEY(PPKB_ROWS + 0, 8, KEY_F8),
+ KEY(PPKB_ROWS + 0, 9, KEY_F9),
+ KEY(PPKB_ROWS + 0, 10, KEY_F10),
+ KEY(PPKB_ROWS + 0, 11, KEY_DELETE),
+
+ KEY(PPKB_ROWS + 1, 10, KEY_PAGEUP),
+
+ KEY(PPKB_ROWS + 2, 0, KEY_SYSRQ),
+ KEY(PPKB_ROWS + 2, 9, KEY_PAGEDOWN),
+ KEY(PPKB_ROWS + 2, 10, KEY_INSERT),
+
+ KEY(PPKB_ROWS + 3, 0, KEY_LEFTSHIFT),
+ KEY(PPKB_ROWS + 3, 8, KEY_HOME),
+ KEY(PPKB_ROWS + 3, 9, KEY_UP),
+ KEY(PPKB_ROWS + 3, 10, KEY_END),
+
+ KEY(PPKB_ROWS + 4, 1, KEY_LEFTCTRL),
+ KEY(PPKB_ROWS + 4, 6, KEY_LEFT),
+ KEY(PPKB_ROWS + 4, 8, KEY_RIGHT),
+ KEY(PPKB_ROWS + 4, 9, KEY_DOWN),
+
+ KEY(PPKB_ROWS + 5, 3, KEY_LEFTALT),
+ KEY(PPKB_ROWS + 5, 5, KEY_RIGHTALT),
+};
+
+static const struct matrix_keymap_data ppkb_keymap_data = {
+ .keymap = ppkb_keymap,
+ .keymap_size = ARRAY_SIZE(ppkb_keymap),
+};
+
+struct pinephone_keyboard {
+ struct i2c_adapter adapter;
+ struct input_dev *input;
+ u8 buf[2][PPKB_BUF_LEN];
+ u8 crc_table[CRC8_TABLE_SIZE];
+ u8 fn_state[PPKB_COLS];
+ bool buf_swap;
+ bool fn_pressed;
+};
+
+static int ppkb_adap_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size,
+ union i2c_smbus_data *data)
+{
+ struct i2c_client *client = adap->algo_data;
+ u8 buf[3];
+ int ret;
+
+ buf[0] = command;
+ buf[1] = data->byte;
+ buf[2] = read_write == I2C_SMBUS_READ ? PPKB_SYS_COMMAND_SMBUS_READ
+ : PPKB_SYS_COMMAND_SMBUS_WRITE;
+
+ ret = i2c_smbus_write_i2c_block_data(client, PPKB_SYS_SMBUS_COMMAND,
+ sizeof(buf), buf);
+ if (ret)
+ return ret;
+
+ /* Read back the command status until it passes or fails. */
+ do {
+ usleep_range(300, 500);
+ ret = i2c_smbus_read_byte_data(client, PPKB_SYS_COMMAND);
+ } while (ret == buf[2]);
+ if (ret < 0)
+ return ret;
+ /* Commands return 0x00 on success and 0xff on failure. */
+ if (ret)
+ return -EIO;
+
+ if (read_write == I2C_SMBUS_READ) {
+ ret = i2c_smbus_read_byte_data(client, PPKB_SYS_SMBUS_DATA);
+ if (ret < 0)
+ return ret;
+
+ data->byte = ret;
+ }
+
+ return 0;
+}
+
+static u32 ppkg_adap_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static const struct i2c_algorithm ppkb_adap_algo = {
+ .smbus_xfer = ppkb_adap_smbus_xfer,
+ .functionality = ppkg_adap_functionality,
+};
+
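A hedged sketch of what the pass-through adapter is for: once it is registered (devm_i2c_add_adapter() in the probe below), an ordinary SMBus byte-data transfer on that bus is relayed through the keyboard MCU by ppkb_adap_smbus_xfer() above, i.e. one block write of {command, data, opcode}, a status poll, then a read of PPKB_SYS_SMBUS_DATA. The device name, address and register in this consumer example are made up:

static int example_read_downstream(struct i2c_adapter *ppkb_adapter)
{
	struct i2c_board_info info = { I2C_BOARD_INFO("example-dev", 0x75) };
	struct i2c_client *dev;
	int val;

	dev = i2c_new_client_device(ppkb_adapter, &info);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	/* Relayed by the keyboard MCU via the PPKB_SYS_SMBUS_* registers. */
	val = i2c_smbus_read_byte_data(dev, 0x00);

	i2c_unregister_device(dev);

	return val;
}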
+static void ppkb_update(struct i2c_client *client)
+{
+ struct pinephone_keyboard *ppkb = i2c_get_clientdata(client);
+ unsigned short *keymap = ppkb->input->keycode;
+ int row_shift = get_count_order(PPKB_COLS);
+ u8 *old_buf = ppkb->buf[!ppkb->buf_swap];
+ u8 *new_buf = ppkb->buf[ppkb->buf_swap];
+ int col, crc, ret, row;
+ struct device *dev = &client->dev;
+
+ ret = i2c_smbus_read_i2c_block_data(client, PPKB_SCAN_CRC,
+ PPKB_BUF_LEN, new_buf);
+ if (ret != PPKB_BUF_LEN) {
+ dev_err(dev, "Failed to read scan data: %d\n", ret);
+ return;
+ }
+
+ crc = crc8(ppkb->crc_table, &new_buf[1], PPKB_COLS, CRC8_INIT_VALUE);
+ if (crc != new_buf[0]) {
+ dev_err(dev, "Bad scan data (%02x != %02x)\n", crc, new_buf[0]);
+ return;
+ }
+
+ ppkb->buf_swap = !ppkb->buf_swap;
+
+ for (col = 0; col < PPKB_COLS; ++col) {
+ u8 old = old_buf[1 + col];
+ u8 new = new_buf[1 + col];
+ u8 changed = old ^ new;
+
+ if (!changed)
+ continue;
+
+ for (row = 0; row < PPKB_ROWS; ++row) {
+ u8 mask = BIT(row);
+ u8 value = new & mask;
+ unsigned short code;
+ bool fn_state;
+
+ if (!(changed & mask))
+ continue;
+
+ /*
+ * Save off the FN key state when the key was pressed,
+ * and use that to determine the code during a release.
+ */
+ fn_state = value ? ppkb->fn_pressed : ppkb->fn_state[col] & mask;
+ if (fn_state)
+ ppkb->fn_state[col] ^= mask;
+
+ /* The FN layer is a second set of rows. */
+ code = MATRIX_SCAN_CODE(fn_state ? PPKB_ROWS + row : row,
+ col, row_shift);
+ input_event(ppkb->input, EV_MSC, MSC_SCAN, code);
+ input_report_key(ppkb->input, keymap[code], value);
+ if (keymap[code] == KEY_FN)
+ ppkb->fn_pressed = value;
+ }
+ }
+ input_sync(ppkb->input);
+}
+
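A worked example of the scan-code packing used above, with the FN layer as a second block of rows: for row_shift = get_count_order(PPKB_COLS) = 4, the key at row 3, column 9 (KEY_DOT in the keymap) packs to (3 << 4) + 9 = 57, and its FN variant (KEY_UP) to ((6 + 3) << 4) + 9 = 153. The same arithmetic as a tiny standalone program:

#include <stdio.h>

#define PPKB_ROWS	6
#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))

int main(void)
{
	int row_shift = 4;	/* get_count_order(12) == 4 */
	int row = 3, col = 9;	/* KEY_DOT on the base layer, KEY_UP on FN */

	printf("base code %d, FN code %d\n",
	       MATRIX_SCAN_CODE(row, col, row_shift),
	       MATRIX_SCAN_CODE(PPKB_ROWS + row, col, row_shift));

	return 0;
}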
+static irqreturn_t ppkb_irq_thread(int irq, void *data)
+{
+ struct i2c_client *client = data;
+
+ ppkb_update(client);
+
+ return IRQ_HANDLED;
+}
+
+static int ppkb_set_scan(struct i2c_client *client, bool enable)
+{
+ struct device *dev = &client->dev;
+ int ret, val;
+
+ ret = i2c_smbus_read_byte_data(client, PPKB_SYS_CONFIG);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read config: %d\n", ret);
+ return ret;
+ }
+
+ if (enable)
+ val = ret & ~PPKB_SYS_CONFIG_DISABLE_SCAN;
+ else
+ val = ret | PPKB_SYS_CONFIG_DISABLE_SCAN;
+
+ ret = i2c_smbus_write_byte_data(client, PPKB_SYS_CONFIG, val);
+ if (ret) {
+ dev_err(dev, "Failed to write config: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ppkb_open(struct input_dev *input)
+{
+ struct i2c_client *client = input_get_drvdata(input);
+ int error;
+
+ error = ppkb_set_scan(client, true);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static void ppkb_close(struct input_dev *input)
+{
+ struct i2c_client *client = input_get_drvdata(input);
+
+ ppkb_set_scan(client, false);
+}
+
+static void ppkb_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
+static int ppkb_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ unsigned int phys_rows, phys_cols;
+ struct pinephone_keyboard *ppkb;
+ struct regulator *vbat_supply;
+ u8 info[PPKB_MATRIX_SIZE + 1];
+ struct device_node *i2c_bus;
+ int ret;
+ int error;
+
+ vbat_supply = devm_regulator_get(dev, "vbat");
+ error = PTR_ERR_OR_ZERO(vbat_supply);
+ if (error) {
+ dev_err(dev, "Failed to get VBAT supply: %d\n", error);
+ return error;
+ }
+
+ error = regulator_enable(vbat_supply);
+ if (error) {
+ dev_err(dev, "Failed to enable VBAT: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(dev, ppkb_regulator_disable,
+ vbat_supply);
+ if (error)
+ return error;
+
+ ret = i2c_smbus_read_i2c_block_data(client, 0, sizeof(info), info);
+ if (ret != sizeof(info)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(dev, "Failed to read device ID: %d\n", error);
+ return error;
+ }
+
+ if (info[PPKB_DEVICE_ID_HI] != PPKB_DEVICE_ID_HI_VALUE ||
+ info[PPKB_DEVICE_ID_LO] != PPKB_DEVICE_ID_LO_VALUE) {
+ dev_warn(dev, "Unexpected device ID: %#02x %#02x\n",
+ info[PPKB_DEVICE_ID_HI], info[PPKB_DEVICE_ID_LO]);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "Found firmware version %d.%d features %#x\n",
+ info[PPKB_FW_REVISION] >> 4,
+ info[PPKB_FW_REVISION] & 0xf,
+ info[PPKB_FW_FEATURES]);
+
+ phys_rows = info[PPKB_MATRIX_SIZE] & 0xf;
+ phys_cols = info[PPKB_MATRIX_SIZE] >> 4;
+ if (phys_rows != PPKB_ROWS || phys_cols != PPKB_COLS) {
+ dev_err(dev, "Unexpected keyboard size %ux%u\n",
+ phys_rows, phys_cols);
+ return -EINVAL;
+ }
+
+ /* Disable scan by default to save power. */
+ error = ppkb_set_scan(client, false);
+ if (error)
+ return error;
+
+ ppkb = devm_kzalloc(dev, sizeof(*ppkb), GFP_KERNEL);
+ if (!ppkb)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ppkb);
+
+ i2c_bus = of_get_child_by_name(dev->of_node, "i2c");
+ if (i2c_bus) {
+ ppkb->adapter.owner = THIS_MODULE;
+ ppkb->adapter.algo = &ppkb_adap_algo;
+ ppkb->adapter.algo_data = client;
+ ppkb->adapter.dev.parent = dev;
+ ppkb->adapter.dev.of_node = i2c_bus;
+ strscpy(ppkb->adapter.name, DRV_NAME, sizeof(ppkb->adapter.name));
+
+ error = devm_i2c_add_adapter(dev, &ppkb->adapter);
+ if (error) {
+ dev_err(dev, "Failed to add I2C adapter: %d\n", error);
+ return error;
+ }
+ }
+
+ crc8_populate_msb(ppkb->crc_table, PPKB_CRC8_POLYNOMIAL);
+
+ ppkb->input = devm_input_allocate_device(dev);
+ if (!ppkb->input)
+ return -ENOMEM;
+
+ input_set_drvdata(ppkb->input, client);
+
+ ppkb->input->name = "PinePhone Keyboard";
+ ppkb->input->phys = DRV_NAME "/input0";
+ ppkb->input->id.bustype = BUS_I2C;
+ ppkb->input->open = ppkb_open;
+ ppkb->input->close = ppkb_close;
+
+ input_set_capability(ppkb->input, EV_MSC, MSC_SCAN);
+ __set_bit(EV_REP, ppkb->input->evbit);
+
+ error = matrix_keypad_build_keymap(&ppkb_keymap_data, NULL,
+ 2 * PPKB_ROWS, PPKB_COLS, NULL,
+ ppkb->input);
+ if (error) {
+ dev_err(dev, "Failed to build keymap: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(ppkb->input);
+ if (error) {
+ dev_err(dev, "Failed to register input: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, ppkb_irq_thread,
+ IRQF_ONESHOT, client->name, client);
+ if (error) {
+ dev_err(dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ppkb_of_match[] = {
+ { .compatible = "pine64,pinephone-keyboard" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ppkb_of_match);
+
+static struct i2c_driver ppkb_driver = {
+ .probe_new = ppkb_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ppkb_of_match,
+ },
+};
+module_i2c_driver(ppkb_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Pine64 PinePhone keyboard driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index a045d61165ac..a62bb8fff88c 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -8,12 +8,14 @@
* Based on sh_keysc.c, copyright 2008 Magnus Damm
*/
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/io.h>
+#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#define ST_KEYSCAN_MAXKEYS 16
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index a4977193dd4a..56e784936059 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -10,9 +10,6 @@
* by Justin Cormack
*/
-/*
- */
-
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index d450f11b98a7..b123a208ef36 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -7,9 +7,6 @@
* Sun keyboard driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 89b9575dc75d..78e55318ccd6 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -70,7 +70,7 @@
#define TC3589x_KBD_INT_CLR 0x1
/**
- * struct tc35893_keypad_platform_data - platform specific keypad data
+ * struct tc3589x_keypad_platform_data - platform specific keypad data
* @keymap_data: matrix scan code table for keycodes
* @krow: mask for available rows, value is 0xFF
* @kcol: mask for available columns, value is 0xFF
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 280796df679a..c9d7c2481726 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -7,9 +7,6 @@
* XT keyboard driver for Linux
*/
-/*
- */
-
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index a18ab7358d8f..9f088900f863 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -730,6 +730,24 @@ config INPUT_ADXL34X_SPI
To compile this driver as a module, choose M here: the
module will be called adxl34x-spi.
+config INPUT_IBM_PANEL
+ tristate "IBM Operation Panel driver"
+ depends on I2C && I2C_SLAVE
+ help
+ Say Y here if you have an IBM Operation Panel connected to your system
+ over I2C. The panel is typically connected only to a system's service
+ processor (BMC).
+
+ The Operation Panel is a controller with some buttons and an LCD
+ display that allows someone with physical access to the system to
+ perform various administrative tasks. This driver only supports the part
+ of the controller that sends commands to the system.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ibm-panel.
+
+ If unsure, say N.
+
config INPUT_IMS_PCU
tristate "IMS Passenger Control Unit driver"
depends on USB
@@ -891,6 +909,15 @@ config INPUT_SC27XX_VIBRA
To compile this driver as a module, choose M here. The module will
be called sc27xx_vibra.
+config INPUT_RT5120_PWRKEY
+ tristate "RT5120 PMIC power key support"
+ depends on MFD_RT5120 || COMPILE_TEST
+ help
+ This enables support for the RT5120 PMIC power key driver.
+
+ To compile this driver as a module, choose M here. The module will
+ be called rt5120-pwrkey.
+
config INPUT_STPMIC1_ONKEY
tristate "STPMIC1 PMIC Onkey support"
depends on MFD_STPMIC1
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 28dfc444f0a9..6abefc41037b 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o
obj-$(CONFIG_INPUT_GPIO_VIBRA) += gpio-vibra.o
obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
+obj-$(CONFIG_INPUT_IBM_PANEL) += ibm-panel.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
obj-$(CONFIG_INPUT_IQS626A) += iqs626a.o
@@ -69,6 +70,7 @@ obj-$(CONFIG_INPUT_RAVE_SP_PWRBUTTON) += rave-sp-pwrbutton.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o
obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
+obj-$(CONFIG_INPUT_RT5120_PWRKEY) += rt5120-pwrkey.o
obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o
diff --git a/drivers/input/misc/ibm-panel.c b/drivers/input/misc/ibm-panel.c
new file mode 100644
index 000000000000..a8fba0054719
--- /dev/null
+++ b/drivers/input/misc/ibm-panel.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) IBM Corporation 2020
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+
+#define DEVICE_NAME "ibm-panel"
+#define PANEL_KEYCODES_COUNT 3
+
+struct ibm_panel {
+ u8 idx;
+ u8 command[11];
+ u32 keycodes[PANEL_KEYCODES_COUNT];
+ spinlock_t lock; /* protects writes to idx and command */
+ struct input_dev *input;
+};
+
+static u8 ibm_panel_calculate_checksum(struct ibm_panel *panel)
+{
+ u8 chksum;
+ u16 sum = 0;
+ unsigned int i;
+
+ for (i = 0; i < sizeof(panel->command) - 1; ++i) {
+ sum += panel->command[i];
+ if (sum & 0xff00) {
+ sum &= 0xff;
+ sum++;
+ }
+ }
+
+ chksum = sum & 0xff;
+ chksum = ~chksum;
+ chksum++;
+
+ return chksum;
+}
+
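The checksum above is an end-around-carry sum of the first ten command bytes, negated (two's complement). A standalone sketch with a made-up frame; only the 0xff 0xf0 header bytes come from the validation done in ibm_panel_process_command() below:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same end-around-carry sum plus two's complement as the driver above. */
static uint8_t panel_checksum(const uint8_t *cmd, size_t len)
{
	uint16_t sum = 0;

	for (size_t i = 0; i < len - 1; i++) {
		sum += cmd[i];
		if (sum & 0xff00) {	/* fold the carry back in */
			sum &= 0xff;
			sum++;
		}
	}

	return ~(sum & 0xff) + 1;
}

int main(void)
{
	/* Hypothetical 11-byte command; the last byte is the checksum slot. */
	uint8_t cmd[11] = { 0xff, 0xf0, 0x01 };

	cmd[10] = panel_checksum(cmd, sizeof(cmd));
	printf("checksum byte: 0x%02x\n", cmd[10]);

	return 0;
}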
+static void ibm_panel_process_command(struct ibm_panel *panel)
+{
+ u8 button;
+ u8 chksum;
+
+ if (panel->command[0] != 0xff && panel->command[1] != 0xf0) {
+ dev_dbg(&panel->input->dev, "command invalid: %02x %02x\n",
+ panel->command[0], panel->command[1]);
+ return;
+ }
+
+ chksum = ibm_panel_calculate_checksum(panel);
+ if (chksum != panel->command[sizeof(panel->command) - 1]) {
+ dev_dbg(&panel->input->dev,
+ "command failed checksum: %u != %u\n", chksum,
+ panel->command[sizeof(panel->command) - 1]);
+ return;
+ }
+
+ button = panel->command[2] & 0xf;
+ if (button < PANEL_KEYCODES_COUNT) {
+ input_report_key(panel->input, panel->keycodes[button],
+ !(panel->command[2] & 0x80));
+ input_sync(panel->input);
+ } else {
+ dev_dbg(&panel->input->dev, "unknown button %u\n",
+ button);
+ }
+}
+
+static int ibm_panel_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ unsigned long flags;
+ struct ibm_panel *panel = i2c_get_clientdata(client);
+
+ dev_dbg(&panel->input->dev, "event: %u data: %02x\n", event, *val);
+
+ spin_lock_irqsave(&panel->lock, flags);
+
+ switch (event) {
+ case I2C_SLAVE_STOP:
+ if (panel->idx == sizeof(panel->command))
+ ibm_panel_process_command(panel);
+ else
+ dev_dbg(&panel->input->dev,
+ "command incorrect size %u\n", panel->idx);
+ fallthrough;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ panel->idx = 0;
+ break;
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (panel->idx < sizeof(panel->command))
+ panel->command[panel->idx++] = *val;
+ else
+ /*
+ * The command is too long and therefore invalid, so set the index
+			 * to its largest possible value. When a STOP is finally received,
+ * the command will be rejected upon processing.
+ */
+ panel->idx = U8_MAX;
+ break;
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ *val = 0xff;
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&panel->lock, flags);
+
+ return 0;
+}
+
+static int ibm_panel_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ibm_panel *panel;
+ int i;
+ int error;
+
+ panel = devm_kzalloc(&client->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ spin_lock_init(&panel->lock);
+
+ panel->input = devm_input_allocate_device(&client->dev);
+ if (!panel->input)
+ return -ENOMEM;
+
+ panel->input->name = client->name;
+ panel->input->id.bustype = BUS_I2C;
+
+ error = device_property_read_u32_array(&client->dev,
+ "linux,keycodes",
+ panel->keycodes,
+ PANEL_KEYCODES_COUNT);
+ if (error) {
+ /*
+ * Use gamepad buttons as defaults for compatibility with
+ * existing applications.
+ */
+ panel->keycodes[0] = BTN_NORTH;
+ panel->keycodes[1] = BTN_SOUTH;
+ panel->keycodes[2] = BTN_SELECT;
+ }
+
+ for (i = 0; i < PANEL_KEYCODES_COUNT; ++i)
+ input_set_capability(panel->input, EV_KEY, panel->keycodes[i]);
+
+ error = input_register_device(panel->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ i2c_set_clientdata(client, panel);
+ error = i2c_slave_register(client, ibm_panel_i2c_slave_cb);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register as i2c slave: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void ibm_panel_remove(struct i2c_client *client)
+{
+ i2c_slave_unregister(client);
+}
+
+static const struct of_device_id ibm_panel_match[] = {
+ { .compatible = "ibm,op-panel" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ibm_panel_match);
+
+static struct i2c_driver ibm_panel_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = ibm_panel_match,
+ },
+ .probe = ibm_panel_probe,
+ .remove = ibm_panel_remove,
+};
+module_i2c_driver(ibm_panel_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("IBM Operation Panel Driver");
+MODULE_LICENSE("GPL");
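For reference, the end-around-carry checksum that ibm_panel_calculate_checksum() verifies can be reproduced on the sending side with a few lines of userspace C. This is only an illustrative sketch, not part of the patch: the 0xff/0xf0 header is what the driver's validity check expects, but the button value and the idea of building the command on a host are assumptions.

#include <stdint.h>
#include <stdio.h>

/* Fold carries back into the low byte, then take the two's complement,
 * mirroring ibm_panel_calculate_checksum() above. */
static uint8_t panel_checksum(const uint8_t *cmd, size_t len)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		sum += cmd[i];
		if (sum & 0xff00) {
			sum &= 0xff;
			sum++;
		}
	}

	return (uint8_t)(~(sum & 0xff) + 1);
}

int main(void)
{
	/* 0xff 0xf0 header, button 0 pressed (bit 7 clear); bytes 3..9 left zero */
	uint8_t cmd[11] = { 0xff, 0xf0, 0x00 };

	cmd[10] = panel_checksum(cmd, 10);	/* checksum covers the first 10 bytes */
	printf("trailing checksum byte: 0x%02x\n", cmd[10]);
	return 0;
}

A command built this way passes the checksum comparison in ibm_panel_process_command() and reports the first keycode as pressed.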
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 6f38aa23a1ff..b2f1292e27ef 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -744,7 +744,7 @@ static int ims_pcu_switch_to_bootloader(struct ims_pcu *pcu)
error = ims_pcu_execute_command(pcu, JUMP_TO_BTLDR, NULL, 0);
if (error) {
dev_err(pcu->dev,
- "Failure when sending JUMP TO BOOLTLOADER command, error: %d\n",
+ "Failure when sending JUMP TO BOOTLOADER command, error: %d\n",
error);
return error;
}
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index b2e8097a2e6d..ddb863bf63ee 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -1077,7 +1077,7 @@ static int iqs7222_hard_reset(struct iqs7222_private *iqs7222)
static int iqs7222_force_comms(struct iqs7222_private *iqs7222)
{
- u8 msg_buf[] = { 0xFF, 0x00, };
+ u8 msg_buf[] = { 0xFF, };
int ret;
/*
@@ -1771,11 +1771,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
if (!chan_node)
return 0;
- if (dev_desc->allow_offset) {
- sys_setup[dev_desc->allow_offset] |= BIT(chan_index);
- if (fwnode_property_present(chan_node, "azoteq,ulp-allow"))
- sys_setup[dev_desc->allow_offset] &= ~BIT(chan_index);
- }
+ if (dev_desc->allow_offset &&
+ fwnode_property_present(chan_node, "azoteq,ulp-allow"))
+ sys_setup[dev_desc->allow_offset] &= ~BIT(chan_index);
chan_setup[0] |= IQS7222_CHAN_SETUP_0_CHAN_EN;
@@ -2206,6 +2204,9 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
u16 *sys_setup = iqs7222->sys_setup;
int error, i;
+ if (dev_desc->allow_offset)
+ sys_setup[dev_desc->allow_offset] = U16_MAX;
+
if (dev_desc->event_offset)
sys_setup[dev_desc->event_offset] = IQS7222_EVENT_MASK_ATI;
@@ -2326,6 +2327,9 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
int k = 2 + j * (num_chan > 16 ? 2 : 1);
u16 state = le16_to_cpu(status[k + i / 16]);
+ if (!iqs7222->kp_type[i][j])
+ continue;
+
input_event(iqs7222->keypad,
iqs7222->kp_type[i][j],
iqs7222->kp_code[i][j],
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 4650f4a94989..bee4b1376491 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -485,7 +485,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
}
if (udev->manufacturer)
- strlcpy(remote->name, udev->manufacturer, sizeof(remote->name));
+ strscpy(remote->name, udev->manufacturer, sizeof(remote->name));
if (udev->product) {
if (udev->manufacturer)
diff --git a/drivers/input/misc/rt5120-pwrkey.c b/drivers/input/misc/rt5120-pwrkey.c
new file mode 100644
index 000000000000..8a8c1aeeed05
--- /dev/null
+++ b/drivers/input/misc/rt5120-pwrkey.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define RT5120_REG_INTSTAT 0x1E
+#define RT5120_PWRKEYSTAT_MASK BIT(7)
+
+struct rt5120_priv {
+ struct regmap *regmap;
+ struct input_dev *input;
+};
+
+static irqreturn_t rt5120_pwrkey_handler(int irq, void *devid)
+{
+ struct rt5120_priv *priv = devid;
+ unsigned int stat;
+ int error;
+
+ error = regmap_read(priv->regmap, RT5120_REG_INTSTAT, &stat);
+ if (error)
+ return IRQ_NONE;
+
+ input_report_key(priv->input, KEY_POWER,
+ !(stat & RT5120_PWRKEYSTAT_MASK));
+ input_sync(priv->input);
+
+ return IRQ_HANDLED;
+}
+
+static int rt5120_pwrkey_probe(struct platform_device *pdev)
+{
+ struct rt5120_priv *priv;
+ struct device *dev = &pdev->dev;
+ int press_irq, release_irq;
+ int error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->regmap) {
+ dev_err(dev, "Failed to init regmap\n");
+ return -ENODEV;
+ }
+
+ press_irq = platform_get_irq_byname(pdev, "pwrkey-press");
+ if (press_irq < 0)
+ return press_irq;
+
+ release_irq = platform_get_irq_byname(pdev, "pwrkey-release");
+ if (release_irq < 0)
+ return release_irq;
+
+	/* Make the input device resource managed */
+ priv->input = devm_input_allocate_device(dev);
+ if (!priv->input)
+ return -ENOMEM;
+
+ priv->input->name = "rt5120_pwrkey";
+ priv->input->phys = "rt5120_pwrkey/input0";
+ priv->input->id.bustype = BUS_I2C;
+ input_set_capability(priv->input, EV_KEY, KEY_POWER);
+
+ error = input_register_device(priv->input);
+ if (error) {
+ dev_err(dev, "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, press_irq,
+ NULL, rt5120_pwrkey_handler,
+ 0, "pwrkey-press", priv);
+ if (error) {
+ dev_err(dev,
+ "Failed to register pwrkey press irq: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, release_irq,
+ NULL, rt5120_pwrkey_handler,
+ 0, "pwrkey-release", priv);
+ if (error) {
+ dev_err(dev,
+ "Failed to register pwrkey release irq: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id r5120_pwrkey_match_table[] = {
+ { .compatible = "richtek,rt5120-pwrkey" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, r5120_pwrkey_match_table);
+
+static struct platform_driver rt5120_pwrkey_driver = {
+ .driver = {
+ .name = "rt5120-pwrkey",
+ .of_match_table = r5120_pwrkey_match_table,
+ },
+ .probe = rt5120_pwrkey_probe,
+};
+module_platform_driver(rt5120_pwrkey_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT5120 power key driver");
+MODULE_LICENSE("GPL");
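The two IRQ names looked up above ("pwrkey-press" and "pwrkey-release") have to be supplied by whatever instantiates the platform device, typically the RT5120 MFD core via named IRQ resources or an "interrupt-names" devicetree property. The fragment below is only a sketch of the named-resource variant with made-up IRQ indices; it is not taken from the in-tree MFD_RT5120 driver.

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>

/* Hypothetical parent-side wiring: the IRQ indices 1 and 2 are placeholders. */
static const struct resource rt5120_pwrkey_resources[] = {
	DEFINE_RES_IRQ_NAMED(1, "pwrkey-press"),
	DEFINE_RES_IRQ_NAMED(2, "pwrkey-release"),
};

static const struct mfd_cell rt5120_devs[] = {
	{
		.name = "rt5120-pwrkey",
		.resources = rt5120_pwrkey_resources,
		.num_resources = ARRAY_SIZE(rt5120_pwrkey_resources),
	},
};

With either mechanism in place, platform_get_irq_byname() in rt5120_pwrkey_probe() resolves both interrupts before the input device is registered.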
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index b307cca17022..e3ee0638ffba 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -26,6 +26,7 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mfd/twl.h>
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index e0ff616fb857..5619996da86f 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -163,14 +163,10 @@ static int __maybe_unused twl4030_vibra_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
-static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
- struct device_node *parent)
+static bool twl4030_vibra_check_coexist(struct device_node *parent)
{
struct device_node *node;
- if (pdata && pdata->coexist)
- return true;
-
node = of_get_child_by_name(parent, "codec");
if (node) {
of_node_put(node);
@@ -182,13 +178,12 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
static int twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_vibra_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *twl4030_core_node = pdev->dev.parent->of_node;
struct vibra_info *info;
int ret;
- if (!pdata && !twl4030_core_node) {
- dev_dbg(&pdev->dev, "platform_data not available\n");
+ if (!twl4030_core_node) {
+ dev_dbg(&pdev->dev, "twl4030 OF node is missing\n");
return -EINVAL;
}
@@ -197,7 +192,7 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
return -ENOMEM;
info->dev = &pdev->dev;
- info->coexist = twl4030_vibra_check_coexist(pdata, twl4030_core_node);
+ info->coexist = twl4030_vibra_check_coexist(twl4030_core_node);
INIT_WORK(&info->play_work, vibra_play_work);
info->input_dev = devm_input_allocate_device(&pdev->dev);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index e1758d5ffe42..d4eb59b55bf1 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1311,12 +1311,6 @@ static int elan_probe(struct i2c_client *client,
return error;
}
- error = devm_device_add_groups(dev, elan_sysfs_groups);
- if (error) {
- dev_err(dev, "failed to create sysfs attributes: %d\n", error);
- return error;
- }
-
error = input_register_device(data->input);
if (error) {
dev_err(dev, "failed to register input device: %d\n", error);
@@ -1442,6 +1436,7 @@ static struct i2c_driver elan_driver = {
.acpi_match_table = ACPI_PTR(elan_acpi_id),
.of_match_table = of_match_ptr(elan_of_match),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .dev_groups = elan_sysfs_groups,
},
.probe = elan_probe,
.id_table = elan_id,
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 4dc441309aac..3c8310da0b05 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -884,7 +884,7 @@ static ssize_t hgpk_trigger_recal(struct psmouse *psmouse, void *data,
/*
* We queue work instead of doing recalibration right here
- * to avoid adding locking to to hgpk_force_recalibrate()
+ * to avoid adding locking to hgpk_force_recalibrate()
* since workqueue provides serialization.
*/
psmouse_queue_work(psmouse, &priv->recalib_wq, 0);
@@ -1057,7 +1057,7 @@ void hgpk_module_init(void)
strlen(hgpk_mode_name));
if (hgpk_default_mode == HGPK_MODE_INVALID) {
hgpk_default_mode = HGPK_MODE_MOUSE;
- strlcpy(hgpk_mode_name, hgpk_mode_names[HGPK_MODE_MOUSE],
+ strscpy(hgpk_mode_name, hgpk_mode_names[HGPK_MODE_MOUSE],
sizeof(hgpk_mode_name));
}
}
diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c
index df5d1160478c..401d8bff8e84 100644
--- a/drivers/input/mouse/inport.c
+++ b/drivers/input/mouse/inport.c
@@ -13,9 +13,6 @@
* Inport (ATI XL and Microsoft) busmouse driver for Linux
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c
index bd647f9f505a..0aab63dbc30a 100644
--- a/drivers/input/mouse/logibm.c
+++ b/drivers/input/mouse/logibm.c
@@ -14,9 +14,6 @@
* Logitech Bus Mouse Driver for Linux
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ioport.h>
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index f75574766b85..efa58049f746 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -10,9 +10,6 @@
* IBM PC110 touchpad driver for Linux
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 0b4a3039f312..c9a7e87b273e 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -94,7 +94,7 @@ PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO,
(void *) offsetof(struct psmouse, resync_time),
psmouse_show_int_attr, psmouse_set_int_attr);
-static struct attribute *psmouse_attributes[] = {
+static struct attribute *psmouse_dev_attrs[] = {
&psmouse_attr_protocol.dattr.attr,
&psmouse_attr_rate.dattr.attr,
&psmouse_attr_resolution.dattr.attr,
@@ -103,9 +103,7 @@ static struct attribute *psmouse_attributes[] = {
NULL
};
-static const struct attribute_group psmouse_attribute_group = {
- .attrs = psmouse_attributes,
-};
+ATTRIBUTE_GROUPS(psmouse_dev);
/*
* psmouse_mutex protects all operations changing state of mouse
@@ -1481,8 +1479,6 @@ static void psmouse_disconnect(struct serio *serio)
struct psmouse *psmouse = serio_get_drvdata(serio);
struct psmouse *parent = NULL;
- sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group);
-
mutex_lock(&psmouse_mutex);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
@@ -1647,10 +1643,6 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
if (parent && parent->pt_activate)
parent->pt_activate(parent);
- error = sysfs_create_group(&serio->dev.kobj, &psmouse_attribute_group);
- if (error)
- goto err_pt_deactivate;
-
/*
* PS/2 devices having SMBus companions should stay disabled
* on PS/2 side, in order to have SMBus part operable.
@@ -1666,13 +1658,6 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
mutex_unlock(&psmouse_mutex);
return retval;
- err_pt_deactivate:
- if (parent && parent->pt_deactivate)
- parent->pt_deactivate(parent);
- if (input_dev) {
- input_unregister_device(input_dev);
- input_dev = NULL; /* so we don't try to free it below */
- }
err_protocol_disconnect:
if (psmouse->disconnect)
psmouse->disconnect(psmouse);
@@ -1791,7 +1776,8 @@ MODULE_DEVICE_TABLE(serio, psmouse_serio_ids);
static struct serio_driver psmouse_drv = {
.driver = {
- .name = "psmouse",
+ .name = "psmouse",
+ .dev_groups = psmouse_dev_groups,
},
.description = DRIVER_DESC,
.id_table = psmouse_serio_ids,
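The psmouse conversion above relies on ATTRIBUTE_GROUPS(psmouse_dev) to generate the psmouse_dev_groups pointer array that .dev_groups now references, so the driver core creates and removes the sysfs files instead of psmouse_connect()/psmouse_disconnect(). Roughly, the macro expands to the following (simplified sketch of the helper from include/linux/sysfs.h):

static const struct attribute_group psmouse_dev_group = {
	.attrs = psmouse_dev_attrs,
};

static const struct attribute_group *psmouse_dev_groups[] = {
	&psmouse_dev_group,
	NULL,
};

The elan_i2c and aiptek hunks elsewhere in this series move to .dev_groups using the same mechanism.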
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index caa79c177c55..993f90333380 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -7,9 +7,6 @@
* Serial mouse driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ffad142801b3..fa021af8506e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0099", /* X1 Extreme Gen 1 / P1 Gen 1 */
"LEN009b", /* T580 */
"LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */
+ "LEN040f", /* P1 Gen 3 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
"LEN2054", /* E480 */
@@ -714,8 +715,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
}
serio->id.type = SERIO_PS_PSTHRU;
- strlcpy(serio->name, "Synaptics pass-through", sizeof(serio->name));
- strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->phys));
+ strscpy(serio->name, "Synaptics pass-through", sizeof(serio->name));
+ strscpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->phys));
serio->write = synaptics_pt_write;
serio->start = synaptics_pt_start;
serio->stop = synaptics_pt_stop;
diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c
index b5ff27e32a0c..75e45f3ae675 100644
--- a/drivers/input/mouse/synaptics_usb.c
+++ b/drivers/input/mouse/synaptics_usb.c
@@ -354,7 +354,7 @@ static int synusb_probe(struct usb_interface *intf,
synusb->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (udev->manufacturer)
- strlcpy(synusb->name, udev->manufacturer,
+ strscpy(synusb->name, udev->manufacturer,
sizeof(synusb->name));
if (udev->product) {
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index bd415f4b574e..8af8e4a15f95 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -13,9 +13,6 @@
*/
/*
- */
-
-/*
* Building an adaptor to DE9 / DB25 RS232
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -138,12 +135,12 @@ static void vsxxxaa_detection_done(struct vsxxxaa *mouse)
{
switch (mouse->type) {
case 0x02:
- strlcpy(mouse->name, "DEC VSXXX-AA/-GA mouse",
+ strscpy(mouse->name, "DEC VSXXX-AA/-GA mouse",
sizeof(mouse->name));
break;
case 0x04:
- strlcpy(mouse->name, "DEC VSXXX-AB digitizer",
+ strscpy(mouse->name, "DEC VSXXX-AB digitizer",
sizeof(mouse->name));
break;
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index c194b1664b10..1e11ea30d7bd 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -181,7 +181,7 @@ static int rmi_f03_register_pt(struct f03_data *f03)
serio->close = rmi_f03_pt_close;
serio->port_data = f03;
- strlcpy(serio->name, "RMI4 PS/2 pass-through", sizeof(serio->name));
+ strscpy(serio->name, "RMI4 PS/2 pass-through", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0",
dev_name(&f03->fn->dev));
serio->dev.parent = &f03->fn->dev;
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index e5dca9868f87..0d9a5756e3f5 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -114,13 +114,13 @@ static irqreturn_t rmi_f34_attention(int irq, void *ctx)
complete(&f34->v5.cmd_done);
} else {
ret = rmi_read_block(f34->fn->rmi_dev,
- f34->fn->fd.data_base_addr +
- f34->v7.off.flash_status,
- &status, sizeof(status));
- rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: status: %#02x, ret: %d\n",
+ f34->fn->fd.data_base_addr +
+ V7_COMMAND_OFFSET,
+ &status, sizeof(status));
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: cmd: %#02x, ret: %d\n",
__func__, status, ret);
- if (!ret && !(status & 0x1f))
+ if (!ret && status == CMD_V7_IDLE)
complete(&f34->v7.cmd_done);
}
@@ -321,13 +321,13 @@ static ssize_t rmi_driver_bootloader_id_show(struct device *dev,
f34 = dev_get_drvdata(&fn->dev);
if (f34->bl_version == 5)
- return scnprintf(buf, PAGE_SIZE, "%c%c\n",
- f34->bootloader_id[0],
- f34->bootloader_id[1]);
+ return sysfs_emit(buf, "%c%c\n",
+ f34->bootloader_id[0],
+ f34->bootloader_id[1]);
else
- return scnprintf(buf, PAGE_SIZE, "V%d.%d\n",
- f34->bootloader_id[1],
- f34->bootloader_id[0]);
+ return sysfs_emit(buf, "V%d.%d\n",
+ f34->bootloader_id[1],
+ f34->bootloader_id[0]);
}
return 0;
@@ -346,7 +346,7 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev,
if (fn) {
f34 = dev_get_drvdata(&fn->dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", f34->configuration_id);
+ return sysfs_emit(buf, "%s\n", f34->configuration_id);
}
return 0;
@@ -370,7 +370,7 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
f34 = dev_get_drvdata(&data->f34_container->dev);
- if (f34->bl_version == 7) {
+ if (f34->bl_version >= 7) {
if (data->pdt_props & HAS_BSR) {
dev_err(dev, "%s: LTS not supported\n", __func__);
return -ENODEV;
@@ -382,7 +382,7 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
}
/* Enter flash mode */
- if (f34->bl_version == 7)
+ if (f34->bl_version >= 7)
ret = rmi_f34v7_start_reflash(f34, fw);
else
ret = rmi_f34_enable_flash(f34);
@@ -413,7 +413,7 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
f34 = dev_get_drvdata(&data->f34_container->dev);
/* Perform firmware update */
- if (f34->bl_version == 7)
+ if (f34->bl_version >= 7)
ret = rmi_f34v7_do_reflash(f34, fw);
else
ret = rmi_f34_update_firmware(f34, fw);
@@ -499,7 +499,7 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev,
if (data->f34_container)
update_status = rmi_f34_status(data->f34_container);
- return scnprintf(buf, PAGE_SIZE, "%d\n", update_status);
+ return sysfs_emit(buf, "%d\n", update_status);
}
static DEVICE_ATTR(update_fw_status, 0444,
diff --git a/drivers/input/rmi4/rmi_f34.h b/drivers/input/rmi4/rmi_f34.h
index 99faa8c2269d..cfa3039804fd 100644
--- a/drivers/input/rmi4/rmi_f34.h
+++ b/drivers/input/rmi4/rmi_f34.h
@@ -222,20 +222,6 @@ struct image_metadata {
struct physical_address phyaddr;
};
-struct register_offset {
- u8 properties;
- u8 properties_2;
- u8 block_size;
- u8 block_count;
- u8 gc_block_count;
- u8 flash_status;
- u8 partition_id;
- u8 block_number;
- u8 transfer_length;
- u8 flash_cmd;
- u8 payload;
-};
-
struct rmi_f34_firmware {
__le32 checksum;
u8 pad1[3];
@@ -262,7 +248,6 @@ struct f34v5_data {
struct f34v7_data {
bool has_display_cfg;
bool has_guest_code;
- bool force_update;
bool in_bl_mode;
u8 *read_config_buf;
size_t read_config_buf_size;
@@ -276,9 +261,7 @@ struct f34v7_data {
u16 payload_length;
u8 partitions;
u16 partition_table_bytes;
- bool new_partition_table;
- struct register_offset off;
struct block_count blkcount;
struct physical_address phyaddr;
struct image_metadata img;
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
index 8d7ec9d89b18..886557b01eba 100644
--- a/drivers/input/rmi4/rmi_f34v7.c
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -25,7 +25,7 @@ static int rmi_f34v7_read_flash_status(struct f34_data *f34)
int ret;
ret = rmi_read_block(f34->fn->rmi_dev,
- f34->fn->fd.data_base_addr + f34->v7.off.flash_status,
+ f34->fn->fd.data_base_addr + V7_FLASH_STATUS_OFFSET,
&status,
sizeof(status));
if (ret < 0) {
@@ -43,7 +43,7 @@ static int rmi_f34v7_read_flash_status(struct f34_data *f34)
}
ret = rmi_read_block(f34->fn->rmi_dev,
- f34->fn->fd.data_base_addr + f34->v7.off.flash_cmd,
+ f34->fn->fd.data_base_addr + V7_COMMAND_OFFSET,
&command,
sizeof(command));
if (ret < 0) {
@@ -72,6 +72,24 @@ static int rmi_f34v7_wait_for_idle(struct f34_data *f34, int timeout_ms)
return 0;
}
+static int rmi_f34v7_check_command_status(struct f34_data *f34, int timeout_ms)
+{
+ int ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, timeout_ms);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_read_flash_status(f34);
+ if (ret < 0)
+ return ret;
+
+ if (f34->v7.flash_status != 0x00)
+ return -EIO;
+
+ return 0;
+}
+
static int rmi_f34v7_write_command_single_transaction(struct f34_data *f34,
u8 cmd)
{
@@ -122,7 +140,7 @@ static int rmi_f34v7_write_command_single_transaction(struct f34_data *f34,
data_1_5.payload[1] = f34->bootloader_id[1];
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.partition_id,
+ base + V7_PARTITION_ID_OFFSET,
&data_1_5, sizeof(data_1_5));
if (ret < 0) {
dev_err(&f34->fn->dev,
@@ -195,7 +213,7 @@ static int rmi_f34v7_write_command(struct f34_data *f34, u8 cmd)
__func__, command);
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.flash_cmd,
+ base + V7_COMMAND_OFFSET,
&command, sizeof(command));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write flash command\n",
@@ -262,7 +280,7 @@ static int rmi_f34v7_write_partition_id(struct f34_data *f34, u8 cmd)
}
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.partition_id,
+ base + V7_PARTITION_ID_OFFSET,
&partition, sizeof(partition));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write partition ID\n",
@@ -290,7 +308,7 @@ static int rmi_f34v7_read_partition_table(struct f34_data *f34)
return ret;
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.block_number,
+ base + V7_BLOCK_NUMBER_OFFSET,
&block_number, sizeof(block_number));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
@@ -301,7 +319,7 @@ static int rmi_f34v7_read_partition_table(struct f34_data *f34)
put_unaligned_le16(f34->v7.flash_config_length, &length);
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.transfer_length,
+ base + V7_TRANSFER_LENGTH_OFFSET,
&length, sizeof(length));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write transfer length\n",
@@ -318,6 +336,10 @@ static int rmi_f34v7_read_partition_table(struct f34_data *f34)
return ret;
}
+ /*
+ * rmi_f34v7_check_command_status() can't be used here, as this
+ * function is called before IRQs are available
+ */
timeout = msecs_to_jiffies(F34_WRITE_WAIT_MS);
while (time_before(jiffies, timeout)) {
usleep_range(5000, 6000);
@@ -330,7 +352,7 @@ static int rmi_f34v7_read_partition_table(struct f34_data *f34)
}
ret = rmi_read_block(f34->fn->rmi_dev,
- base + f34->v7.off.payload,
+ base + V7_PAYLOAD_OFFSET,
f34->v7.read_config_buf,
f34->v7.partition_table_bytes);
if (ret < 0) {
@@ -504,13 +526,6 @@ static int rmi_f34v7_read_queries(struct f34_data *f34)
rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.block_size = %d\n",
__func__, f34->v7.block_size);
- f34->v7.off.flash_status = V7_FLASH_STATUS_OFFSET;
- f34->v7.off.partition_id = V7_PARTITION_ID_OFFSET;
- f34->v7.off.block_number = V7_BLOCK_NUMBER_OFFSET;
- f34->v7.off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
- f34->v7.off.flash_cmd = V7_COMMAND_OFFSET;
- f34->v7.off.payload = V7_PAYLOAD_OFFSET;
-
f34->v7.has_display_cfg = query_1_7.partition_support[1] & HAS_DISP_CFG;
f34->v7.has_guest_code =
query_1_7.partition_support[1] & HAS_GUEST_CODE;
@@ -571,68 +586,6 @@ static int rmi_f34v7_read_queries(struct f34_data *f34)
return 0;
}
-static int rmi_f34v7_check_ui_firmware_size(struct f34_data *f34)
-{
- u16 block_count;
-
- block_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
- f34->update_size += block_count;
-
- if (block_count != f34->v7.blkcount.ui_firmware) {
- dev_err(&f34->fn->dev,
- "UI firmware size mismatch: %d != %d\n",
- block_count, f34->v7.blkcount.ui_firmware);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rmi_f34v7_check_ui_config_size(struct f34_data *f34)
-{
- u16 block_count;
-
- block_count = f34->v7.img.ui_config.size / f34->v7.block_size;
- f34->update_size += block_count;
-
- if (block_count != f34->v7.blkcount.ui_config) {
- dev_err(&f34->fn->dev, "UI config size mismatch\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rmi_f34v7_check_dp_config_size(struct f34_data *f34)
-{
- u16 block_count;
-
- block_count = f34->v7.img.dp_config.size / f34->v7.block_size;
- f34->update_size += block_count;
-
- if (block_count != f34->v7.blkcount.dp_config) {
- dev_err(&f34->fn->dev, "Display config size mismatch\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rmi_f34v7_check_guest_code_size(struct f34_data *f34)
-{
- u16 block_count;
-
- block_count = f34->v7.img.guest_code.size / f34->v7.block_size;
- f34->update_size += block_count;
-
- if (block_count != f34->v7.blkcount.guest_code) {
- dev_err(&f34->fn->dev, "Guest code size mismatch\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int rmi_f34v7_check_bl_config_size(struct f34_data *f34)
{
u16 block_count;
@@ -648,58 +601,6 @@ static int rmi_f34v7_check_bl_config_size(struct f34_data *f34)
return 0;
}
-static int rmi_f34v7_erase_config(struct f34_data *f34)
-{
- int ret;
-
- dev_info(&f34->fn->dev, "Erasing config...\n");
-
- init_completion(&f34->v7.cmd_done);
-
- switch (f34->v7.config_area) {
- case v7_UI_CONFIG_AREA:
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_CONFIG);
- if (ret < 0)
- return ret;
- break;
- case v7_DP_CONFIG_AREA:
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_DISP_CONFIG);
- if (ret < 0)
- return ret;
- break;
- case v7_BL_CONFIG_AREA:
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_BL_CONFIG);
- if (ret < 0)
- return ret;
- break;
- }
-
- ret = rmi_f34v7_wait_for_idle(f34, F34_ERASE_WAIT_MS);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int rmi_f34v7_erase_guest_code(struct f34_data *f34)
-{
- int ret;
-
- dev_info(&f34->fn->dev, "Erasing guest code...\n");
-
- init_completion(&f34->v7.cmd_done);
-
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_GUEST_CODE);
- if (ret < 0)
- return ret;
-
- ret = rmi_f34v7_wait_for_idle(f34, F34_ERASE_WAIT_MS);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int rmi_f34v7_erase_all(struct f34_data *f34)
{
int ret;
@@ -708,32 +609,14 @@ static int rmi_f34v7_erase_all(struct f34_data *f34)
init_completion(&f34->v7.cmd_done);
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_FIRMWARE);
- if (ret < 0)
- return ret;
-
- ret = rmi_f34v7_wait_for_idle(f34, F34_ERASE_WAIT_MS);
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_ALL);
if (ret < 0)
return ret;
- f34->v7.config_area = v7_UI_CONFIG_AREA;
- ret = rmi_f34v7_erase_config(f34);
+ ret = rmi_f34v7_check_command_status(f34, F34_ERASE_WAIT_MS);
if (ret < 0)
return ret;
- if (f34->v7.has_display_cfg) {
- f34->v7.config_area = v7_DP_CONFIG_AREA;
- ret = rmi_f34v7_erase_config(f34);
- if (ret < 0)
- return ret;
- }
-
- if (f34->v7.new_partition_table && f34->v7.has_guest_code) {
- ret = rmi_f34v7_erase_guest_code(f34);
- if (ret < 0)
- return ret;
- }
-
return 0;
}
@@ -756,7 +639,7 @@ static int rmi_f34v7_read_blocks(struct f34_data *f34,
return ret;
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.block_number,
+ base + V7_BLOCK_NUMBER_OFFSET,
&block_number, sizeof(block_number));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
@@ -772,7 +655,7 @@ static int rmi_f34v7_read_blocks(struct f34_data *f34,
put_unaligned_le16(transfer, &length);
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.transfer_length,
+ base + V7_TRANSFER_LENGTH_OFFSET,
&length, sizeof(length));
if (ret < 0) {
dev_err(&f34->fn->dev,
@@ -787,12 +670,12 @@ static int rmi_f34v7_read_blocks(struct f34_data *f34,
if (ret < 0)
return ret;
- ret = rmi_f34v7_wait_for_idle(f34, F34_ENABLE_WAIT_MS);
+ ret = rmi_f34v7_check_command_status(f34, F34_ENABLE_WAIT_MS);
if (ret < 0)
return ret;
ret = rmi_read_block(f34->fn->rmi_dev,
- base + f34->v7.off.payload,
+ base + V7_PAYLOAD_OFFSET,
&f34->v7.read_config_buf[index],
transfer * f34->v7.block_size);
if (ret < 0) {
@@ -828,7 +711,7 @@ static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
return ret;
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.block_number,
+ base + V7_BLOCK_NUMBER_OFFSET,
&block_number, sizeof(block_number));
if (ret < 0) {
dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
@@ -848,7 +731,7 @@ static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
init_completion(&f34->v7.cmd_done);
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.transfer_length,
+ base + V7_TRANSFER_LENGTH_OFFSET,
&length, sizeof(length));
if (ret < 0) {
dev_err(&f34->fn->dev,
@@ -862,7 +745,7 @@ static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
return ret;
ret = rmi_write_block(f34->fn->rmi_dev,
- base + f34->v7.off.payload,
+ base + V7_PAYLOAD_OFFSET,
block_ptr, transfer * f34->v7.block_size);
if (ret < 0) {
dev_err(&f34->fn->dev,
@@ -871,7 +754,7 @@ static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
return ret;
}
- ret = rmi_f34v7_wait_for_idle(f34, F34_ENABLE_WAIT_MS);
+ ret = rmi_f34v7_check_command_status(f34, F34_ENABLE_WAIT_MS);
if (ret < 0)
return ret;
@@ -937,17 +820,6 @@ static int rmi_f34v7_write_flash_config(struct f34_data *f34)
init_completion(&f34->v7.cmd_done);
- ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_FLASH_CONFIG);
- if (ret < 0)
- return ret;
-
- rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
- "%s: Erase flash config command written\n", __func__);
-
- ret = rmi_f34v7_wait_for_idle(f34, F34_WRITE_WAIT_MS);
- if (ret < 0)
- return ret;
-
ret = rmi_f34v7_write_config(f34);
if (ret < 0)
return ret;
@@ -977,10 +849,6 @@ static int rmi_f34v7_write_partition_table(struct f34_data *f34)
if (ret < 0)
return ret;
- ret = rmi_f34v7_erase_config(f34);
- if (ret < 0)
- return ret;
-
ret = rmi_f34v7_write_flash_config(f34);
if (ret < 0)
return ret;
@@ -1007,33 +875,6 @@ static int rmi_f34v7_write_firmware(struct f34_data *f34)
blk_count, v7_CMD_WRITE_FW);
}
-static void rmi_f34v7_compare_partition_tables(struct f34_data *f34)
-{
- if (f34->v7.phyaddr.ui_firmware != f34->v7.img.phyaddr.ui_firmware) {
- f34->v7.new_partition_table = true;
- return;
- }
-
- if (f34->v7.phyaddr.ui_config != f34->v7.img.phyaddr.ui_config) {
- f34->v7.new_partition_table = true;
- return;
- }
-
- if (f34->v7.has_display_cfg &&
- f34->v7.phyaddr.dp_config != f34->v7.img.phyaddr.dp_config) {
- f34->v7.new_partition_table = true;
- return;
- }
-
- if (f34->v7.has_guest_code &&
- f34->v7.phyaddr.guest_code != f34->v7.img.phyaddr.guest_code) {
- f34->v7.new_partition_table = true;
- return;
- }
-
- f34->v7.new_partition_table = false;
-}
-
static void rmi_f34v7_parse_img_header_10_bl_container(struct f34_data *f34,
const void *image)
{
@@ -1180,8 +1021,6 @@ static int rmi_f34v7_parse_image_info(struct f34_data *f34)
rmi_f34v7_parse_partition_table(f34, f34->v7.img.fl_config.data,
&f34->v7.img.blkcount, &f34->v7.img.phyaddr);
- rmi_f34v7_compare_partition_tables(f34);
-
return 0;
}
@@ -1200,53 +1039,35 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
ret = rmi_f34v7_parse_image_info(f34);
if (ret < 0)
- goto fail;
-
- if (!f34->v7.new_partition_table) {
- ret = rmi_f34v7_check_ui_firmware_size(f34);
- if (ret < 0)
- goto fail;
-
- ret = rmi_f34v7_check_ui_config_size(f34);
- if (ret < 0)
- goto fail;
-
- if (f34->v7.has_display_cfg &&
- f34->v7.img.contains_display_cfg) {
- ret = rmi_f34v7_check_dp_config_size(f34);
- if (ret < 0)
- goto fail;
- }
+ return ret;
- if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
- ret = rmi_f34v7_check_guest_code_size(f34);
- if (ret < 0)
- goto fail;
- }
- } else {
- ret = rmi_f34v7_check_bl_config_size(f34);
- if (ret < 0)
- goto fail;
- }
+ ret = rmi_f34v7_check_bl_config_size(f34);
+ if (ret < 0)
+ return ret;
ret = rmi_f34v7_erase_all(f34);
if (ret < 0)
- goto fail;
+ return ret;
- if (f34->v7.new_partition_table) {
- ret = rmi_f34v7_write_partition_table(f34);
- if (ret < 0)
- goto fail;
- dev_info(&f34->fn->dev, "%s: Partition table programmed\n",
- __func__);
- }
+ ret = rmi_f34v7_write_partition_table(f34);
+ if (ret < 0)
+ return ret;
+ dev_info(&f34->fn->dev, "%s: Partition table programmed\n", __func__);
+
+ /*
+ * Reset to reload partition table - as the previous firmware has been
+ * erased, we remain in bootloader mode.
+ */
+ ret = rmi_scan_pdt(f34->fn->rmi_dev, NULL, rmi_initial_reset);
+ if (ret < 0)
+ dev_warn(&f34->fn->dev, "RMI reset failed!\n");
dev_info(&f34->fn->dev, "Writing firmware (%d bytes)...\n",
f34->v7.img.ui_firmware.size);
ret = rmi_f34v7_write_firmware(f34);
if (ret < 0)
- goto fail;
+ return ret;
dev_info(&f34->fn->dev, "Writing config (%d bytes)...\n",
f34->v7.img.ui_config.size);
@@ -1254,28 +1075,25 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
f34->v7.config_area = v7_UI_CONFIG_AREA;
ret = rmi_f34v7_write_ui_config(f34);
if (ret < 0)
- goto fail;
+ return ret;
if (f34->v7.has_display_cfg && f34->v7.img.contains_display_cfg) {
dev_info(&f34->fn->dev, "Writing display config...\n");
ret = rmi_f34v7_write_dp_config(f34);
if (ret < 0)
- goto fail;
+ return ret;
}
- if (f34->v7.new_partition_table) {
- if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
- dev_info(&f34->fn->dev, "Writing guest code...\n");
+ if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+ dev_info(&f34->fn->dev, "Writing guest code...\n");
- ret = rmi_f34v7_write_guest_code(f34);
- if (ret < 0)
- goto fail;
- }
+ ret = rmi_f34v7_write_guest_code(f34);
+ if (ret < 0)
+ return ret;
}
-fail:
- return ret;
+ return 0;
}
static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
@@ -1288,8 +1106,11 @@ static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
if (ret < 0)
return ret;
- if (f34->v7.in_bl_mode)
+ if (f34->v7.in_bl_mode) {
+ dev_info(&f34->fn->dev, "%s: Device in bootloader mode\n",
+ __func__);
return 0;
+ }
init_completion(&f34->v7.cmd_done);
@@ -1297,7 +1118,7 @@ static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
if (ret < 0)
return ret;
- ret = rmi_f34v7_wait_for_idle(f34, F34_ENABLE_WAIT_MS);
+ ret = rmi_f34v7_check_command_status(f34, F34_ENABLE_WAIT_MS);
if (ret < 0)
return ret;
@@ -1308,39 +1129,16 @@ int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw)
{
int ret = 0;
- f34->fn->rmi_dev->driver->set_irq_bits(f34->fn->rmi_dev, f34->fn->irq_mask);
-
f34->v7.config_area = v7_UI_CONFIG_AREA;
f34->v7.image = fw->data;
ret = rmi_f34v7_parse_image_info(f34);
if (ret < 0)
- goto exit;
-
- if (!f34->v7.force_update && f34->v7.new_partition_table) {
- dev_err(&f34->fn->dev, "%s: Partition table mismatch\n",
- __func__);
- ret = -EINVAL;
- goto exit;
- }
+ return ret;
dev_info(&f34->fn->dev, "Firmware image OK\n");
- ret = rmi_f34v7_read_flash_status(f34);
- if (ret < 0)
- goto exit;
-
- if (f34->v7.in_bl_mode) {
- dev_info(&f34->fn->dev, "%s: Device in bootloader mode\n",
- __func__);
- }
-
- rmi_f34v7_enter_flash_prog(f34);
-
- return 0;
-
-exit:
- return ret;
+ return rmi_f34v7_enter_flash_prog(f34);
}
int rmi_f34v7_probe(struct f34_data *f34)
@@ -1384,6 +1182,5 @@ int rmi_f34v7_probe(struct f34_data *f34)
if (ret < 0)
return ret;
- f34->v7.force_update = true;
return 0;
}
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index c5ce907535ef..5c3da910b5b2 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -390,8 +390,8 @@ static int rmi_f54_vidioc_querycap(struct file *file, void *priv,
{
struct f54_data *f54 = video_drvdata(file);
- strlcpy(cap->driver, F54_NAME, sizeof(cap->driver));
- strlcpy(cap->card, SYNAPTICS_INPUT_DEVICE_NAME, sizeof(cap->card));
+ strscpy(cap->driver, F54_NAME, sizeof(cap->driver));
+ strscpy(cap->card, SYNAPTICS_INPUT_DEVICE_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"rmi4:%s", dev_name(&f54->fn->dev));
@@ -410,7 +410,7 @@ static int rmi_f54_vidioc_enum_input(struct file *file, void *priv,
i->type = V4L2_INPUT_TYPE_TOUCH;
- strlcpy(i->name, rmi_f54_report_type_names[reptype], sizeof(i->name));
+ strscpy(i->name, rmi_f54_report_type_names[reptype], sizeof(i->name));
return 0;
}
@@ -696,7 +696,7 @@ static int rmi_f54_probe(struct rmi_function *fn)
rmi_f54_set_input(f54, 0);
/* register video device */
- strlcpy(f54->v4l2.name, F54_NAME, sizeof(f54->v4l2.name));
+ strscpy(f54->v4l2.name, F54_NAME, sizeof(f54->v4l2.name));
ret = v4l2_device_register(&fn->dev, &f54->v4l2);
if (ret) {
dev_err(&fn->dev, "Unable to register video dev.\n");
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 379e9240c2b3..3a92304f64fb 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -110,8 +110,8 @@ static int altera_ps2_probe(struct platform_device *pdev)
serio->write = altera_ps2_write;
serio->open = altera_ps2_open;
serio->close = altera_ps2_close;
- strlcpy(serio->name, dev_name(&pdev->dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
+ strscpy(serio->name, dev_name(&pdev->dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
serio->port_data = ps2if;
serio->dev.parent = &pdev->dev;
ps2if->io = serio;
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 4408245b61d2..c391700fc4ae 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -126,8 +126,8 @@ static int amba_kmi_probe(struct amba_device *dev,
io->write = amba_kmi_write;
io->open = amba_kmi_open;
io->close = amba_kmi_close;
- strlcpy(io->name, dev_name(&dev->dev), sizeof(io->name));
- strlcpy(io->phys, dev_name(&dev->dev), sizeof(io->phys));
+ strscpy(io->name, dev_name(&dev->dev), sizeof(io->name));
+ strscpy(io->phys, dev_name(&dev->dev), sizeof(io->phys));
io->port_data = kmi;
io->dev.parent = &dev->dev;
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 1c0be299f179..ec93cb4573c3 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -159,8 +159,8 @@ static int ams_delta_serio_init(struct platform_device *pdev)
serio->id.type = SERIO_8042;
serio->open = ams_delta_serio_open;
serio->close = ams_delta_serio_close;
- strlcpy(serio->name, "AMS DELTA keyboard adapter", sizeof(serio->name));
- strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
+ strscpy(serio->name, "AMS DELTA keyboard adapter", sizeof(serio->name));
+ strscpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
serio->dev.parent = &pdev->dev;
serio->port_data = priv;
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index 974d7bfae0a0..9c9ce097f8bf 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -176,7 +176,7 @@ static int apbps2_of_probe(struct platform_device *ofdev)
priv->io->close = apbps2_close;
priv->io->write = apbps2_write;
priv->io->port_data = priv;
- strlcpy(priv->io->name, "APBPS2 PS/2", sizeof(priv->io->name));
+ strscpy(priv->io->name, "APBPS2 PS/2", sizeof(priv->io->name));
snprintf(priv->io->phys, sizeof(priv->io->phys),
"apbps2_%d", apbps2_idx++);
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index d45009d654bf..3da751f4a6bf 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -7,9 +7,6 @@
* 82C710 C&T mouse port chip driver for Linux
*/
-/*
- */
-
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ioport.h>
@@ -170,7 +167,7 @@ static int ct82c710_probe(struct platform_device *dev)
ct82c710_port->open = ct82c710_open;
ct82c710_port->close = ct82c710_close;
ct82c710_port->write = ct82c710_write;
- strlcpy(ct82c710_port->name, "C&T 82c710 mouse port",
+ strscpy(ct82c710_port->name, "C&T 82c710 mouse port",
sizeof(ct82c710_port->name));
snprintf(ct82c710_port->phys, sizeof(ct82c710_port->phys),
"isa%16llx/serio0", (unsigned long long)CT82C710_DATA);
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index da2c67cb8642..633c7de49d67 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -361,7 +361,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
snprintf(serio->name, sizeof(serio->name), "gsc-ps2-%s",
(ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse");
- strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
+ strscpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
serio->id.type = SERIO_8042;
serio->write = gscps2_write;
serio->open = gscps2_open;
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 1a7b72a9016d..d62aefb2e245 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -334,9 +334,9 @@ static int hv_kbd_probe(struct hv_device *hv_dev,
hv_serio->dev.parent = &hv_dev->device;
hv_serio->id.type = SERIO_8042_XL;
hv_serio->port_data = kbd_dev;
- strlcpy(hv_serio->name, dev_name(&hv_dev->device),
+ strscpy(hv_serio->name, dev_name(&hv_dev->device),
sizeof(hv_serio->name));
- strlcpy(hv_serio->phys, dev_name(&hv_dev->device),
+ strscpy(hv_serio->phys, dev_name(&hv_dev->device),
sizeof(hv_serio->phys));
hv_serio->start = hv_kbd_start;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-acpipnpio.h
index 4fbec7bbecca..0778dc03cd9e 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _I8042_X86IA64IO_H
-#define _I8042_X86IA64IO_H
+#ifndef _I8042_ACPIPNPIO_H
+#define _I8042_ACPIPNPIO_H
+#include <linux/acpi.h>
#ifdef CONFIG_X86
#include <asm/x86_init.h>
@@ -1300,7 +1301,7 @@ static char i8042_pnp_aux_name[32];
static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size)
{
- strlcpy(dst, "PNP:", dst_size);
+ strscpy(dst, "PNP:", dst_size);
while (id) {
strlcat(dst, " ", dst_size);
@@ -1320,7 +1321,7 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
if (pnp_irq_valid(dev,0))
i8042_pnp_kbd_irq = pnp_irq(dev, 0);
- strlcpy(i8042_pnp_kbd_name, did->id, sizeof(i8042_pnp_kbd_name));
+ strscpy(i8042_pnp_kbd_name, did->id, sizeof(i8042_pnp_kbd_name));
if (strlen(pnp_dev_name(dev))) {
strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
@@ -1347,7 +1348,7 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
if (pnp_irq_valid(dev, 0))
i8042_pnp_aux_irq = pnp_irq(dev, 0);
- strlcpy(i8042_pnp_aux_name, did->id, sizeof(i8042_pnp_aux_name));
+ strscpy(i8042_pnp_aux_name, did->id, sizeof(i8042_pnp_aux_name));
if (strlen(pnp_dev_name(dev))) {
strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
@@ -1453,9 +1454,14 @@ static int __init i8042_pnp_init(void)
return -ENODEV;
#else
pr_info("PNP: No PS/2 controller found.\n");
+#if defined(__loongarch__)
+ if (acpi_disabled == 0)
+ return -ENODEV;
+#else
if (x86_platform.legacy.i8042 !=
X86_LEGACY_I8042_EXPECTED_PRESENT)
return -ENODEV;
+#endif
pr_info("Probing ports directly.\n");
return 0;
#endif
@@ -1665,4 +1671,4 @@ static inline void i8042_platform_exit(void)
i8042_pnp_exit();
}
-#endif /* _I8042_X86IA64IO_H */
+#endif /* _I8042_ACPIPNPIO_H */
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index fce76812843b..c712c1fe0605 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -3,6 +3,7 @@
#define _I8042_SPARCIO_H
#include <linux/of_device.h>
+#include <linux/types.h>
#include <asm/io.h>
#include <asm/oplib.h>
@@ -103,12 +104,25 @@ static struct platform_driver sparc_i8042_driver = {
.remove = sparc_i8042_remove,
};
-static int __init i8042_platform_init(void)
+static bool i8042_is_mr_coffee(void)
{
- struct device_node *root = of_find_node_by_path("/");
- const char *name = of_get_property(root, "name", NULL);
+ struct device_node *root;
+ const char *name;
+ bool is_mr_coffee;
+
+ root = of_find_node_by_path("/");
+
+ name = of_get_property(root, "name", NULL);
+ is_mr_coffee = name && !strcmp(name, "SUNW,JavaStation-1");
- if (name && !strcmp(name, "SUNW,JavaStation-1")) {
+ of_node_put(root);
+
+ return is_mr_coffee;
+}
+
+static int __init i8042_platform_init(void)
+{
+ if (i8042_is_mr_coffee()) {
/* Hardcoded values for MrCoffee. */
i8042_kbd_irq = i8042_aux_irq = 13 | 0x20;
kbd_iobase = ioremap(0x71300060, 8);
@@ -136,10 +150,7 @@ static int __init i8042_platform_init(void)
static inline void i8042_platform_exit(void)
{
- struct device_node *root = of_find_node_by_path("/");
- const char *name = of_get_property(root, "name", NULL);
-
- if (!name || strcmp(name, "SUNW,JavaStation-1"))
+ if (!i8042_is_mr_coffee())
platform_driver_unregister(&sparc_i8042_driver);
}
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 3fc0a89cc785..f9486495baef 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1341,9 +1341,9 @@ static int i8042_create_kbd_port(void)
serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
- strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
- strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
- strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
+ strscpy(serio->name, "i8042 KBD port", sizeof(serio->name));
+ strscpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
+ strscpy(serio->firmware_id, i8042_kbd_firmware_id,
sizeof(serio->firmware_id));
set_primary_fwnode(&serio->dev, i8042_kbd_fwnode);
@@ -1371,15 +1371,15 @@ static int i8042_create_aux_port(int idx)
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
if (idx < 0) {
- strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name));
- strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
- strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+ strscpy(serio->name, "i8042 AUX port", sizeof(serio->name));
+ strscpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
+ strscpy(serio->firmware_id, i8042_aux_firmware_id,
sizeof(serio->firmware_id));
serio->close = i8042_port_close;
} else {
snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
snprintf(serio->phys, sizeof(serio->phys), I8042_MUX_PHYS_DESC, idx + 1);
- strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+ strscpy(serio->firmware_id, i8042_aux_firmware_id,
sizeof(serio->firmware_id));
}
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index 55381783dc82..adb5173372d3 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -19,8 +19,8 @@
#include "i8042-snirm.h"
#elif defined(CONFIG_SPARC)
#include "i8042-sparcio.h"
-#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
-#include "i8042-x86ia64io.h"
+#elif defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
+#include "i8042-acpipnpio.h"
#else
#include "i8042-io.h"
#endif
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 250e213cc80c..3e19344eda93 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/kmsan-checks.h>
#include <linux/serio.h>
#include <linux/i8042.h>
#include <linux/libps2.h>
@@ -294,9 +295,11 @@ int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command)
serio_pause_rx(ps2dev->serio);
- if (param)
+ if (param) {
for (i = 0; i < receive; i++)
param[i] = ps2dev->cmdbuf[(receive - 1) - i];
+ kmsan_unpoison_memory(param, receive);
+ }
if (ps2dev->cmdcnt &&
(command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) {
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index 59de8d9b6710..04d2db982fb8 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -199,8 +199,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
kb_serio->close = olpc_apsp_close;
kb_serio->port_data = priv;
kb_serio->dev.parent = &pdev->dev;
- strlcpy(kb_serio->name, "sp keyboard", sizeof(kb_serio->name));
- strlcpy(kb_serio->phys, "sp/serio0", sizeof(kb_serio->phys));
+ strscpy(kb_serio->name, "sp keyboard", sizeof(kb_serio->name));
+ strscpy(kb_serio->phys, "sp/serio0", sizeof(kb_serio->phys));
priv->kbio = kb_serio;
serio_register_port(kb_serio);
@@ -216,8 +216,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
pad_serio->close = olpc_apsp_close;
pad_serio->port_data = priv;
pad_serio->dev.parent = &pdev->dev;
- strlcpy(pad_serio->name, "sp touchpad", sizeof(pad_serio->name));
- strlcpy(pad_serio->phys, "sp/serio1", sizeof(pad_serio->phys));
+ strscpy(pad_serio->name, "sp touchpad", sizeof(pad_serio->name));
+ strscpy(pad_serio->phys, "sp/serio1", sizeof(pad_serio->phys));
priv->padio = pad_serio;
serio_register_port(pad_serio);
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 51b68501896c..0d54895428f5 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -169,7 +169,7 @@ static struct serio *parkbd_allocate_serio(void)
if (serio) {
serio->id.type = parkbd_mode;
serio->write = parkbd_write;
- strlcpy(serio->name, "PARKBD AT/XT keyboard adapter", sizeof(serio->name));
+ strscpy(serio->name, "PARKBD AT/XT keyboard adapter", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0", parkbd_dev->port->name);
}
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index bedf75de0a2c..05878750f2c2 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -149,8 +149,8 @@ static int pcips2_probe(struct pci_dev *dev, const struct pci_device_id *id)
serio->write = pcips2_write;
serio->open = pcips2_open;
serio->close = pcips2_close;
- strlcpy(serio->name, pci_name(dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
+ strscpy(serio->name, pci_name(dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
serio->port_data = ps2if;
serio->dev.parent = &dev->dev;
ps2if->io = serio;
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index 9b02dd5dd2b9..bc1dc484389b 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -449,8 +449,8 @@ static int ps2_gpio_probe(struct platform_device *pdev)
serio->write = drvdata->write_enable ? ps2_gpio_write : NULL;
serio->port_data = drvdata;
serio->dev.parent = dev;
- strlcpy(serio->name, dev_name(dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys));
+ strscpy(serio->name, dev_name(dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(dev), sizeof(serio->phys));
drvdata->serio = serio;
drvdata->dev = dev;
diff --git a/drivers/input/serio/ps2mult.c b/drivers/input/serio/ps2mult.c
index 0071dd5ebcc2..902e81826fbf 100644
--- a/drivers/input/serio/ps2mult.c
+++ b/drivers/input/serio/ps2mult.c
@@ -131,7 +131,7 @@ static int ps2mult_create_port(struct ps2mult *psm, int i)
if (!serio)
return -ENOMEM;
- strlcpy(serio->name, "TQC PS/2 Multiplexer", sizeof(serio->name));
+ strscpy(serio->name, "TQC PS/2 Multiplexer", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys),
"%s/port%d", mx_serio->phys, i);
serio->id.type = SERIO_8042;
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index bd248398556a..ba04058fc3cb 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -10,9 +10,6 @@
* Q40 PS/2 keyboard controller driver for Linux/m68k
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
@@ -126,8 +123,8 @@ static int q40kbd_probe(struct platform_device *pdev)
port->close = q40kbd_close;
port->port_data = q40kbd;
port->dev.parent = &pdev->dev;
- strlcpy(port->name, "Q40 Kbd Port", sizeof(port->name));
- strlcpy(port->phys, "Q40", sizeof(port->phys));
+ strscpy(port->name, "Q40 Kbd Port", sizeof(port->name));
+ strscpy(port->phys, "Q40", sizeof(port->phys));
q40kbd_stop();
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 37fe6a5711ea..ce420eb1f51b 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -8,9 +8,6 @@
* Acorn RiscPC PS/2 keyboard controller driver for Linux/ARM
*/
-/*
- */
-
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/serio.h>
@@ -128,8 +125,8 @@ static int rpckbd_probe(struct platform_device *dev)
serio->close = rpckbd_close;
serio->dev.parent = &dev->dev;
serio->port_data = rpckbd;
- strlcpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name));
- strlcpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys));
+ strscpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name));
+ strscpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys));
platform_set_drvdata(dev, serio);
serio_register_port(serio);
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 68fac4801e2e..2724c3aa512c 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -267,8 +267,8 @@ static int ps2_probe(struct sa1111_dev *dev)
serio->write = ps2_write;
serio->open = ps2_open;
serio->close = ps2_close;
- strlcpy(serio->name, dev_name(&dev->dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
+ strscpy(serio->name, dev_name(&dev->dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
serio->port_data = ps2if;
serio->dev.parent = &dev->dev;
ps2if->io = serio;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index ec117be3d8d8..15ce3202322f 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -7,9 +7,6 @@
* Copyright (c) 2003 Daniele Bellucci
*/
-/*
- */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/stddef.h>
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 669a728095b8..7f7ef0e3a749 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -171,7 +171,7 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file,
if (!serio)
return -ENOMEM;
- strlcpy(serio->name, "Serial port", sizeof(serio->name));
+ strscpy(serio->name, "Serial port", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0", tty_name(tty));
serio->id = serport->id;
serio->id.type = SERIO_RS232;
diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
index f15ed3dcdb9b..eb262640192e 100644
--- a/drivers/input/serio/sun4i-ps2.c
+++ b/drivers/input/serio/sun4i-ps2.c
@@ -256,8 +256,8 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
serio->close = sun4i_ps2_close;
serio->port_data = drvdata;
serio->dev.parent = dev;
- strlcpy(serio->name, dev_name(dev), sizeof(serio->name));
- strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys));
+ strscpy(serio->name, dev_name(dev), sizeof(serio->name));
+ strscpy(serio->phys, dev_name(dev), sizeof(serio->phys));
/* shutoff interrupt */
writel(0, drvdata->reg_base + PS2_REG_GCTL);
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 56c7e471ac32..b20e5a1afbcc 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -9,9 +9,6 @@
* v3.2 - Added sysfs support
*/
-/*
- */
-
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -155,7 +152,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
acecad->input = input_dev;
if (dev->manufacturer)
- strlcpy(acecad->name, dev->manufacturer, sizeof(acecad->name));
+ strscpy(acecad->name, dev->manufacturer, sizeof(acecad->name));
if (dev->product) {
if (dev->manufacturer)
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 24ec4844a5c3..baabc51547b8 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1617,7 +1617,7 @@ static ssize_t show_firmwareCode(struct device *dev, struct device_attribute *at
static DEVICE_ATTR(firmware_code, S_IRUGO, show_firmwareCode, NULL);
-static struct attribute *aiptek_attributes[] = {
+static struct attribute *aiptek_dev_attrs[] = {
&dev_attr_size.attr,
&dev_attr_pointer_mode.attr,
&dev_attr_coordinate_mode.attr,
@@ -1641,9 +1641,7 @@ static struct attribute *aiptek_attributes[] = {
NULL
};
-static const struct attribute_group aiptek_attribute_group = {
- .attrs = aiptek_attributes,
-};
+ATTRIBUTE_GROUPS(aiptek_dev);
/***********************************************************************
* This routine is called when a tablet has been identified. It basically
@@ -1842,26 +1840,16 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
*/
usb_set_intfdata(intf, aiptek);
- /* Set up the sysfs files
- */
- err = sysfs_create_group(&intf->dev.kobj, &aiptek_attribute_group);
- if (err) {
- dev_warn(&intf->dev, "cannot create sysfs group err: %d\n",
- err);
- goto fail3;
- }
-
/* Register the tablet as an Input Device
*/
err = input_register_device(aiptek->inputdev);
if (err) {
dev_warn(&intf->dev,
"input_register_device returned err: %d\n", err);
- goto fail4;
+ goto fail3;
}
return 0;
- fail4: sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
fail3: usb_free_urb(aiptek->urb);
fail2: usb_free_coherent(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
aiptek->data_dma);
@@ -1886,7 +1874,6 @@ static void aiptek_disconnect(struct usb_interface *intf)
*/
usb_kill_urb(aiptek->urb);
input_unregister_device(aiptek->inputdev);
- sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
usb_free_urb(aiptek->urb);
usb_free_coherent(interface_to_usbdev(intf),
AIPTEK_PACKET_LENGTH,
@@ -1900,6 +1887,7 @@ static struct usb_driver aiptek_driver = {
.probe = aiptek_probe,
.disconnect = aiptek_disconnect,
.id_table = aiptek_ids,
+ .dev_groups = aiptek_dev_groups,
};
module_usb_driver(aiptek_driver);
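
The aiptek conversion above drops the open-coded sysfs_create_group()/sysfs_remove_group() calls in favour of driver-core managed attribute groups: the attribute array is renamed so that ATTRIBUTE_GROUPS() can generate the matching *_groups table, and the USB driver points .dev_groups at it so the core creates and removes the files around probe and disconnect. A hedged sketch of the pattern with made-up names (the attribute, driver, and omitted callbacks are illustrative, not from the patch):

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/usb.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "example\n");
}
static DEVICE_ATTR_RO(example);

/* ATTRIBUTE_GROUPS(foo) requires an array named foo_attrs and emits foo_groups */
static struct attribute *example_dev_attrs[] = {
	&dev_attr_example.attr,
	NULL
};
ATTRIBUTE_GROUPS(example_dev);

static struct usb_driver example_driver = {
	.name		= "example",
	/* .probe / .disconnect / .id_table omitted for brevity */
	.dev_groups	= example_dev_groups,	/* sysfs files handled by the core */
};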
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index 6d58443bb3e9..9bc631518b92 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -5,9 +5,6 @@
* Copyright (c) 2010 Xing Wei <weixing@hanwang.com.cn>
*/
-/*
- */
-
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -356,7 +353,7 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
usb_make_path(dev, hanwang->phys, sizeof(hanwang->phys));
strlcat(hanwang->phys, "/input0", sizeof(hanwang->phys));
- strlcpy(hanwang->name, hanwang->features->name, sizeof(hanwang->name));
+ strscpy(hanwang->name, hanwang->features->name, sizeof(hanwang->name));
input_dev->name = hanwang->name;
input_dev->phys = hanwang->phys;
usb_to_input_id(dev, &input_dev->id);
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index c608ac505d1b..d836d3dcc6a2 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -319,7 +319,7 @@ static int pegasus_probe(struct usb_interface *intf,
pegasus->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (dev->manufacturer)
- strlcpy(pegasus->name, dev->manufacturer,
+ strscpy(pegasus->name, dev->manufacturer,
sizeof(pegasus->name));
if (dev->product) {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 2d70c945b20a..dc90a3ea51ee 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1335,7 +1335,7 @@ config TOUCHSCREEN_ZFORCE
config TOUCHSCREEN_COLIBRI_VF50
tristate "Toradex Colibri on board touchscreen driver"
- depends on IIO && VF610_ADC
+ depends on IIO
depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a Colibri VF50 and plan to use
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 4eedea08b0b5..ccecd1441f0b 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2497,8 +2497,8 @@ static int mxt_vidioc_querycap(struct file *file, void *priv,
{
struct mxt_data *data = video_drvdata(file);
- strlcpy(cap->driver, "atmel_mxt_ts", sizeof(cap->driver));
- strlcpy(cap->card, "atmel_mxt_ts touch", sizeof(cap->card));
+ strscpy(cap->driver, "atmel_mxt_ts", sizeof(cap->driver));
+ strscpy(cap->card, "atmel_mxt_ts touch", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"I2C:%s", dev_name(&data->client->dev));
return 0;
@@ -2514,11 +2514,11 @@ static int mxt_vidioc_enum_input(struct file *file, void *priv,
switch (i->index) {
case MXT_V4L_INPUT_REFS:
- strlcpy(i->name, "Mutual Capacitance References",
+ strscpy(i->name, "Mutual Capacitance References",
sizeof(i->name));
break;
case MXT_V4L_INPUT_DELTAS:
- strlcpy(i->name, "Mutual Capacitance Deltas", sizeof(i->name));
+ strscpy(i->name, "Mutual Capacitance Deltas", sizeof(i->name));
break;
}
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index c33e63ca6142..2deae5a6823a 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -10,6 +10,7 @@
* Copyright (c) 2008 QUALCOMM USA, INC.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -19,10 +20,9 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/input/auo-pixcir-ts.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
/*
* Coordinate calculation:
@@ -69,6 +69,16 @@
#define AUO_PIXCIR_INT_RELEASE (1 << 4)
#define AUO_PIXCIR_INT_ENABLE (1 << 3)
#define AUO_PIXCIR_INT_POL_HIGH (1 << 2)
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * compare coordinates: interrupt is asserted when coordinates change
+ * indicate touch: interrupt is asserted during touch
+ */
+#define AUO_PIXCIR_INT_PERIODICAL 0x00
+#define AUO_PIXCIR_INT_COMP_COORD 0x01
+#define AUO_PIXCIR_INT_TOUCH_IND 0x02
#define AUO_PIXCIR_INT_MODE_MASK 0x03
/*
@@ -103,10 +113,14 @@
struct auo_pixcir_ts {
struct i2c_client *client;
struct input_dev *input;
- const struct auo_pixcir_ts_platdata *pdata;
+ struct gpio_desc *gpio_int;
+ struct gpio_desc *gpio_rst;
char phys[32];
- /* special handling for touch_indicate interupt mode */
+ unsigned int x_max;
+ unsigned int y_max;
+
+ /* special handling for touch_indicate interrupt mode */
bool touch_ind_mode;
wait_queue_head_t wait;
@@ -125,7 +139,6 @@ static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
struct auo_point_t *point)
{
struct i2c_client *client = ts->client;
- const struct auo_pixcir_ts_platdata *pdata = ts->pdata;
uint8_t raw_coord[8];
uint8_t raw_area[4];
int i, ret;
@@ -152,8 +165,8 @@ static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
point[i].coord_y =
raw_coord[4 * i + 3] << 8 | raw_coord[4 * i + 2];
- if (point[i].coord_x > pdata->x_max ||
- point[i].coord_y > pdata->y_max) {
+ if (point[i].coord_x > ts->x_max ||
+ point[i].coord_y > ts->y_max) {
dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
point[i].coord_x, point[i].coord_y);
point[i].coord_x = point[i].coord_y = 0;
@@ -171,7 +184,6 @@ static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
static irqreturn_t auo_pixcir_interrupt(int irq, void *dev_id)
{
struct auo_pixcir_ts *ts = dev_id;
- const struct auo_pixcir_ts_platdata *pdata = ts->pdata;
struct auo_point_t point[AUO_PIXCIR_REPORT_POINTS];
int i;
int ret;
@@ -182,7 +194,7 @@ static irqreturn_t auo_pixcir_interrupt(int irq, void *dev_id)
/* check for up event in touch touch_ind_mode */
if (ts->touch_ind_mode) {
- if (gpio_get_value(pdata->gpio_int) == 0) {
+ if (gpiod_get_value_cansleep(ts->gpio_int) == 0) {
input_mt_sync(ts->input);
input_report_key(ts->input, BTN_TOUCH, 0);
input_sync(ts->input);
@@ -278,11 +290,9 @@ static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
return 0;
}
-static int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
- int int_setting)
+static int auo_pixcir_int_config(struct auo_pixcir_ts *ts, int int_setting)
{
struct i2c_client *client = ts->client;
- const struct auo_pixcir_ts_platdata *pdata = ts->pdata;
int ret;
ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
@@ -304,7 +314,7 @@ static int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
return ret;
}
- ts->touch_ind_mode = pdata->int_setting == AUO_PIXCIR_INT_TOUCH_IND;
+ ts->touch_ind_mode = int_setting == AUO_PIXCIR_INT_TOUCH_IND;
return 0;
}
@@ -465,78 +475,22 @@ unlock:
static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops,
auo_pixcir_suspend, auo_pixcir_resume);
-#ifdef CONFIG_OF
-static struct auo_pixcir_ts_platdata *auo_pixcir_parse_dt(struct device *dev)
-{
- struct auo_pixcir_ts_platdata *pdata;
- struct device_node *np = dev->of_node;
-
- if (!np)
- return ERR_PTR(-ENOENT);
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->gpio_int = of_get_gpio(np, 0);
- if (!gpio_is_valid(pdata->gpio_int)) {
- dev_err(dev, "failed to get interrupt gpio\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata->gpio_rst = of_get_gpio(np, 1);
- if (!gpio_is_valid(pdata->gpio_rst)) {
- dev_err(dev, "failed to get reset gpio\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
- dev_err(dev, "failed to get x-size property\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(np, "y-size", &pdata->y_max)) {
- dev_err(dev, "failed to get y-size property\n");
- return ERR_PTR(-EINVAL);
- }
-
- /* default to asserting the interrupt when the screen is touched */
- pdata->int_setting = AUO_PIXCIR_INT_TOUCH_IND;
-
- return pdata;
-}
-#else
-static struct auo_pixcir_ts_platdata *auo_pixcir_parse_dt(struct device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
-
static void auo_pixcir_reset(void *data)
{
struct auo_pixcir_ts *ts = data;
- gpio_set_value(ts->pdata->gpio_rst, 0);
+ gpiod_set_value_cansleep(ts->gpio_rst, 1);
}
static int auo_pixcir_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct auo_pixcir_ts_platdata *pdata;
struct auo_pixcir_ts *ts;
struct input_dev *input_dev;
int version;
int error;
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- pdata = auo_pixcir_parse_dt(&client->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
-
- ts = devm_kzalloc(&client->dev,
- sizeof(struct auo_pixcir_ts), GFP_KERNEL);
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
if (!ts)
return -ENOMEM;
@@ -546,7 +500,6 @@ static int auo_pixcir_probe(struct i2c_client *client,
return -ENOMEM;
}
- ts->pdata = pdata;
ts->client = client;
ts->input = input_dev;
ts->touch_ind_mode = 0;
@@ -556,6 +509,16 @@ static int auo_pixcir_probe(struct i2c_client *client,
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
+ if (device_property_read_u32(&client->dev, "x-size", &ts->x_max)) {
+ dev_err(&client->dev, "failed to get x-size property\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&client->dev, "y-size", &ts->y_max)) {
+ dev_err(&client->dev, "failed to get y-size property\n");
+ return -EINVAL;
+ }
+
input_dev->name = "AUO-Pixcir touchscreen";
input_dev->phys = ts->phys;
input_dev->id.bustype = BUS_I2C;
@@ -569,39 +532,42 @@ static int auo_pixcir_probe(struct i2c_client *client,
__set_bit(BTN_TOUCH, input_dev->keybit);
/* For single touch */
- input_set_abs_params(input_dev, ABS_X, 0, pdata->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, pdata->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_X, 0, ts->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, ts->y_max, 0, 0);
/* For multi touch */
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
- pdata->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
- pdata->y_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
- AUO_PIXCIR_MAX_AREA, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
- AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, ts->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
+ 0, AUO_PIXCIR_MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
input_set_drvdata(ts->input, ts);
- error = devm_gpio_request_one(&client->dev, pdata->gpio_int,
- GPIOF_DIR_IN, "auo_pixcir_ts_int");
+ ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0, GPIOD_IN);
+ error = PTR_ERR_OR_ZERO(ts->gpio_int);
if (error) {
- dev_err(&client->dev, "request of gpio %d failed, %d\n",
- pdata->gpio_int, error);
+ dev_err(&client->dev,
+ "request of int gpio failed: %d\n", error);
return error;
}
- error = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
- GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
- "auo_pixcir_ts_rst");
+ gpiod_set_consumer_name(ts->gpio_int, "auo_pixcir_ts_int");
+
+ /* Take the chip out of reset */
+ ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1,
+ GPIOD_OUT_LOW);
+ error = PTR_ERR_OR_ZERO(ts->gpio_rst);
if (error) {
- dev_err(&client->dev, "request of gpio %d failed, %d\n",
- pdata->gpio_rst, error);
+ dev_err(&client->dev,
+ "request of reset gpio failed: %d\n", error);
return error;
}
+ gpiod_set_consumer_name(ts->gpio_rst, "auo_pixcir_ts_rst");
+
error = devm_add_action_or_reset(&client->dev, auo_pixcir_reset, ts);
if (error) {
dev_err(&client->dev, "failed to register reset action, %d\n",
@@ -619,13 +585,14 @@ static int auo_pixcir_probe(struct i2c_client *client,
dev_info(&client->dev, "firmware version 0x%X\n", version);
- error = auo_pixcir_int_config(ts, pdata->int_setting);
+ /* default to asserting the interrupt when the screen is touched */
+ error = auo_pixcir_int_config(ts, AUO_PIXCIR_INT_TOUCH_IND);
if (error)
return error;
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, auo_pixcir_interrupt,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ IRQF_ONESHOT,
input_dev->name, ts);
if (error) {
dev_err(&client->dev, "irq %d requested failed, %d\n",
diff --git a/drivers/input/touchscreen/chipone_icn8505.c b/drivers/input/touchscreen/chipone_icn8505.c
index f9ca5502ac8c..c421f4be2700 100644
--- a/drivers/input/touchscreen/chipone_icn8505.c
+++ b/drivers/input/touchscreen/chipone_icn8505.c
@@ -364,32 +364,20 @@ static irqreturn_t icn8505_irq(int irq, void *dev_id)
static int icn8505_probe_acpi(struct icn8505_data *icn8505, struct device *dev)
{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- const char *subsys = "unknown";
- struct acpi_device *adev;
- union acpi_object *obj;
- acpi_status status;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
+ const char *subsys;
+ int error;
- status = acpi_evaluate_object(adev->handle, "_SUB", NULL, &buffer);
- if (ACPI_SUCCESS(status)) {
- obj = buffer.pointer;
- if (obj->type == ACPI_TYPE_STRING)
- subsys = obj->string.pointer;
- else
- dev_warn(dev, "Warning ACPI _SUB did not return a string\n");
- } else {
- dev_warn(dev, "Warning ACPI _SUB failed: %#x\n", status);
- buffer.pointer = NULL;
- }
+ subsys = acpi_get_subsystem_id(ACPI_HANDLE(dev));
+ error = PTR_ERR_OR_ZERO(subsys);
+ if (error == -ENODATA)
+ subsys = "unknown";
+ else if (error)
+ return error;
snprintf(icn8505->firmware_name, sizeof(icn8505->firmware_name),
"chipone/icn8505-%s.fw", subsys);
- kfree(buffer.pointer);
+ kfree_const(subsys);
return 0;
}
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 5fb441387fe5..9ac1378610bc 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -912,8 +912,8 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
p = strchr(rdbuf, '*');
if (p)
*p++ = '\0';
- strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
- strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+ strscpy(model_name, rdbuf + 1, EDT_NAME_LEN);
+ strscpy(fw_version, p ? p : "", EDT_NAME_LEN);
} else if (!strncasecmp(rdbuf, "EP0", 3)) {
tsdata->version = EDT_M12;
@@ -926,8 +926,8 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
p = strchr(rdbuf, '*');
if (p)
*p++ = '\0';
- strlcpy(model_name, rdbuf, EDT_NAME_LEN);
- strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+ strscpy(model_name, rdbuf, EDT_NAME_LEN);
+ strscpy(fw_version, p ? p : "", EDT_NAME_LEN);
} else {
/* If it is not an EDT M06/M12 touchscreen, then the model
* detection is a bit hairy. The different ft5x06
@@ -945,7 +945,7 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
if (error)
return error;
- strlcpy(fw_version, rdbuf, 2);
+ strscpy(fw_version, rdbuf, 2);
error = edt_ft5x06_ts_readwrite(client, 1, "\xA8",
1, rdbuf);
@@ -981,7 +981,7 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
1, rdbuf);
if (error)
return error;
- strlcpy(fw_version, rdbuf, 1);
+ strscpy(fw_version, rdbuf, 1);
snprintf(model_name, EDT_NAME_LEN,
"EVERVISION-FT5726NEi");
break;
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index e07e8e0fe8ea..5a5f9da73fa1 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -7,9 +7,6 @@
* Gunze AHL-51S touchscreen driver for Linux
*/
-/*
- */
-
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 12f2562b0141..8ddb3f7d307a 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -939,8 +939,8 @@ static int sur40_vidioc_querycap(struct file *file, void *priv,
{
struct sur40_state *sur40 = video_drvdata(file);
- strlcpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver));
- strlcpy(cap->card, DRIVER_LONG, sizeof(cap->card));
+ strscpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver));
+ strscpy(cap->card, DRIVER_LONG, sizeof(cap->card));
usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info));
return 0;
}
@@ -952,7 +952,7 @@ static int sur40_vidioc_enum_input(struct file *file, void *priv,
return -EINVAL;
i->type = V4L2_INPUT_TYPE_TOUCH;
i->std = V4L2_STD_UNKNOWN;
- strlcpy(i->name, "In-Cell Sensor", sizeof(i->name));
+ strscpy(i->name, "In-Cell Sensor", sizeof(i->name));
i->capabilities = 0;
return 0;
}
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 3dda6eaabdab..d6d04b9f04fc 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1708,7 +1708,7 @@ static int usbtouch_probe(struct usb_interface *intf,
usbtouch->input = input_dev;
if (udev->manufacturer)
- strlcpy(usbtouch->name, udev->manufacturer, sizeof(usbtouch->name));
+ strscpy(usbtouch->name, udev->manufacturer, sizeof(usbtouch->name));
if (udev->product) {
if (udev->manufacturer)
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 691285ace228..928c5ee3ac36 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -625,7 +625,7 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
/* For backwards-compatibility we compose the basename based on
* capabilities and then just append the tool type
*/
- strlcpy(basename, "Wacom Serial", sizeof(basename));
+ strscpy(basename, "Wacom Serial", sizeof(basename));
err_pen = w8001_setup_pen(w8001, basename, sizeof(basename));
err_touch = w8001_setup_touch(w8001, basename, sizeof(basename));
@@ -635,7 +635,7 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
}
if (!err_pen) {
- strlcpy(w8001->pen_name, basename, sizeof(w8001->pen_name));
+ strscpy(w8001->pen_name, basename, sizeof(w8001->pen_name));
strlcat(w8001->pen_name, " Pen", sizeof(w8001->pen_name));
input_dev_pen->name = w8001->pen_name;
@@ -651,7 +651,7 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
}
if (!err_touch) {
- strlcpy(w8001->touch_name, basename, sizeof(w8001->touch_name));
+ strscpy(w8001->touch_name, basename, sizeof(w8001->touch_name));
strlcat(w8001->touch_name, " Finger",
sizeof(w8001->touch_name));
input_dev_touch->name = w8001->touch_name;
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 808f6e7a8048..25debded65a8 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -1057,29 +1057,25 @@ EXPORT_SYMBOL_GPL(icc_provider_add);
/**
* icc_provider_del() - delete previously added interconnect provider
* @provider: the interconnect provider that will be removed from topology
- *
- * Return: 0 on success, or an error code otherwise
*/
-int icc_provider_del(struct icc_provider *provider)
+void icc_provider_del(struct icc_provider *provider)
{
mutex_lock(&icc_lock);
if (provider->users) {
pr_warn("interconnect provider still has %d users\n",
provider->users);
mutex_unlock(&icc_lock);
- return -EBUSY;
+ return;
}
if (!list_empty(&provider->nodes)) {
pr_warn("interconnect provider still has nodes\n");
mutex_unlock(&icc_lock);
- return -EBUSY;
+ return;
}
list_del(&provider->provider_list);
mutex_unlock(&icc_lock);
-
- return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_del);
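
Since icc_provider_del() can no longer report a failure, provider drivers perform the teardown unconditionally and simply return 0 from their platform remove callbacks, as the imx and qcom conversions below show. A condensed sketch of the resulting pattern (assuming, for illustration, that the provider itself is stored as drvdata):

#include <linux/interconnect-provider.h>
#include <linux/platform_device.h>

static int example_icc_remove(struct platform_device *pdev)
{
	struct icc_provider *provider = platform_get_drvdata(pdev);

	icc_nodes_remove(provider);
	icc_provider_del(provider);	/* returns void; nothing left to check */

	return 0;
}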
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 48ffd59953bf..823d9be9771a 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -324,13 +324,13 @@ provider_del:
}
EXPORT_SYMBOL_GPL(imx_icc_register);
-int imx_icc_unregister(struct platform_device *pdev)
+void imx_icc_unregister(struct platform_device *pdev)
{
struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
imx_icc_unregister_nodes(&imx_provider->provider);
- return icc_provider_del(&imx_provider->provider);
+ icc_provider_del(&imx_provider->provider);
}
EXPORT_SYMBOL_GPL(imx_icc_unregister);
diff --git a/drivers/interconnect/imx/imx.h b/drivers/interconnect/imx/imx.h
index e0a2ee173ecd..895907cdcb3b 100644
--- a/drivers/interconnect/imx/imx.h
+++ b/drivers/interconnect/imx/imx.h
@@ -103,6 +103,6 @@ int imx_icc_register(struct platform_device *pdev,
struct imx_icc_node_desc *nodes,
int nodes_count,
struct imx_icc_noc_setting *noc_settings);
-int imx_icc_unregister(struct platform_device *pdev);
+void imx_icc_unregister(struct platform_device *pdev);
#endif /* __DRIVERS_INTERCONNECT_IMX_H */
diff --git a/drivers/interconnect/imx/imx8mm.c b/drivers/interconnect/imx/imx8mm.c
index ae797412db96..b43325364aa3 100644
--- a/drivers/interconnect/imx/imx8mm.c
+++ b/drivers/interconnect/imx/imx8mm.c
@@ -88,7 +88,9 @@ static int imx8mm_icc_probe(struct platform_device *pdev)
static int imx8mm_icc_remove(struct platform_device *pdev)
{
- return imx_icc_unregister(pdev);
+ imx_icc_unregister(pdev);
+
+ return 0;
}
static struct platform_driver imx8mm_icc_driver = {
diff --git a/drivers/interconnect/imx/imx8mn.c b/drivers/interconnect/imx/imx8mn.c
index 1ce94c5bdd8c..8ce6d8e4bf5e 100644
--- a/drivers/interconnect/imx/imx8mn.c
+++ b/drivers/interconnect/imx/imx8mn.c
@@ -77,7 +77,9 @@ static int imx8mn_icc_probe(struct platform_device *pdev)
static int imx8mn_icc_remove(struct platform_device *pdev)
{
- return imx_icc_unregister(pdev);
+ imx_icc_unregister(pdev);
+
+ return 0;
}
static struct platform_driver imx8mn_icc_driver = {
diff --git a/drivers/interconnect/imx/imx8mp.c b/drivers/interconnect/imx/imx8mp.c
index 5f1c83ed157b..8bfaf173f1da 100644
--- a/drivers/interconnect/imx/imx8mp.c
+++ b/drivers/interconnect/imx/imx8mp.c
@@ -242,7 +242,9 @@ static int imx8mp_icc_probe(struct platform_device *pdev)
static int imx8mp_icc_remove(struct platform_device *pdev)
{
- return imx_icc_unregister(pdev);
+ imx_icc_unregister(pdev);
+
+ return 0;
}
static struct platform_driver imx8mp_icc_driver = {
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index 7f00a0511c6e..b6fb71305c99 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -87,7 +87,9 @@ static int imx8mq_icc_probe(struct platform_device *pdev)
static int imx8mq_icc_remove(struct platform_device *pdev)
{
- return imx_icc_unregister(pdev);
+ imx_icc_unregister(pdev);
+
+ return 0;
}
static struct platform_driver imx8mq_icc_driver = {
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 25d5b4baf6f6..1a1c941635a2 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config INTERCONNECT_QCOM
- bool "Qualcomm Network-on-Chip interconnect drivers"
+ tristate "Qualcomm Network-on-Chip interconnect drivers"
depends on ARCH_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
diff --git a/drivers/interconnect/qcom/icc-common.c b/drivers/interconnect/qcom/icc-common.c
index 0822ce207b5d..f27f4fdc4531 100644
--- a/drivers/interconnect/qcom/icc-common.c
+++ b/drivers/interconnect/qcom/icc-common.c
@@ -5,6 +5,7 @@
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "icc-common.h"
@@ -32,3 +33,5 @@ struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void
return ndata;
}
EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 7f6a70e0256a..39e43b957599 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -563,6 +563,8 @@ int qnoc_remove(struct platform_device *pdev)
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
EXPORT_SYMBOL(qnoc_remove);
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 114bb8f64573..fd17291c61eb 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -251,7 +251,9 @@ int qcom_icc_rpmh_remove(struct platform_device *pdev)
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
icc_nodes_remove(&qp->provider);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_rpmh_remove);
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index 6fa0ad90fc3d..5ea192f1141d 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -749,7 +749,9 @@ static int msm8974_icc_remove(struct platform_device *pdev)
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
static const struct of_device_id msm8974_noc_of_match[] = {
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index 4198656f4e59..ddbdf0943f94 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -217,7 +217,9 @@ static int qcom_osm_l3_remove(struct platform_device *pdev)
struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
icc_nodes_remove(&qp->provider);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
static int qcom_osm_l3_probe(struct platform_device *pdev)
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index e821fd0b2f66..e3a12e3d6e06 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -1933,7 +1933,9 @@ static int qnoc_remove(struct platform_device *pdev)
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
icc_nodes_remove(&qp->provider);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
static const struct of_device_id qnoc_of_match[] = {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5c5cb5bee8b6..dc5f7a156ff5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -67,6 +67,17 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
If unsure, say N here.
+config IOMMU_IO_PGTABLE_DART
+ bool "Apple DART Formats"
+ select IOMMU_IO_PGTABLE
+ depends on ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ help
+ Enable support for the Apple DART pagetable formats. These include
+ the t8020 and t6000/t8110 DART formats used in Apple M1/M2 family
+ SoCs.
+
+ If unsure, say N here.
+
endmenu
config IOMMU_DEBUGFS
@@ -137,7 +148,7 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
- bool
+ def_bool ARM64 || IA64 || X86
select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
@@ -294,7 +305,7 @@ config APPLE_DART
tristate "Apple DART IOMMU Support"
depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_IO_PGTABLE_DART
default ARCH_APPLE
help
Support for Apple DART (Device Address Resolution Table) IOMMUs
@@ -476,7 +487,6 @@ config VIRTIO_IOMMU
depends on VIRTIO
depends on (ARM64 || X86)
select IOMMU_API
- select IOMMU_DMA
select INTERVAL_TREE
select ACPI_VIOT if ACPI
help
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 44475a9b3eea..cc9f381013c3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o
obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index a3cbafb603f5..9b5fc3356bf2 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -9,7 +9,6 @@ config AMD_IOMMU
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
- select IOMMU_DMA
select IOMMU_IO_PGTABLE
depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index a935f8f4b974..773d8aa00283 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 84e5bb1bf01b..c160a332ce33 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -18,7 +18,6 @@ extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5b1019dab328..1d0a70c85333 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -94,6 +94,7 @@
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21)
+#define FEATURE_GIOSUP (1ULL<<48)
#define FEATURE_EPHSUP (1ULL<<50)
#define FEATURE_SNP (1ULL<<63)
@@ -276,6 +277,8 @@
* 512GB Pages are not supported due to a hardware bug
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
+/* 4K, 2MB, 1G page sizes are supported */
+#define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
/* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
@@ -376,6 +379,7 @@
#define DTE_FLAG_IW (1ULL << 62)
#define DTE_FLAG_IOTLB (1ULL << 32)
+#define DTE_FLAG_GIOV (1ULL << 54)
#define DTE_FLAG_GV (1ULL << 55)
#define DTE_FLAG_MASK (0x3ffULL << 32)
#define DTE_GLX_SHIFT (56)
@@ -434,6 +438,7 @@
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
translation */
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
+#define PD_GIOV_MASK (1UL << 4) /* domain enable GIOV support */
extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
@@ -456,6 +461,8 @@ struct irq_remap_table {
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
+extern const struct iommu_ops amd_iommu_ops;
+
/* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support;
@@ -526,7 +533,8 @@ struct amd_io_pgtable {
struct io_pgtable iop;
int mode;
u64 *root;
- atomic64_t pt_root; /* pgtable root and pgtable mode */
+ atomic64_t pt_root; /* pgtable root and pgtable mode */
+ u64 *pgd; /* v2 pgtable pgd pointer */
};
/*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index fdc642362c14..1a2d425bf568 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -95,8 +95,6 @@
* out of it.
*/
-extern const struct iommu_ops amd_iommu_ops;
-
/*
* structure describing one IOMMU in the ACPI table. Typically followed by one
* or more ivhd_entrys.
@@ -2068,6 +2066,17 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
init_iommu_perf_ctr(iommu);
+ if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
+ !iommu_feature(iommu, FEATURE_GT)) {
+ pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (iommu_default_passthrough()) {
+ pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+ }
+
if (is_rd890_iommu(iommu->dev)) {
int i, j;
@@ -2146,6 +2155,8 @@ static void print_iommu_info(void)
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
+ if (amd_iommu_pgtable == AMD_IOMMU_V2)
+ pr_info("V2 page table enabled\n");
}
static int __init amd_iommu_init_pci(void)
@@ -2168,20 +2179,13 @@ static int __init amd_iommu_init_pci(void)
/*
* Order is important here to make sure any unity map requirements are
* fulfilled. The unity mappings are created and written to the device
- * table during the amd_iommu_init_api() call.
+ * table during the iommu_init_pci() call.
*
* After that we call init_device_table_dma() to make sure any
* uninitialized DTE will block DMA, and in the end we flush the caches
* of all IOMMUs to make sure the changes to the device table are
* active.
*/
- ret = amd_iommu_init_api();
- if (ret) {
- pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
- ret);
- goto out;
- }
-
for_each_pci_segment(pci_seg)
init_device_table_dma(pci_seg);
@@ -3366,17 +3370,30 @@ static int __init parse_amd_iommu_intr(char *str)
static int __init parse_amd_iommu_options(char *str)
{
- for (; *str; ++str) {
+ if (!str)
+ return -EINVAL;
+
+ while (*str) {
if (strncmp(str, "fullflush", 9) == 0) {
pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
iommu_set_dma_strict();
- }
- if (strncmp(str, "force_enable", 12) == 0)
+ } else if (strncmp(str, "force_enable", 12) == 0) {
amd_iommu_force_enable = true;
- if (strncmp(str, "off", 3) == 0)
+ } else if (strncmp(str, "off", 3) == 0) {
amd_iommu_disabled = true;
- if (strncmp(str, "force_isolation", 15) == 0)
+ } else if (strncmp(str, "force_isolation", 15) == 0) {
amd_iommu_force_isolation = true;
+ } else if (strncmp(str, "pgtbl_v1", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V2;
+ } else {
+ pr_notice("Unknown option - '%s'\n", str);
+ }
+
+ str += strcspn(str, ",");
+ while (*str == ',')
+ str++;
}
return 1;
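
Because the reworked parser now walks comma-separated values, the new page-table selector can be combined with the existing switches on the kernel command line, for example:

	amd_iommu=pgtbl_v2,force_enable

Note that pgtbl_v2 is only a request: as the iommu_init_pci() hunk earlier shows, the driver falls back to the v1 table when the IOMMU lacks GIOSUP/GT support or when default passthrough mode is in effect.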
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 7d4b61e5db47..ace0e9b8b913 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
* supporting all features of AMD IOMMU page tables like level skipping
* and full 64 bit address spaces.
*/
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
LIST_HEAD(freelist);
@@ -369,39 +370,47 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
u64 __pte, *pte;
int ret, i, count;
- BUG_ON(!IS_ALIGNED(iova, size));
- BUG_ON(!IS_ALIGNED(paddr, size));
+ BUG_ON(!IS_ALIGNED(iova, pgsize));
+ BUG_ON(!IS_ALIGNED(paddr, pgsize));
ret = -EINVAL;
if (!(prot & IOMMU_PROT_MASK))
goto out;
- count = PAGE_SIZE_PTE_COUNT(size);
- pte = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+ while (pgcount > 0) {
+ count = PAGE_SIZE_PTE_COUNT(pgsize);
+ pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
- ret = -ENOMEM;
- if (!pte)
- goto out;
+ ret = -ENOMEM;
+ if (!pte)
+ goto out;
- for (i = 0; i < count; ++i)
- free_clear_pte(&pte[i], pte[i], &freelist);
+ for (i = 0; i < count; ++i)
+ free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
- updated = true;
+ if (!list_empty(&freelist))
+ updated = true;
- if (count > 1) {
- __pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- } else
- __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ if (count > 1) {
+ __pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ } else
+ __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- if (prot & IOMMU_PROT_IR)
- __pte |= IOMMU_PTE_IR;
- if (prot & IOMMU_PROT_IW)
- __pte |= IOMMU_PTE_IW;
+ if (prot & IOMMU_PROT_IR)
+ __pte |= IOMMU_PTE_IR;
+ if (prot & IOMMU_PROT_IW)
+ __pte |= IOMMU_PTE_IW;
- for (i = 0; i < count; ++i)
- pte[i] = __pte;
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
+
+ iova += pgsize;
+ paddr += pgsize;
+ pgcount--;
+ if (mapped)
+ *mapped += pgsize;
+ }
ret = 0;
@@ -426,17 +435,18 @@ out:
return ret;
}
-static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
- unsigned long iova,
- size_t size,
- struct iommu_iotlb_gather *gather)
+static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long long unmapped;
unsigned long unmap_size;
u64 *pte;
+ size_t size = pgcount << __ffs(pgsize);
- BUG_ON(!is_power_of_2(size));
+ BUG_ON(!is_power_of_2(pgsize));
unmapped = 0;
@@ -448,14 +458,14 @@ static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
+ } else {
+ return unmapped;
}
iova = (iova & ~(unmap_size - 1)) + unmap_size;
unmapped += unmap_size;
}
- BUG_ON(unmapped && !is_power_of_2(unmapped));
-
return unmapped;
}
@@ -514,8 +524,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v1_flush_ops;
- pgtable->iop.ops.map = iommu_v1_map_page;
- pgtable->iop.ops.unmap = iommu_v1_unmap_page;
+ pgtable->iop.ops.map_pages = iommu_v1_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
return &pgtable->iop;
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
new file mode 100644
index 000000000000..8638ddf6fb3b
--- /dev/null
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic AMD IO page table v2 allocator.
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ * Author: Vasant Hegde <vasant.hegde@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+
+#include <asm/barrier.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu.h"
+
+#define IOMMU_PAGE_PRESENT BIT_ULL(0) /* Is present */
+#define IOMMU_PAGE_RW BIT_ULL(1) /* Writeable */
+#define IOMMU_PAGE_USER BIT_ULL(2) /* Userspace addressable */
+#define IOMMU_PAGE_PWT BIT_ULL(3) /* Page write through */
+#define IOMMU_PAGE_PCD BIT_ULL(4) /* Page cache disabled */
+#define IOMMU_PAGE_ACCESS BIT_ULL(5) /* Was accessed (updated by IOMMU) */
+#define IOMMU_PAGE_DIRTY BIT_ULL(6) /* Was written to (updated by IOMMU) */
+#define IOMMU_PAGE_PSE BIT_ULL(7) /* Page Size Extensions */
+#define IOMMU_PAGE_NX BIT_ULL(63) /* No execute */
+
+#define MAX_PTRS_PER_PAGE 512
+
+#define IOMMU_PAGE_SIZE_2M BIT_ULL(21)
+#define IOMMU_PAGE_SIZE_1G BIT_ULL(30)
+
+
+static inline int get_pgtable_level(void)
+{
+ /* 5 level page table is not supported */
+ return PAGE_MODE_4_LEVEL;
+}
+
+static inline bool is_large_pte(u64 pte)
+{
+ return (pte & IOMMU_PAGE_PSE);
+}
+
+static inline void *alloc_pgtable_page(void)
+{
+ return (void *)get_zeroed_page(GFP_KERNEL);
+}
+
+static inline u64 set_pgtable_attr(u64 *page)
+{
+ u64 prot;
+
+ prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
+ prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+
+ return (iommu_virt_to_phys(page) | prot);
+}
+
+static inline void *get_pgtable_pte(u64 pte)
+{
+ return iommu_phys_to_virt(pte & PM_ADDR_MASK);
+}
+
+static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
+{
+ u64 pte;
+
+ pte = __sme_set(paddr & PM_ADDR_MASK);
+ pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
+ pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+
+ if (prot & IOMMU_PROT_IW)
+ pte |= IOMMU_PAGE_RW;
+
+ /* Large page */
+ if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
+ pte |= IOMMU_PAGE_PSE;
+
+ return pte;
+}
+
+static inline u64 get_alloc_page_size(u64 size)
+{
+ if (size >= IOMMU_PAGE_SIZE_1G)
+ return IOMMU_PAGE_SIZE_1G;
+
+ if (size >= IOMMU_PAGE_SIZE_2M)
+ return IOMMU_PAGE_SIZE_2M;
+
+ return PAGE_SIZE;
+}
+
+static inline int page_size_to_level(u64 pg_size)
+{
+ if (pg_size == IOMMU_PAGE_SIZE_1G)
+ return PAGE_MODE_3_LEVEL;
+ if (pg_size == IOMMU_PAGE_SIZE_2M)
+ return PAGE_MODE_2_LEVEL;
+
+ return PAGE_MODE_1_LEVEL;
+}
+
+static inline void free_pgtable_page(u64 *pt)
+{
+ free_page((unsigned long)pt);
+}
+
+static void free_pgtable(u64 *pt, int level)
+{
+ u64 *p;
+ int i;
+
+ for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
+ /* PTE present? */
+ if (!IOMMU_PTE_PRESENT(pt[i]))
+ continue;
+
+ if (is_large_pte(pt[i]))
+ continue;
+
+ /*
+ * Free the next level. No need to look at l1 tables here since
+ * they can only contain leaf PTEs; just free them directly.
+ */
+ p = get_pgtable_pte(pt[i]);
+ if (level > 2)
+ free_pgtable(p, level - 1);
+ else
+ free_pgtable_page(p);
+ }
+
+ free_pgtable_page(pt);
+}
+
+/* Allocate page table */
+static u64 *v2_alloc_pte(u64 *pgd, unsigned long iova,
+ unsigned long pg_size, bool *updated)
+{
+ u64 *pte, *page;
+ int level, end_level;
+
+ level = get_pgtable_level() - 1;
+ end_level = page_size_to_level(pg_size);
+ pte = &pgd[PM_LEVEL_INDEX(level, iova)];
+ iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
+
+ while (level >= end_level) {
+ u64 __pte, __npte;
+
+ __pte = *pte;
+
+ if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
+ /* Unmap large pte */
+ cmpxchg64(pte, *pte, 0ULL);
+ *updated = true;
+ continue;
+ }
+
+ if (!IOMMU_PTE_PRESENT(__pte)) {
+ page = alloc_pgtable_page();
+ if (!page)
+ return NULL;
+
+ __npte = set_pgtable_attr(page);
+ /* pte could have been changed somewhere. */
+ if (cmpxchg64(pte, __pte, __npte) != __pte)
+ free_pgtable_page(page);
+ else if (IOMMU_PTE_PRESENT(__pte))
+ *updated = true;
+
+ continue;
+ }
+
+ level -= 1;
+ pte = get_pgtable_pte(__pte);
+ pte = &pte[PM_LEVEL_INDEX(level, iova)];
+ }
+
+ /* Tear down existing pte entries */
+ if (IOMMU_PTE_PRESENT(*pte)) {
+ u64 *__pte;
+
+ *updated = true;
+ __pte = get_pgtable_pte(*pte);
+ cmpxchg64(pte, *pte, 0ULL);
+ if (pg_size == IOMMU_PAGE_SIZE_1G)
+ free_pgtable(__pte, end_level - 1);
+ else if (pg_size == IOMMU_PAGE_SIZE_2M)
+ free_pgtable_page(__pte);
+ }
+
+ return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address.
+ * If there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ unsigned long iova, unsigned long *page_size)
+{
+ u64 *pte;
+ int level;
+
+ level = get_pgtable_level() - 1;
+ pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
+ /* Default page size is 4K */
+ *page_size = PAGE_SIZE;
+
+ while (level) {
+ /* Not present */
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
+
+ /* Walk to the next level */
+ pte = get_pgtable_pte(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];
+
+ /* Large page */
+ if (is_large_pte(*pte)) {
+ if (level == PAGE_MODE_3_LEVEL)
+ *page_size = IOMMU_PAGE_SIZE_1G;
+ else if (level == PAGE_MODE_2_LEVEL)
+ *page_size = IOMMU_PAGE_SIZE_2M;
+ else
+ return NULL; /* Wrongly set PSE bit in PTE */
+
+ break;
+ }
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
+ struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
+ u64 *pte;
+ unsigned long map_size;
+ unsigned long mapped_size = 0;
+ unsigned long o_iova = iova;
+ size_t size = pgcount << __ffs(pgsize);
+ int count = 0;
+ int ret = 0;
+ bool updated = false;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
+ return -EINVAL;
+
+ if (!(prot & IOMMU_PROT_MASK))
+ return -EINVAL;
+
+ while (mapped_size < size) {
+ map_size = get_alloc_page_size(pgsize);
+ pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated);
+ if (!pte) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *pte = set_pte_attr(paddr, map_size, prot);
+
+ count++;
+ iova += map_size;
+ paddr += map_size;
+ mapped_size += map_size;
+ }
+
+out:
+ if (updated) {
+ if (count > 1)
+ amd_iommu_flush_tlb(&pdom->domain, 0);
+ else
+ amd_iommu_flush_page(&pdom->domain, 0, o_iova);
+ }
+
+ if (mapped)
+ *mapped += mapped_size;
+
+ return ret;
+}
+
+static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
+ unsigned long unmap_size;
+ unsigned long unmapped = 0;
+ size_t size = pgcount << __ffs(pgsize);
+ u64 *pte;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
+ return 0;
+
+ while (unmapped < size) {
+ pte = fetch_pte(pgtable, iova, &unmap_size);
+ if (!pte)
+ return unmapped;
+
+ *pte = 0ULL;
+
+ iova = (iova & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ return unmapped;
+}
+
+static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long offset_mask, pte_pgsize;
+ u64 *pte, __pte;
+
+ pte = fetch_pte(pgtable, iova, &pte_pgsize);
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ return 0;
+
+ offset_mask = pte_pgsize - 1;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
+
+ return (__pte & ~offset_mask) | (iova & offset_mask);
+}
+
+/*
+ * ----------------------------------------------------
+ */
+static void v2_tlb_flush_all(void *cookie)
+{
+}
+
+static void v2_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+}
+
+static const struct iommu_flush_ops v2_flush_ops = {
+ .tlb_flush_all = v2_tlb_flush_all,
+ .tlb_flush_walk = v2_tlb_flush_walk,
+ .tlb_add_page = v2_tlb_add_page,
+};
+
+static void v2_free_pgtable(struct io_pgtable *iop)
+{
+ struct protection_domain *pdom;
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+
+ pdom = container_of(pgtable, struct protection_domain, iop);
+ if (!(pdom->flags & PD_IOMMUV2_MASK))
+ return;
+
+ /*
+ * Make changes visible to IOMMUs. No need to clear gcr3 entry
+ * as gcr3 table is already freed.
+ */
+ amd_iommu_domain_update(pdom);
+
+ /* Free page table */
+ free_pgtable(pgtable->pgd, get_pgtable_level());
+}
+
+static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+ struct protection_domain *pdom = (struct protection_domain *)cookie;
+ int ret;
+
+ pgtable->pgd = alloc_pgtable_page();
+ if (!pgtable->pgd)
+ return NULL;
+
+ ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
+ if (ret)
+ goto err_free_pgd;
+
+ pgtable->iop.ops.map_pages = iommu_v2_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
+ pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
+
+ cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2,
+ cfg->ias = IOMMU_IN_ADDR_BIT_SIZE,
+ cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
+ cfg->tlb = &v2_flush_ops;
+
+ return &pgtable->iop;
+
+err_free_pgd:
+ free_pgtable_page(pgtable->pgd);
+
+ return NULL;
+}
+
+struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
+ .alloc = v2_alloc_pgtable,
+ .free = v2_free_pgtable,
+};
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 828672a46a3d..65856e401949 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -11,8 +11,6 @@
#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
-#include <linux/amba/bus.h>
-#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
@@ -20,7 +18,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
@@ -42,6 +39,7 @@
#include <asm/dma.h>
#include "amd_iommu.h"
+#include "../dma-iommu.h"
#include "../irq_remapping.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -66,10 +64,6 @@ LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
-/*
- * Domain for untranslated devices - only allocated
- * if iommu=pt passed on kernel cmd line.
- */
const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -85,6 +79,7 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
+static int domain_enable_v2(struct protection_domain *domain, int pasids);
/****************************************************************************
*
@@ -1597,6 +1592,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
flags |= tmp;
+
+ if (domain->flags & PD_GIOV_MASK)
+ pte_root |= DTE_FLAG_GIOV;
}
flags &= ~DEV_DOMID_MASK;
@@ -1650,6 +1648,10 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
+ /* Override supported page sizes */
+ if (domain->flags & PD_GIOV_MASK)
+ domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+
/* Update device table */
set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
@@ -1694,7 +1696,7 @@ static void pdev_iommuv2_disable(struct pci_dev *pdev)
pci_disable_pasid(pdev);
}
-static int pdev_iommuv2_enable(struct pci_dev *pdev)
+static int pdev_pri_ats_enable(struct pci_dev *pdev)
{
int ret;
@@ -1757,11 +1759,19 @@ static int attach_device(struct device *dev,
struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
ret = -EINVAL;
- if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
+
+ /*
+ * When the AMD_IOMMU_V1 page table mode is in use and the device is
+ * being enabled for PPR/ATS support (i.e. it uses the v2 table), the
+ * domain type must be identity map.
+ */
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ def_domain->type != IOMMU_DOMAIN_IDENTITY) {
goto out;
+ }
if (dev_data->iommu_v2) {
- if (pdev_iommuv2_enable(pdev) != 0)
+ if (pdev_pri_ats_enable(pdev) != 0)
goto out;
dev_data->ats.enabled = true;
@@ -1852,6 +1862,10 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
if (!iommu)
return ERR_PTR(-ENODEV);
+ /* Not registered yet? */
+ if (!iommu->iommu.ops)
+ return ERR_PTR(-ENODEV);
+
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
@@ -1938,25 +1952,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_complete(domain);
}
-int __init amd_iommu_init_api(void)
-{
- int err;
-
- err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
- if (err)
- return err;
-#ifdef CONFIG_ARM_AMBA
- err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
- if (err)
- return err;
-#endif
- err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
- if (err)
- return err;
-
- return 0;
-}
-
/*****************************************************************************
*
* The following functions belong to the exported interface of AMD IOMMU
@@ -1989,12 +1984,12 @@ static void protection_domain_free(struct protection_domain *domain)
if (!domain)
return;
- if (domain->id)
- domain_id_free(domain->id);
-
if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops);
+ if (domain->id)
+ domain_id_free(domain->id);
+
kfree(domain);
}
@@ -2012,8 +2007,10 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
if (mode != PAGE_MODE_NONE) {
pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root)
+ if (!pt_root) {
+ domain_id_free(domain->id);
return -ENOMEM;
+ }
}
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
@@ -2021,6 +2018,24 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
return 0;
}
+static int protection_domain_init_v2(struct protection_domain *domain)
+{
+ spin_lock_init(&domain->lock);
+ domain->id = domain_id_alloc();
+ if (!domain->id)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&domain->dev_list);
+
+ domain->flags |= PD_GIOV_MASK;
+
+ if (domain_enable_v2(domain, 1)) {
+ domain_id_free(domain->id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static struct protection_domain *protection_domain_alloc(unsigned int type)
{
struct io_pgtable_ops *pgtbl_ops;
@@ -2048,6 +2063,9 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
case AMD_IOMMU_V1:
ret = protection_domain_init_v1(domain, mode);
break;
+ case AMD_IOMMU_V2:
+ ret = protection_domain_init_v2(domain);
+ break;
default:
ret = -EINVAL;
}
@@ -2056,8 +2074,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
goto out_err;
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
- if (!pgtbl_ops)
+ if (!pgtbl_ops) {
+ domain_id_free(domain->id);
goto out_err;
+ }
return domain;
out_err:
@@ -2175,13 +2195,13 @@ static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
- if (ops->map)
+ if (ops->map_pages)
domain_flush_np_cache(domain, iova, size);
}
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot,
- gfp_t gfp)
+static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2197,8 +2217,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- if (ops->map)
- ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+ if (ops->map_pages) {
+ ret = ops->map_pages(ops, iova, paddr, pgsize,
+ pgcount, prot, gfp, mapped);
+ }
return ret;
}
@@ -2224,9 +2246,9 @@ static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
iommu_iotlb_gather_add_range(gather, iova, size);
}
-static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- size_t page_size,
- struct iommu_iotlb_gather *gather)
+static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2236,9 +2258,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
- r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+ r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
- amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+ if (r)
+ amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
return r;
}
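
Aside, not part of the patch: the hunks above switch the AMD driver from the single-page .map/.unmap callbacks to the ranged .map_pages/.unmap_pages ones. A minimal sketch of the new contract follows; the helper name and the sixteen-page request are made up for illustration, only the callback signature comes from the patch.

static int example_map_16_pages(struct io_pgtable_ops *ops, unsigned long iova,
				phys_addr_t paddr, int prot)
{
	size_t mapped = 0;	/* bytes actually installed, filled in by the callee */
	int ret;

	/* one page size, a page count, and an out-parameter for progress */
	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16, prot, GFP_KERNEL, &mapped);
	if (ret)
		return ret;	/* 'mapped' may be non-zero even on failure */

	/* the callee may stop early; the caller is expected to retry the rest */
	return mapped == 16 * SZ_4K ? 0 : -EAGAIN;
}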
@@ -2252,7 +2275,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return ops->iova_to_phys(ops, iova);
}
-static bool amd_iommu_capable(enum iommu_cap cap)
+static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -2400,8 +2423,8 @@ const struct iommu_ops amd_iommu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
- .map = amd_iommu_map,
- .unmap = amd_iommu_unmap,
+ .map_pages = amd_iommu_map_pages,
+ .unmap_pages = amd_iommu_unmap_pages,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.iova_to_phys = amd_iommu_iova_to_phys,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
@@ -2448,11 +2471,10 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
}
EXPORT_SYMBOL(amd_iommu_domain_direct_map);
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+/* Note: This function expects iommu_domain->lock to be held prior to calling it. */
+static int domain_enable_v2(struct protection_domain *domain, int pasids)
{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int levels, ret;
+ int levels;
/* Number of GCR3 table levels required */
for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
@@ -2461,7 +2483,25 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
if (levels > amd_iommu_max_glx_val)
return -EINVAL;
- spin_lock_irqsave(&domain->lock, flags);
+ domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (domain->gcr3_tbl == NULL)
+ return -ENOMEM;
+
+ domain->glx = levels;
+ domain->flags |= PD_IOMMUV2_MASK;
+
+ amd_iommu_domain_update(domain);
+
+ return 0;
+}
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+ struct protection_domain *pdom = to_pdomain(dom);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdom->lock, flags);
/*
* Save us all sanity checks whether devices already in the
@@ -2469,24 +2509,14 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
* devices attached when it is switched into IOMMUv2 mode.
*/
ret = -EBUSY;
- if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+ if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
goto out;
- ret = -ENOMEM;
- domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
- if (domain->gcr3_tbl == NULL)
- goto out;
-
- domain->glx = levels;
- domain->flags |= PD_IOMMUV2_MASK;
-
- amd_iommu_domain_update(domain);
-
- ret = 0;
+ if (!pdom->gcr3_tbl)
+ ret = domain_enable_v2(pdom, pasids);
out:
- spin_unlock_irqrestore(&domain->lock, flags);
-
+ spin_unlock_irqrestore(&pdom->lock, flags);
return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
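
For reference only (not part of the patch): the level computation that the refactored domain_enable_v2() keeps can be exercised on its own. The user-space sketch below mirrors the loop from the hunk and shows that each extra GCR3 table level covers another 9 bits of PASID index; the result is the value stored in domain->glx.

#include <stdio.h>

/* Mirrors the loop kept in domain_enable_v2(): each GCR3 table level
 * resolves 9 bits of PASID, so 512 PASIDs fit in a single level. */
static int gcr3_levels(int pasids)
{
	int levels;

	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	return levels;
}

int main(void)
{
	printf("   512 PASIDs -> glx %d\n", gcr3_levels(512));	/* 0 */
	printf("  2^16 PASIDs -> glx %d\n", gcr3_levels(1 << 16));	/* 1 */
	printf("  2^20 PASIDs -> glx %d\n", gcr3_levels(1 << 20));	/* 2 */
	return 0;
}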
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 1b1725759262..4526575b999e 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -15,7 +15,6 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dev_printk.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -33,6 +32,8 @@
#include <linux/swab.h>
#include <linux/types.h>
+#include "dma-iommu.h"
+
#define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2
@@ -81,10 +82,16 @@
#define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12
+struct apple_dart_hw {
+ u32 oas;
+ enum io_pgtable_fmt fmt;
+};
+
/*
* Private structure associated with each DART device.
*
* @dev: device struct
+ * @hw: SoC-specific hardware data
* @regs: mapped MMIO region
* @irq: interrupt number, can be shared with other DARTs
* @clks: clocks associated with this DART
@@ -98,6 +105,7 @@
*/
struct apple_dart {
struct device *dev;
+ const struct apple_dart_hw *hw;
void __iomem *regs;
@@ -421,13 +429,13 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
pgtbl_cfg = (struct io_pgtable_cfg){
.pgsize_bitmap = dart->pgsize,
.ias = 32,
- .oas = 36,
+ .oas = dart->hw->oas,
.coherent_walk = 1,
.iommu_dev = dart->dev,
};
dart_domain->pgtbl_ops =
- alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain);
+ alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
if (!dart_domain->pgtbl_ops) {
ret = -ENOMEM;
goto done;
@@ -820,27 +828,6 @@ static irqreturn_t apple_dart_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int apple_dart_set_bus_ops(const struct iommu_ops *ops)
-{
- int ret;
-
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, ops);
- if (ret)
- return ret;
- }
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- ret = bus_set_iommu(&pci_bus_type, ops);
- if (ret) {
- bus_set_iommu(&platform_bus_type, NULL);
- return ret;
- }
- }
-#endif
- return 0;
-}
-
static int apple_dart_probe(struct platform_device *pdev)
{
int ret;
@@ -854,6 +841,7 @@ static int apple_dart_probe(struct platform_device *pdev)
return -ENOMEM;
dart->dev = dev;
+ dart->hw = of_device_get_match_data(dev);
spin_lock_init(&dart->lock);
dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@@ -895,14 +883,10 @@ static int apple_dart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dart);
- ret = apple_dart_set_bus_ops(&apple_dart_iommu_ops);
- if (ret)
- goto err_free_irq;
-
ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s",
dev_name(&pdev->dev));
if (ret)
- goto err_remove_bus_ops;
+ goto err_free_irq;
ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev);
if (ret)
@@ -916,8 +900,6 @@ static int apple_dart_probe(struct platform_device *pdev)
err_sysfs_remove:
iommu_device_sysfs_remove(&dart->iommu);
-err_remove_bus_ops:
- apple_dart_set_bus_ops(NULL);
err_free_irq:
free_irq(dart->irq, dart);
err_clk_disable:
@@ -932,7 +914,6 @@ static int apple_dart_remove(struct platform_device *pdev)
apple_dart_hw_reset(dart);
free_irq(dart->irq, dart);
- apple_dart_set_bus_ops(NULL);
iommu_device_unregister(&dart->iommu);
iommu_device_sysfs_remove(&dart->iommu);
@@ -942,8 +923,18 @@ static int apple_dart_remove(struct platform_device *pdev)
return 0;
}
+static const struct apple_dart_hw apple_dart_hw_t8103 = {
+ .oas = 36,
+ .fmt = APPLE_DART,
+};
+static const struct apple_dart_hw apple_dart_hw_t6000 = {
+ .oas = 42,
+ .fmt = APPLE_DART2,
+};
+
static const struct of_device_id apple_dart_of_match[] = {
- { .compatible = "apple,t8103-dart", .data = NULL },
+ { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+ { .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
{},
};
MODULE_DEVICE_TABLE(of, apple_dart_of_match);
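
Condensed for illustration (not part of the patch): the per-SoC parameterisation added to apple-dart is the standard OF match-data pattern. The struct, table entries and probe() assignment below are lifted straight from the hunks; only the comments are added.

struct apple_dart_hw {
	u32 oas;			/* output address size fed into the io-pgtable cfg */
	enum io_pgtable_fmt fmt;	/* APPLE_DART or APPLE_DART2 */
};

static const struct apple_dart_hw apple_dart_hw_t8103 = { .oas = 36, .fmt = APPLE_DART };
static const struct apple_dart_hw apple_dart_hw_t6000 = { .oas = 42, .fmt = APPLE_DART2 };

static const struct of_device_id apple_dart_of_match[] = {
	{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
	{ .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
	{ /* sentinel */ },
};

/* in probe(): non-NULL for any matched device, since every entry sets .data */
dart->hw = of_device_get_match_data(dev);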
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index d32b02336411..ba47c73f5b8c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -14,7 +14,6 @@
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
@@ -28,9 +27,8 @@
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
-
#include "arm-smmu-v3.h"
+#include "../../dma-iommu.h"
#include "../../iommu-sva-lib.h"
static bool disable_bypass = true;
@@ -1992,11 +1990,14 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
};
/* IOMMU API */
-static bool arm_smmu_capable(enum iommu_cap cap)
+static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- return true;
+ /* Assume that a coherent TCU implies coherent TBUs */
+ return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -2817,6 +2818,26 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
}
}
+/*
+ * HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
+ * PCIe link and save the data to memory by DMA. The hardware is restricted to
+ * use identity mapping only.
+ */
+#define IS_HISI_PTT_DEVICE(pdev) ((pdev)->vendor == PCI_VENDOR_ID_HUAWEI && \
+ (pdev)->device == 0xa12e)
+
+static int arm_smmu_def_domain_type(struct device *dev)
+{
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (IS_HISI_PTT_DEVICE(pdev))
+ return IOMMU_DOMAIN_IDENTITY;
+ }
+
+ return 0;
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -2831,6 +2852,7 @@ static struct iommu_ops arm_smmu_ops = {
.sva_unbind = arm_smmu_sva_unbind,
.sva_get_pasid = arm_smmu_sva_get_pasid,
.page_response = arm_smmu_page_response,
+ .def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -3673,43 +3695,6 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
return SZ_128K;
}
-static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
-{
- int err;
-
-#ifdef CONFIG_PCI
- if (pci_bus_type.iommu_ops != ops) {
- err = bus_set_iommu(&pci_bus_type, ops);
- if (err)
- return err;
- }
-#endif
-#ifdef CONFIG_ARM_AMBA
- if (amba_bustype.iommu_ops != ops) {
- err = bus_set_iommu(&amba_bustype, ops);
- if (err)
- goto err_reset_pci_ops;
- }
-#endif
- if (platform_bus_type.iommu_ops != ops) {
- err = bus_set_iommu(&platform_bus_type, ops);
- if (err)
- goto err_reset_amba_ops;
- }
-
- return 0;
-
-err_reset_amba_ops:
-#ifdef CONFIG_ARM_AMBA
- bus_set_iommu(&amba_bustype, NULL);
-#endif
-err_reset_pci_ops: __maybe_unused;
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
- return err;
-}
-
static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
resource_size_t size)
{
@@ -3848,27 +3833,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- goto err_sysfs_remove;
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return ret;
}
- ret = arm_smmu_set_bus_ops(&arm_smmu_ops);
- if (ret)
- goto err_unregister_device;
-
return 0;
-
-err_unregister_device:
- iommu_device_unregister(&smmu->iommu);
-err_sysfs_remove:
- iommu_device_sysfs_remove(&smmu->iommu);
- return ret;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
- arm_smmu_set_bus_ops(NULL);
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
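
As an aside (not part of the patch): the def_domain_type hook added above is the usual way to pin one device to a particular default domain type. The vendor/device values below are those from the hunk; the function name is illustrative.

static int example_def_domain_type(struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))
		return 0;		/* no preference: let the core decide */

	pdev = to_pci_dev(dev);
	/* HiSilicon PTT traces TLP headers by DMA and needs identity mapping */
	if (pdev->vendor == PCI_VENDOR_ID_HUAWEI && pdev->device == 0xa12e)
		return IOMMU_DOMAIN_IDENTITY;

	return 0;
}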
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index dfa82df00342..6c1114a4d6cc 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -21,7 +21,6 @@
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -37,10 +36,10 @@
#include <linux/ratelimit.h>
#include <linux/slab.h>
-#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>
#include "arm-smmu.h"
+#include "../../dma-iommu.h"
/*
* Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
@@ -93,8 +92,6 @@ static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
-static int arm_smmu_bus_init(struct iommu_ops *ops);
-
static struct device_node *dev_get_dev_node(struct device *dev)
{
if (dev_is_pci(dev)) {
@@ -180,20 +177,6 @@ static int arm_smmu_register_legacy_master(struct device *dev,
kfree(sids);
return err;
}
-
-/*
- * With the legacy DT binding in play, we have no guarantees about
- * probe order, but then we're also not doing default domains, so we can
- * delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no probe_device() calls get missed.
- */
-static int arm_smmu_legacy_bus_init(void)
-{
- if (using_legacy_binding)
- return arm_smmu_bus_init(&arm_smmu_ops);
- return 0;
-}
-device_initcall_sync(arm_smmu_legacy_bus_init);
#else
static int arm_smmu_register_legacy_master(struct device *dev,
struct arm_smmu_device **smmu)
@@ -1330,15 +1313,14 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
return ops->iova_to_phys(ops, iova);
}
-static bool arm_smmu_capable(enum iommu_cap cap)
+static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- /*
- * Return true here as the SMMU can always send out coherent
- * requests.
- */
- return true;
+ /* Assume that a coherent TCU implies coherent TBUs */
+ return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -2016,52 +1998,6 @@ static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
return 0;
}
-static int arm_smmu_bus_init(struct iommu_ops *ops)
-{
- int err;
-
- /* Oh, for a proper bus abstraction */
- if (!iommu_present(&platform_bus_type)) {
- err = bus_set_iommu(&platform_bus_type, ops);
- if (err)
- return err;
- }
-#ifdef CONFIG_ARM_AMBA
- if (!iommu_present(&amba_bustype)) {
- err = bus_set_iommu(&amba_bustype, ops);
- if (err)
- goto err_reset_platform_ops;
- }
-#endif
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- err = bus_set_iommu(&pci_bus_type, ops);
- if (err)
- goto err_reset_amba_ops;
- }
-#endif
-#ifdef CONFIG_FSL_MC_BUS
- if (!iommu_present(&fsl_mc_bus_type)) {
- err = bus_set_iommu(&fsl_mc_bus_type, ops);
- if (err)
- goto err_reset_pci_ops;
- }
-#endif
- return 0;
-
-err_reset_pci_ops: __maybe_unused;
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
-err_reset_amba_ops: __maybe_unused;
-#ifdef CONFIG_ARM_AMBA
- bus_set_iommu(&amba_bustype, NULL);
-#endif
-err_reset_platform_ops: __maybe_unused;
- bus_set_iommu(&platform_bus_type, NULL);
- return err;
-}
-
static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
{
struct list_head rmr_list;
@@ -2226,7 +2162,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
- goto err_sysfs_remove;
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return err;
}
platform_set_drvdata(pdev, smmu);
@@ -2248,24 +2185,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
}
- /*
- * For ACPI and generic DT bindings, an SMMU will be probed before
- * any device which might need it, so we want the bus ops in place
- * ready to handle default domain setup as soon as any SMMU exists.
- */
- if (!using_legacy_binding) {
- err = arm_smmu_bus_init(&arm_smmu_ops);
- if (err)
- goto err_unregister_device;
- }
-
return 0;
-
-err_unregister_device:
- iommu_device_unregister(&smmu->iommu);
-err_sysfs_remove:
- iommu_device_sysfs_remove(&smmu->iommu);
- return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -2278,7 +2198,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
dev_notice(&pdev->dev, "disabling translation\n");
- arm_smmu_bus_init(NULL);
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
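
Illustration only, not part of the patch: with bus_set_iommu() gone, the probe tail of these drivers converges on the same minimal shape — sysfs add, then iommu_device_register(), which (per the iommu core change later in this series) now also performs the per-bus probing the removed helpers used to do. Names follow the arm-smmu driver.

	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "smmu.%pa", &ioaddr);
	if (ret)
		return ret;

	ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
	if (ret) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ret;
	}
	return 0;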
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 17235116d3bb..3869c3ecda8c 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -493,7 +493,7 @@ static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
-static bool qcom_iommu_capable(enum iommu_cap cap)
+static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -837,8 +837,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
goto err_pm_disable;
}
- bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
-
if (qcom_iommu->local_base) {
pm_runtime_get_sync(dev);
writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
@@ -856,8 +854,6 @@ static int qcom_iommu_device_remove(struct platform_device *pdev)
{
struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
- bus_set_iommu(&platform_bus_type, NULL);
-
pm_runtime_force_suspend(&pdev->dev);
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&qcom_iommu->iommu);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 17dd683b2fce..9297b741f5e8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -13,7 +13,6 @@
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
@@ -30,6 +29,8 @@
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
+#include "dma-iommu.h"
+
struct iommu_dma_msi_page {
struct list_head list;
dma_addr_t iova;
@@ -1633,6 +1634,13 @@ out_free_page:
return NULL;
}
+/**
+ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
+ * @desc: MSI descriptor, will store the MSI page
+ * @msi_addr: MSI target address to be mapped
+ *
+ * Return: 0 on success or negative error code if the mapping failed.
+ */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
@@ -1661,8 +1669,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
return 0;
}
-void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg)
+/**
+ * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
+ * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
+ * @msg: MSI message containing target physical address
+ */
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
new file mode 100644
index 000000000000..942790009292
--- /dev/null
+++ b/drivers/iommu/dma-iommu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-2015 ARM Ltd.
+ */
+#ifndef __DMA_IOMMU_H
+#define __DMA_IOMMU_H
+
+#include <linux/iommu.h>
+
+#ifdef CONFIG_IOMMU_DMA
+
+int iommu_get_dma_cookie(struct iommu_domain *domain);
+void iommu_put_dma_cookie(struct iommu_domain *domain);
+
+int iommu_dma_init_fq(struct iommu_domain *domain);
+
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+
+extern bool iommu_dma_forcedac;
+
+#else /* CONFIG_IOMMU_DMA */
+
+static inline int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+ return -EINVAL;
+}
+
+static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+#endif /* __DMA_IOMMU_H */
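
Aside (not part of the patch): the new header keeps these prototypes private to drivers/iommu/ and pairs each with a static inline stub, so callers can stay unconditional. A short sketch of the intended use; the caller function is hypothetical.

#include "dma-iommu.h"	/* private to drivers/iommu/, replaces <linux/dma-iommu.h> */

static int example_domain_setup(struct iommu_domain *domain)
{
	/* With CONFIG_IOMMU_DMA=n this resolves to the -ENODEV stub above. */
	return iommu_get_dma_cookie(domain);
}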
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 8e18984a0c4f..45fd4850bacb 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1446,16 +1446,7 @@ static int __init exynos_iommu_init(void)
goto err_zero_lv2;
}
- ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
- if (ret) {
- pr_err("%s: Failed to register exynos-iommu driver.\n",
- __func__);
- goto err_set_iommu;
- }
-
return 0;
-err_set_iommu:
- kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 011f9ab7f743..fa20f4b03e12 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -178,7 +178,7 @@ static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
return iova;
}
-static bool fsl_pamu_capable(enum iommu_cap cap)
+static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
{
return cap == IOMMU_CAP_CACHE_COHERENCY;
}
@@ -476,11 +476,7 @@ int __init pamu_domain_init(void)
if (ret) {
iommu_device_sysfs_remove(&pamu_iommu);
pr_err("Can't register iommu device\n");
- return ret;
}
- bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
- bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
-
return ret;
}
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 39a06d245f12..b7dff5092fd2 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -19,8 +19,9 @@ config INTEL_IOMMU
select DMAR_TABLE
select SWIOTLB
select IOASID
- select IOMMU_DMA
select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
@@ -48,10 +49,7 @@ config INTEL_IOMMU_DEBUGFS
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on X86_64
- select PCI_PASID
- select PCI_PRI
select MMU_NOTIFIER
- select IOASID
select IOMMU_SVA
help
Shared Virtual Memory (SVM) provides a facility for devices
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
index 3ee68393122f..806986696841 100644
--- a/drivers/iommu/intel/cap_audit.c
+++ b/drivers/iommu/intel/cap_audit.c
@@ -37,7 +37,7 @@ static inline void check_dmar_capabilities(struct intel_iommu *a,
MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, fl5lp_support, CAP_FL5LP_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
@@ -84,7 +84,7 @@ static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type
goto out;
}
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 31bc50e538a3..a8b36c3fddf1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -15,7 +15,6 @@
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/dmi.h>
#include <linux/intel-svm.h>
#include <linux/memory.h>
@@ -26,6 +25,7 @@
#include <linux/tboot.h>
#include "iommu.h"
+#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
#include "pasid.h"
@@ -199,6 +199,11 @@ static inline void context_set_domain_id(struct context_entry *context,
context->hi |= (value & ((1 << 16) - 1)) << 8;
}
+static inline void context_set_pasid(struct context_entry *context)
+{
+ context->lo |= CONTEXT_PASIDE;
+}
+
static inline int context_domain_id(struct context_entry *c)
{
return((c->hi >> 8) & 0xffff);
@@ -399,7 +404,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{
unsigned long fl_sagaw, sl_sagaw;
- fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
+ fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
sl_sagaw = cap_sagaw(iommu->cap);
/* Second level only. */
@@ -1234,6 +1239,13 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ /*
+ * Hardware invalidates all DMA remapping hardware translation
+ * caches as part of SRTP flow.
+ */
+ if (cap_esrtps(iommu->cap))
+ return;
+
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
if (sm_supported(iommu))
qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
@@ -1350,21 +1362,18 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
}
static struct device_domain_info *
-iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
+domain_lookup_dev_info(struct dmar_domain *domain,
+ struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct device_domain_info *info;
unsigned long flags;
- if (!iommu->qi)
- return NULL;
-
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
spin_unlock_irqrestore(&domain->lock, flags);
- return info->ats_supported ? info : NULL;
+ return info;
}
}
spin_unlock_irqrestore(&domain->lock, flags);
@@ -1389,7 +1398,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
spin_unlock_irqrestore(&domain->lock, flags);
}
-static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+static void iommu_enable_pci_caps(struct device_domain_info *info)
{
struct pci_dev *pdev;
@@ -1412,7 +1421,6 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
info->pfsid = pci_dev_id(pf_pdev);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
the device if you enable PASID support after ATS support is
undefined. So always enable PASID support on devices which
@@ -1425,7 +1433,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
(info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
!pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
info->pri_enabled = 1;
-#endif
+
if (info->ats_supported && pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
@@ -1448,16 +1456,16 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
info->ats_enabled = 0;
domain_update_iotlb(info->domain);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
+
if (info->pri_enabled) {
pci_disable_pri(pdev);
info->pri_enabled = 0;
}
+
if (info->pasid_enabled) {
pci_disable_pasid(pdev);
info->pasid_enabled = 0;
}
-#endif
}
static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
@@ -1907,7 +1915,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
u8 bus, u8 devfn)
{
struct device_domain_info *info =
- iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+ domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
struct context_entry *context;
@@ -1980,6 +1988,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_set_sm_dte(context);
if (info && info->pri_supported)
context_set_sm_pre(context);
+ if (info && info->pasid_supported)
+ context_set_pasid(context);
} else {
struct dma_pte *pgd = domain->pgd;
int agaw;
@@ -2037,7 +2047,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
} else {
iommu_flush_write_buffer(iommu);
}
- iommu_enable_dev_iotlb(info);
+ iommu_enable_pci_caps(info);
ret = 0;
@@ -3896,7 +3906,6 @@ static int __init probe_acpi_namespace_devices(void)
continue;
}
- pn->dev->bus->iommu_ops = &intel_iommu_ops;
ret = iommu_probe_device(pn->dev);
if (ret)
break;
@@ -4029,7 +4038,6 @@ int __init intel_iommu_init(void)
}
up_read(&dmar_global_lock);
- bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
@@ -4437,7 +4445,7 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
}
-static bool intel_iommu_capable(enum iommu_cap cap)
+static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
return true;
@@ -4457,7 +4465,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
+ if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV);
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -4574,52 +4582,6 @@ static void intel_iommu_get_resv_regions(struct device *device,
list_add_tail(&reg->list, head);
}
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct context_entry *context;
- struct dmar_domain *domain;
- u64 ctx_lo;
- int ret;
-
- domain = info->domain;
- if (!domain)
- return -EINVAL;
-
- spin_lock(&iommu->lock);
- ret = -EINVAL;
- if (!info->pasid_supported)
- goto out;
-
- context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
- if (WARN_ON(!context))
- goto out;
-
- ctx_lo = context[0].lo;
-
- if (!(ctx_lo & CONTEXT_PASIDE)) {
- ctx_lo |= CONTEXT_PASIDE;
- context[0].lo = ctx_lo;
- wmb();
- iommu->flush.flush_context(iommu,
- domain_id_iommu(domain, iommu),
- PCI_DEVID(info->bus, info->devfn),
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- }
-
- /* Enable PASID support in the device, if it wasn't already */
- if (!info->pasid_enabled)
- iommu_enable_dev_iotlb(info);
-
- ret = 0;
-
- out:
- spin_unlock(&iommu->lock);
-
- return ret;
-}
-
static struct iommu_group *intel_iommu_device_group(struct device *dev)
{
if (dev_is_pci(dev))
@@ -4643,9 +4605,6 @@ static int intel_iommu_enable_sva(struct device *dev)
if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
return -ENODEV;
- if (intel_iommu_enable_pasid(iommu, dev))
- return -ENODEV;
-
if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
return -EINVAL;
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 74b0e19e23ee..92023dff9513 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -146,7 +146,9 @@
/*
* Decoding Capability Register
*/
-#define cap_5lp_support(c) (((c) >> 60) & 1)
+#define cap_esrtps(c) (((c) >> 63) & 1)
+#define cap_esirtps(c) (((c) >> 62) & 1)
+#define cap_fl5lp_support(c) (((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_fl1gp_support(c) (((c) >> 56) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
@@ -586,6 +588,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU_SVM
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ unsigned long prq_seq_number;
struct completion prq_complete;
struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
@@ -741,7 +744,6 @@ extern int dmar_ir_support(void);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -761,7 +763,6 @@ struct intel_svm_dev {
struct device *dev;
struct intel_iommu *iommu;
struct iommu_sva sva;
- unsigned long prq_seq_number;
u32 pasid;
int users;
u16 did;
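
For reference only (not part of the patch): the capability accessors added above are plain bit extractions from the 64-bit VT-d capability register. The user-space sketch below uses the same macros; the register value is made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Same bit extractions as the accessors added to iommu.h above. */
#define cap_esrtps(c)		(((c) >> 63) & 1)
#define cap_esirtps(c)		(((c) >> 62) & 1)
#define cap_fl5lp_support(c)	(((c) >> 60) & 1)

int main(void)
{
	uint64_t cap = (1ULL << 63) | (1ULL << 60);	/* hypothetical register value */

	printf("ESRTPS=%llu ESIRTPS=%llu FL5LP=%llu\n",
	       (unsigned long long)cap_esrtps(cap),
	       (unsigned long long)cap_esirtps(cap),
	       (unsigned long long)cap_fl5lp_support(cap));
	return 0;
}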
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 2e9683e970f8..5962bb5027d0 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -494,7 +494,8 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
* Global invalidation of interrupt entry cache to make sure the
* hardware uses the new irq remapping table.
*/
- qi_global_iec(iommu);
+ if (!cap_esirtps(iommu->cap))
+ qi_global_iec(iommu);
}
static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
@@ -680,7 +681,8 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
* global invalidation of interrupt entry cache before disabling
* interrupt-remapping.
*/
- qi_global_iec(iommu);
+ if (!cap_esirtps(iommu->cap))
+ qi_global_iec(iommu);
raw_spin_lock_irqsave(&iommu->register_lock, flags);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c5e7e8b020a5..c30ddac40ee5 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -392,16 +392,6 @@ pasid_set_flpm(struct pasid_entry *pe, u64 value)
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
-/*
- * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_eafe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
-}
-
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, u32 pasid)
@@ -529,7 +519,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
}
- if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) {
+ if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
pr_err("No 5-level paging support for first-level on %s\n",
iommu->name);
return -EINVAL;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 8bcfb93dda56..7d08eb034f2d 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -49,23 +49,6 @@ static void *pasid_private_find(ioasid_t pasid)
}
static struct intel_svm_dev *
-svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
-{
- struct intel_svm_dev *sdev = NULL, *t;
-
- rcu_read_lock();
- list_for_each_entry_rcu(t, &svm->devs, list) {
- if (t->sid == sid) {
- sdev = t;
- break;
- }
- }
- rcu_read_unlock();
-
- return sdev;
-}
-
-static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{
struct intel_svm_dev *sdev = NULL, *t;
@@ -181,7 +164,7 @@ void intel_svm_check(struct intel_iommu *iommu)
}
if (cpu_feature_enabled(X86_FEATURE_LA57) &&
- !cap_5lp_support(iommu->cap)) {
+ !cap_fl5lp_support(iommu->cap)) {
pr_err("%s SVM disabled, incompatible paging mode\n",
iommu->name);
return;
@@ -706,11 +689,10 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
static irqreturn_t prq_event_thread(int irq, void *d)
{
- struct intel_svm_dev *sdev = NULL;
struct intel_iommu *iommu = d;
- struct intel_svm *svm = NULL;
struct page_req_dsc *req;
int head, tail, handled;
+ struct pci_dev *pdev;
u64 address;
/*
@@ -730,8 +712,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
pr_err("IOMMU: %s: Page request without PASID\n",
iommu->name);
bad_req:
- svm = NULL;
- sdev = NULL;
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
goto prq_advance;
}
@@ -758,34 +738,19 @@ bad_req:
if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
goto prq_advance;
- if (!svm || svm->pasid != req->pasid) {
- /*
- * It can't go away, because the driver is not permitted
- * to unbind the mm while any page faults are outstanding.
- */
- svm = pasid_private_find(req->pasid);
- if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
- goto bad_req;
- }
-
- if (!sdev || sdev->sid != req->rid) {
- sdev = svm_lookup_device_by_sid(svm, req->rid);
- if (!sdev)
- goto bad_req;
- }
-
- sdev->prq_seq_number++;
-
+ pdev = pci_get_domain_bus_and_slot(iommu->segment,
+ PCI_BUS_NUM(req->rid),
+ req->rid & 0xff);
/*
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
- if (intel_svm_prq_report(iommu, sdev->dev, req))
+ if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
- trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
+ trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
req->priv_data[0], req->priv_data[1],
- sdev->prq_seq_number);
+ iommu->prq_seq_number++);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -881,8 +846,6 @@ int intel_svm_page_response(struct device *dev,
struct iommu_page_response *msg)
{
struct iommu_fault_page_request *prm;
- struct intel_svm_dev *sdev = NULL;
- struct intel_svm *svm = NULL;
struct intel_iommu *iommu;
bool private_present;
bool pasid_present;
@@ -901,8 +864,6 @@ int intel_svm_page_response(struct device *dev,
if (!msg || !evt)
return -EINVAL;
- mutex_lock(&pasid_mutex);
-
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
@@ -919,12 +880,6 @@ int intel_svm_page_response(struct device *dev,
goto out;
}
- ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
- if (ret || !sdev) {
- ret = -ENODEV;
- goto out;
- }
-
/*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)
@@ -954,6 +909,5 @@ int intel_svm_page_response(struct device *dev,
qi_submit_sync(iommu, &desc, 1, 0);
}
out:
- mutex_unlock(&pasid_mutex);
return ret;
}
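
Aside, not part of the patch: the PRQ rework above stops caching an intel_svm_dev per source-id and resolves the requester directly from the RID carried in each page-request descriptor. A sketch of that lookup, with the field layout spelled out; the helper name is illustrative, the calls and macros are those used in the hunk.

static struct pci_dev *example_prq_to_pdev(struct intel_iommu *iommu,
					   struct page_req_dsc *req)
{
	/* RID encodes the bus in bits 15:8 and devfn in bits 7:0; the PCI
	 * segment comes from the IOMMU unit that raised the request. */
	return pci_get_domain_bus_and_slot(iommu->segment,
					   PCI_BUS_NUM(req->rid),
					   req->rid & 0xff);
}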
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 94ff319ae8ac..0ba817e86346 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -130,9 +130,6 @@
#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
-#define APPLE_DART_PTE_PROT_NO_WRITE (1<<7)
-#define APPLE_DART_PTE_PROT_NO_READ (1<<8)
-
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@@ -200,8 +197,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *pages;
VM_BUG_ON((gfp & __GFP_HIGHMEM));
- p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
- gfp | __GFP_ZERO, order);
+ p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
if (!p)
return NULL;
@@ -406,15 +402,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
{
arm_lpae_iopte pte;
- if (data->iop.fmt == APPLE_DART) {
- pte = 0;
- if (!(prot & IOMMU_WRITE))
- pte |= APPLE_DART_PTE_PROT_NO_WRITE;
- if (!(prot & IOMMU_READ))
- pte |= APPLE_DART_PTE_PROT_NO_READ;
- return pte;
- }
-
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_nG;
@@ -1107,52 +1094,6 @@ out_free_data:
return NULL;
}
-static struct io_pgtable *
-apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
-{
- struct arm_lpae_io_pgtable *data;
- int i;
-
- if (cfg->oas > 36)
- return NULL;
-
- data = arm_lpae_alloc_pgtable(cfg);
- if (!data)
- return NULL;
-
- /*
- * The table format itself always uses two levels, but the total VA
- * space is mapped by four separate tables, making the MMIO registers
- * an effective "level 1". For simplicity, though, we treat this
- * equivalently to LPAE stage 2 concatenation at level 2, with the
- * additional TTBRs each just pointing at consecutive pages.
- */
- if (data->start_level < 1)
- goto out_free_data;
- if (data->start_level == 1 && data->pgd_bits > 2)
- goto out_free_data;
- if (data->start_level > 1)
- data->pgd_bits = 0;
- data->start_level = 2;
- cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits;
- data->pgd_bits += data->bits_per_level;
-
- data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
- cfg);
- if (!data->pgd)
- goto out_free_data;
-
- for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i)
- cfg->apple_dart_cfg.ttbr[i] =
- virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data));
-
- return &data->iop;
-
-out_free_data:
- kfree(data);
- return NULL;
-}
-
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
.alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
@@ -1178,11 +1119,6 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
.free = arm_lpae_free_pgtable,
};
-struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
- .alloc = apple_dart_alloc_pgtable,
- .free = arm_lpae_free_pgtable,
-};
-
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
static struct io_pgtable_cfg *cfg_cookie __initdata;
@@ -1343,12 +1279,17 @@ static int __init arm_lpae_do_selftests(void)
};
int i, j, pass = 0, fail = 0;
+ struct device dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.oas = 48,
.coherent_walk = true,
+ .iommu_dev = &dev,
};
+ /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
+ set_dev_node(&dev, NUMA_NO_NODE);
+
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(ias); ++j) {
cfg.pgsize_bitmap = pgsize[i];
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
new file mode 100644
index 000000000000..74b1ef2b96be
--- /dev/null
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple DART page table allocator.
+ *
+ * Copyright (C) 2022 The Asahi Linux Contributors
+ *
+ * Based on io-pgtable-arm.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "dart io-pgtable: " fmt
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+
+#define DART1_MAX_ADDR_BITS 36
+
+#define DART_MAX_TABLES 4
+#define DART_LEVELS 2
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct dart_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+#define DART_GRANULE(d) \
+ (sizeof(dart_iopte) << (d)->bits_per_level)
+#define DART_PTES_PER_TABLE(d) \
+ (DART_GRANULE(d) >> ilog2(sizeof(dart_iopte)))
+
+#define APPLE_DART_PTE_SUBPAGE_START GENMASK_ULL(63, 52)
+#define APPLE_DART_PTE_SUBPAGE_END GENMASK_ULL(51, 40)
+
+#define APPLE_DART1_PADDR_MASK GENMASK_ULL(35, 12)
+#define APPLE_DART2_PADDR_MASK GENMASK_ULL(37, 10)
+#define APPLE_DART2_PADDR_SHIFT (4)
+
+/* Apple DART1 protection bits */
+#define APPLE_DART1_PTE_PROT_NO_READ BIT(8)
+#define APPLE_DART1_PTE_PROT_NO_WRITE BIT(7)
+#define APPLE_DART1_PTE_PROT_SP_DIS BIT(1)
+
+/* Apple DART2 protection bits */
+#define APPLE_DART2_PTE_PROT_NO_READ BIT(3)
+#define APPLE_DART2_PTE_PROT_NO_WRITE BIT(2)
+#define APPLE_DART2_PTE_PROT_NO_CACHE BIT(1)
+
+/* marks PTE as valid */
+#define APPLE_DART_PTE_VALID BIT(0)
+
+/* IOPTE accessors */
+#define iopte_deref(pte, d) __va(iopte_to_paddr(pte, d))
+
+struct dart_io_pgtable {
+ struct io_pgtable iop;
+
+ int tbl_bits;
+ int bits_per_level;
+
+ void *pgd[DART_MAX_TABLES];
+};
+
+typedef u64 dart_iopte;
+
+
+static dart_iopte paddr_to_iopte(phys_addr_t paddr,
+ struct dart_io_pgtable *data)
+{
+ dart_iopte pte;
+
+ if (data->iop.fmt == APPLE_DART)
+ return paddr & APPLE_DART1_PADDR_MASK;
+
+ /* format is APPLE_DART2 */
+ pte = paddr >> APPLE_DART2_PADDR_SHIFT;
+ pte &= APPLE_DART2_PADDR_MASK;
+
+ return pte;
+}
+
+static phys_addr_t iopte_to_paddr(dart_iopte pte,
+ struct dart_io_pgtable *data)
+{
+ u64 paddr;
+
+ if (data->iop.fmt == APPLE_DART)
+ return pte & APPLE_DART1_PADDR_MASK;
+
+ /* format is APPLE_DART2 */
+ paddr = pte & APPLE_DART2_PADDR_MASK;
+ paddr <<= APPLE_DART2_PADDR_SHIFT;
+
+ return paddr;
+}
+
+static void *__dart_alloc_pages(size_t size, gfp_t gfp,
+ struct io_pgtable_cfg *cfg)
+{
+ int order = get_order(size);
+ struct page *p;
+
+ VM_BUG_ON((gfp & __GFP_HIGHMEM));
+ p = alloc_pages(gfp | __GFP_ZERO, order);
+ if (!p)
+ return NULL;
+
+ return page_address(p);
+}
+
+static int dart_init_pte(struct dart_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr,
+ dart_iopte prot, int num_entries,
+ dart_iopte *ptep)
+{
+ int i;
+ dart_iopte pte = prot;
+ size_t sz = data->iop.cfg.pgsize_bitmap;
+
+ for (i = 0; i < num_entries; i++)
+ if (ptep[i] & APPLE_DART_PTE_VALID) {
+ /* We require an unmap first */
+ WARN_ON(ptep[i] & APPLE_DART_PTE_VALID);
+ return -EEXIST;
+ }
+
+ /* subpage protection: always allow access to the entire page */
+ pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0);
+ pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff);
+
+ pte |= APPLE_DART1_PTE_PROT_SP_DIS;
+ pte |= APPLE_DART_PTE_VALID;
+
+ for (i = 0; i < num_entries; i++)
+ ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
+
+ return 0;
+}
+
+static dart_iopte dart_install_table(dart_iopte *table,
+ dart_iopte *ptep,
+ dart_iopte curr,
+ struct dart_io_pgtable *data)
+{
+ dart_iopte old, new;
+
+ new = paddr_to_iopte(__pa(table), data) | APPLE_DART_PTE_VALID;
+
+ /*
+ * Ensure the table itself is visible before its PTE can be.
+ * Whilst we could get away with cmpxchg64_release below, this
+ * doesn't have any ordering semantics when !CONFIG_SMP.
+ */
+ dma_wmb();
+
+ old = cmpxchg64_relaxed(ptep, curr, new);
+
+ return old;
+}
+
+static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
+{
+ return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->tbl_bits) - 1);
+}
+
+static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
+{
+
+ return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
+}
+
+static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
+{
+
+ return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
+}
+
+static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
+{
+ dart_iopte pte, *ptep;
+ int tbl = dart_get_table(data, iova);
+
+ ptep = data->pgd[tbl];
+ if (!ptep)
+ return NULL;
+
+ ptep += dart_get_l1_index(data, iova);
+ pte = READ_ONCE(*ptep);
+
+ /* Valid entry? */
+ if (!pte)
+ return NULL;
+
+ /* Deref to get level 2 table */
+ return iopte_deref(pte, data);
+}
+
+static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
+ int prot)
+{
+ dart_iopte pte = 0;
+
+ if (data->iop.fmt == APPLE_DART) {
+ if (!(prot & IOMMU_WRITE))
+ pte |= APPLE_DART1_PTE_PROT_NO_WRITE;
+ if (!(prot & IOMMU_READ))
+ pte |= APPLE_DART1_PTE_PROT_NO_READ;
+ }
+ if (data->iop.fmt == APPLE_DART2) {
+ if (!(prot & IOMMU_WRITE))
+ pte |= APPLE_DART2_PTE_PROT_NO_WRITE;
+ if (!(prot & IOMMU_READ))
+ pte |= APPLE_DART2_PTE_PROT_NO_READ;
+ if (!(prot & IOMMU_CACHE))
+ pte |= APPLE_DART2_PTE_PROT_NO_CACHE;
+ }
+
+ return pte;
+}
+
+static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ size_t tblsz = DART_GRANULE(data);
+ int ret = 0, tbl, num_entries, max_entries, map_idx_start;
+ dart_iopte pte, *cptep, *ptep;
+ dart_iopte prot;
+
+ if (WARN_ON(pgsize != cfg->pgsize_bitmap))
+ return -EINVAL;
+
+ if (WARN_ON(paddr >> cfg->oas))
+ return -ERANGE;
+
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ tbl = dart_get_table(data, iova);
+
+ ptep = data->pgd[tbl];
+ ptep += dart_get_l1_index(data, iova);
+ pte = READ_ONCE(*ptep);
+
+ /* no L2 table present */
+ if (!pte) {
+ cptep = __dart_alloc_pages(tblsz, gfp, cfg);
+ if (!cptep)
+ return -ENOMEM;
+
+ pte = dart_install_table(cptep, ptep, 0, data);
+ if (pte)
+ free_pages((unsigned long)cptep, get_order(tblsz));
+
+ /* L2 table is present (now) */
+ pte = READ_ONCE(*ptep);
+ }
+
+ ptep = iopte_deref(pte, data);
+
+	/* install leaf entries into the L2 table */
+ prot = dart_prot_to_pte(data, iommu_prot);
+ map_idx_start = dart_get_l2_index(data, iova);
+ max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+ ptep += map_idx_start;
+ ret = dart_init_pte(data, iova, paddr, prot, num_entries, ptep);
+ if (!ret && mapped)
+ *mapped += num_entries * pgsize;
+
+ /*
+ * Synchronise all PTE updates for the new mapping before there's
+ * a chance for anything to kick off a table walk for the new iova.
+ */
+ wmb();
+
+ return ret;
+}
+
+static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ int i = 0, num_entries, max_entries, unmap_idx_start;
+ dart_iopte pte, *ptep;
+
+ if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
+ return 0;
+
+ ptep = dart_get_l2(data, iova);
+
+ /* Valid L2 IOPTE pointer? */
+ if (WARN_ON(!ptep))
+ return 0;
+
+ unmap_idx_start = dart_get_l2_index(data, iova);
+ ptep += unmap_idx_start;
+
+ max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+
+ while (i < num_entries) {
+ pte = READ_ONCE(*ptep);
+ if (WARN_ON(!pte))
+ break;
+
+ /* clear pte */
+ *ptep = 0;
+
+ if (!iommu_iotlb_gather_queued(gather))
+ io_pgtable_tlb_add_page(&data->iop, gather,
+ iova + i * pgsize, pgsize);
+
+ ptep++;
+ i++;
+ }
+
+ return i * pgsize;
+}
+
+static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ dart_iopte pte, *ptep;
+
+ ptep = dart_get_l2(data, iova);
+
+ /* Valid L2 IOPTE pointer? */
+ if (!ptep)
+ return 0;
+
+ ptep += dart_get_l2_index(data, iova);
+
+ pte = READ_ONCE(*ptep);
+ /* Found translation */
+ if (pte) {
+ iova &= (data->iop.cfg.pgsize_bitmap - 1);
+ return iopte_to_paddr(pte, data) | iova;
+ }
+
+ /* Ran out of page tables to walk */
+ return 0;
+}
+
+static struct dart_io_pgtable *
+dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
+{
+ struct dart_io_pgtable *data;
+ int tbl_bits, bits_per_level, va_bits, pg_shift;
+
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));
+
+ va_bits = cfg->ias - pg_shift;
+
+ tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
+ if ((1 << tbl_bits) > DART_MAX_TABLES)
+ return NULL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->tbl_bits = tbl_bits;
+ data->bits_per_level = bits_per_level;
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map_pages = dart_map_pages,
+ .unmap_pages = dart_unmap_pages,
+ .iova_to_phys = dart_iova_to_phys,
+ };
+
+ return data;
+}
+
+static struct io_pgtable *
+apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct dart_io_pgtable *data;
+ int i;
+
+ if (!cfg->coherent_walk)
+ return NULL;
+
+ if (cfg->oas != 36 && cfg->oas != 42)
+ return NULL;
+
+ if (cfg->ias > cfg->oas)
+ return NULL;
+
+ if (!(cfg->pgsize_bitmap == SZ_4K || cfg->pgsize_bitmap == SZ_16K))
+ return NULL;
+
+ data = dart_alloc_pgtable(cfg);
+ if (!data)
+ return NULL;
+
+ cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+
+ for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
+ data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL,
+ cfg);
+ if (!data->pgd[i])
+ goto out_free_data;
+ cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
+ }
+
+ return &data->iop;
+
+out_free_data:
+ while (--i >= 0)
+ free_pages((unsigned long)data->pgd[i],
+ get_order(DART_GRANULE(data)));
+ kfree(data);
+ return NULL;
+}
+
+static void apple_dart_free_pgtable(struct io_pgtable *iop)
+{
+ struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+ dart_iopte *ptep, *end;
+ int i;
+
+ for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
+ ptep = data->pgd[i];
+ end = (void *)ptep + DART_GRANULE(data);
+
+ while (ptep != end) {
+ dart_iopte pte = *ptep++;
+
+ if (pte) {
+ unsigned long page =
+ (unsigned long)iopte_deref(pte, data);
+
+ free_pages(page, get_order(DART_GRANULE(data)));
+ }
+ }
+ free_pages((unsigned long)data->pgd[i],
+ get_order(DART_GRANULE(data)));
+ }
+
+ kfree(data);
+}
+
+struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
+ .alloc = apple_dart_alloc_pgtable,
+ .free = apple_dart_free_pgtable,
+};
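
For illustration only (not part of the patch): the DART2 packing added above shifts the physical address right by 4 before masking, so PTE bits [37:10] hold paddr bits [41:14], which is what allows the 42-bit output address space with 16 KiB pages. The user-space sketch below round-trips a made-up, 16 KiB-aligned address through the same mask and shift.

#include <stdio.h>
#include <stdint.h>

/* GENMASK_ULL(37, 10) spelled out for user space */
#define APPLE_DART2_PADDR_MASK	(((1ULL << 38) - 1) & ~((1ULL << 10) - 1))
#define APPLE_DART2_PADDR_SHIFT	4

static uint64_t paddr_to_iopte(uint64_t paddr)
{
	return (paddr >> APPLE_DART2_PADDR_SHIFT) & APPLE_DART2_PADDR_MASK;
}

static uint64_t iopte_to_paddr(uint64_t pte)
{
	return (pte & APPLE_DART2_PADDR_MASK) << APPLE_DART2_PADDR_SHIFT;
}

int main(void)
{
	uint64_t paddr = 0x380004000ULL;	/* made-up, 16 KiB-aligned, below 2^42 */
	uint64_t pte = paddr_to_iopte(paddr);

	printf("paddr %#llx -> pte bits %#llx -> back to %#llx\n",
	       (unsigned long long)paddr, (unsigned long long)pte,
	       (unsigned long long)iopte_to_paddr(pte));
	return 0;
}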
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index f4bfcef98297..b843fcd365d2 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -20,13 +20,17 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
[ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
+#endif
+#ifdef CONFIG_IOMMU_IO_PGTABLE_DART
[APPLE_DART] = &io_pgtable_apple_dart_init_fns,
+ [APPLE_DART2] = &io_pgtable_apple_dart_init_fns,
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
#endif
#ifdef CONFIG_AMD_IOMMU
[AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
+ [AMD_IOMMU_V2] = &io_pgtable_amd_iommu_v2_init_fns,
#endif
};
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3a808146b50f..4893c2429ca5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -6,8 +6,8 @@
#define pr_fmt(fmt) "iommu: " fmt
+#include <linux/amba/bus.h>
#include <linux/device.h>
-#include <linux/dma-iommu.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
@@ -16,17 +16,21 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
+#include "dma-iommu.h"
+
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@@ -75,6 +79,8 @@ static const char * const iommu_group_resv_type_string[] = {
#define IOMMU_CMD_LINE_DMA_API BIT(0)
#define IOMMU_CMD_LINE_STRICT BIT(1)
+static int iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group,
struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -103,6 +109,22 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
+static struct bus_type * const iommu_buses[] = {
+ &platform_bus_type,
+#ifdef CONFIG_PCI
+ &pci_bus_type,
+#endif
+#ifdef CONFIG_ARM_AMBA
+ &amba_bustype,
+#endif
+#ifdef CONFIG_FSL_MC_BUS
+ &fsl_mc_bus_type,
+#endif
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+ &host1x_context_device_bus_type,
+#endif
+};
+
/*
* Use a function instead of an array here because the domain-type is a
* bit-field, so an array would waste memory.
@@ -126,6 +148,8 @@ static const char *iommu_domain_type_str(unsigned int t)
static int __init iommu_subsys_init(void)
{
+ struct notifier_block *nb;
+
if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
iommu_set_default_passthrough(false);
@@ -152,10 +176,27 @@ static int __init iommu_subsys_init(void)
(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
"(set via kernel command line)" : "");
+ nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
+ if (!nb)
+ return -ENOMEM;
+
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
+ nb[i].notifier_call = iommu_bus_notifier;
+ bus_register_notifier(iommu_buses[i], &nb[i]);
+ }
+
return 0;
}
subsys_initcall(iommu_subsys_init);
+static int remove_iommu_group(struct device *dev, void *data)
+{
+ if (dev->iommu && dev->iommu->iommu_dev == data)
+ iommu_release_device(dev);
+
+ return 0;
+}
+
/**
* iommu_device_register() - Register an IOMMU hardware instance
* @iommu: IOMMU handle for the instance
@@ -167,23 +208,42 @@ subsys_initcall(iommu_subsys_init);
int iommu_device_register(struct iommu_device *iommu,
const struct iommu_ops *ops, struct device *hwdev)
{
+ int err = 0;
+
/* We need to be able to take module references appropriately */
if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
return -EINVAL;
+ /*
+ * Temporarily enforce global restriction to a single driver. This was
+ * already the de-facto behaviour, since any possible combination of
+ * existing drivers would compete for at least the PCI or platform bus.
+ */
+ if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
+ return -EBUSY;
iommu->ops = ops;
if (hwdev)
- iommu->fwnode = hwdev->fwnode;
+ iommu->fwnode = dev_fwnode(hwdev);
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
- return 0;
+
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
+ iommu_buses[i]->iommu_ops = ops;
+ err = bus_iommu_probe(iommu_buses[i]);
+ }
+ if (err)
+ iommu_device_unregister(iommu);
+ return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
void iommu_device_unregister(struct iommu_device *iommu)
{
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
+ bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);
+
spin_lock(&iommu_device_lock);
list_del(&iommu->list);
spin_unlock(&iommu_device_lock);
@@ -654,7 +714,6 @@ struct iommu_group *iommu_group_alloc(void)
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- ida_free(&iommu_group_ida, group->id);
kobject_put(&group->kobj);
return ERR_PTR(ret);
}
@@ -1612,13 +1671,6 @@ static int probe_iommu_group(struct device *dev, void *data)
return ret;
}
-static int remove_iommu_group(struct device *dev, void *data)
-{
- iommu_release_device(dev);
-
- return 0;
-}
-
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -1775,75 +1827,6 @@ int bus_iommu_probe(struct bus_type *bus)
return ret;
}
-static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
-{
- struct notifier_block *nb;
- int err;
-
- nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
- if (!nb)
- return -ENOMEM;
-
- nb->notifier_call = iommu_bus_notifier;
-
- err = bus_register_notifier(bus, nb);
- if (err)
- goto out_free;
-
- err = bus_iommu_probe(bus);
- if (err)
- goto out_err;
-
-
- return 0;
-
-out_err:
- /* Clean up */
- bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
- bus_unregister_notifier(bus, nb);
-
-out_free:
- kfree(nb);
-
- return err;
-}
-
-/**
- * bus_set_iommu - set iommu-callbacks for the bus
- * @bus: bus.
- * @ops: the callbacks provided by the iommu-driver
- *
- * This function is called by an iommu driver to set the iommu methods
- * used for a particular bus. Drivers for devices on that bus can use
- * the iommu-api after these ops are registered.
- * This special function is needed because IOMMUs are usually devices on
- * the bus itself, so the iommu drivers are not initialized when the bus
- * is set up. With this function the iommu-driver can set the iommu-ops
- * afterwards.
- */
-int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
-{
- int err;
-
- if (ops == NULL) {
- bus->iommu_ops = NULL;
- return 0;
- }
-
- if (bus->iommu_ops != NULL)
- return -EBUSY;
-
- bus->iommu_ops = ops;
-
- /* Do IOMMU specific setup for this bus-type */
- err = iommu_bus_init(bus, ops);
- if (err)
- bus->iommu_ops = NULL;
-
- return err;
-}
-EXPORT_SYMBOL_GPL(bus_set_iommu);
-
bool iommu_present(struct bus_type *bus)
{
return bus->iommu_ops != NULL;
@@ -1869,19 +1852,10 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
if (!ops->capable)
return false;
- return ops->capable(cap);
+ return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
-bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
-{
- if (!bus->iommu_ops || !bus->iommu_ops->capable)
- return false;
-
- return bus->iommu_ops->capable(cap);
-}
-EXPORT_SYMBOL_GPL(iommu_capable);
-
/**
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
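
With bus_set_iommu() on its way out, a driver's probe path reduces to registering the IOMMU instance; the core now walks iommu_buses[] and probes devices itself. The sketch below is illustrative only: struct example_iommu and example_iommu_ops are invented names, not part of this series.

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_iommu {
	struct iommu_device iommu;
	/* hardware state would live here */
};

static const struct iommu_ops example_iommu_ops = {
	/* .probe_device, .domain_alloc, ... assumed filled in */
};

static int example_iommu_probe(struct platform_device *pdev)
{
	struct example_iommu *m;
	int ret;

	m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;

	ret = iommu_device_sysfs_add(&m->iommu, &pdev->dev, NULL,
				     "%s", dev_name(&pdev->dev));
	if (ret)
		return ret;

	/* No bus_set_iommu() step any more: registration does it all. */
	ret = iommu_device_register(&m->iommu, &example_iommu_ops, &pdev->dev);
	if (ret)
		iommu_device_sysfs_remove(&m->iommu);
	return ret;
}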
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 47d1983dfa2a..a44ad92fc5eb 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -661,9 +661,6 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
unsigned long flags;
int i;
- if (!mag)
- return;
-
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (i = 0 ; i < mag->size; ++i) {
@@ -683,12 +680,12 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
static bool iova_magazine_full(struct iova_magazine *mag)
{
- return (mag && mag->size == IOVA_MAG_SIZE);
+ return mag->size == IOVA_MAG_SIZE;
}
static bool iova_magazine_empty(struct iova_magazine *mag)
{
- return (!mag || mag->size == 0);
+ return mag->size == 0;
}
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
@@ -697,8 +694,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
int i;
unsigned long pfn;
- BUG_ON(iova_magazine_empty(mag));
-
/* Only fall back to the rbtree if we have no suitable pfns at all */
for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
if (i == 0)
@@ -713,8 +708,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
- BUG_ON(iova_magazine_full(mag));
-
mag->pfns[mag->size++] = pfn;
}
@@ -882,7 +875,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
{
unsigned int log_size = order_base_2(size);
- if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
return 0;
return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 1d42084d0276..3b30c0752274 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1090,11 +1090,6 @@ static int ipmmu_probe(struct platform_device *pdev)
ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
if (ret)
return ret;
-
-#if defined(CONFIG_IOMMU_DMA)
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &ipmmu_ops);
-#endif
}
/*
@@ -1168,32 +1163,4 @@ static struct platform_driver ipmmu_driver = {
.probe = ipmmu_probe,
.remove = ipmmu_remove,
};
-
-static int __init ipmmu_init(void)
-{
- struct device_node *np;
- static bool setup_done;
- int ret;
-
- if (setup_done)
- return 0;
-
- np = of_find_matching_node(NULL, ipmmu_of_ids);
- if (!np)
- return 0;
-
- of_node_put(np);
-
- ret = platform_driver_register(&ipmmu_driver);
- if (ret < 0)
- return ret;
-
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &ipmmu_ops);
-#endif
-
- setup_done = true;
- return 0;
-}
-subsys_initcall(ipmmu_init);
+builtin_platform_driver(ipmmu_driver);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a24aa804ea3..16179a9a7283 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -792,8 +792,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
goto fail;
}
- bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
-
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 7e363b1f24df..5a4e00e4bbbc 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -138,6 +138,7 @@
#define PM_CLK_AO BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
#define PGTABLE_PA_35_EN BIT(17)
+#define TF_PORT_TO_ADDR_MT8173 BIT(18)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -157,6 +158,7 @@
enum mtk_iommu_plat {
M4U_MT2712,
M4U_MT6779,
+ M4U_MT6795,
M4U_MT8167,
M4U_MT8173,
M4U_MT8183,
@@ -955,7 +957,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int ban
* Global control settings are in bank0. May re-init these global registers
 * since we are not sure if there are bank0 consumers.
*/
- if (data->plat_data->m4u_plat == M4U_MT8173) {
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
} else {
@@ -1243,30 +1245,13 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->hw_list = &data->hw_list_head;
}
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
- if (ret)
- goto out_list_del;
- }
-
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
if (ret)
- goto out_bus_set_null;
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- ret = bus_set_iommu(&pci_bus_type, &mtk_iommu_ops);
- if (ret) /* PCIe fail don't affect platform_bus. */
- goto out_list_del;
- }
-#endif
+ goto out_list_del;
}
return ret;
-out_bus_set_null:
- bus_set_iommu(&platform_bus_type, NULL);
out_list_del:
list_del(&data->list);
iommu_device_unregister(&data->iommu);
@@ -1294,11 +1279,6 @@ static int mtk_iommu_remove(struct platform_device *pdev)
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
device_link_remove(data->smicomm_dev, &pdev->dev);
component_master_del(&pdev->dev, &mtk_iommu_com_ops);
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
}
pm_runtime_disable(&pdev->dev);
for (i = 0; i < data->plat_data->banks_num; i++) {
@@ -1413,6 +1393,19 @@ static const struct mtk_iommu_plat_data mt6779_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
+static const struct mtk_iommu_plat_data mt6795_data = {
+ .m4u_plat = M4U_MT6795,
+ .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
+ HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
+ TF_PORT_TO_ADDR_MT8173,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
+};
+
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
@@ -1427,7 +1420,8 @@ static const struct mtk_iommu_plat_data mt8167_data = {
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
.flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
- HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
+ HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
+ TF_PORT_TO_ADDR_MT8173,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.banks_num = 1,
.banks_enable = {true},
@@ -1524,6 +1518,7 @@ static const struct mtk_iommu_plat_data mt8195_data_vpp = {
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
+ { .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 128c7a3f1778..6e0e65831eb7 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -691,19 +691,11 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
if (ret)
goto out_sysfs_remove;
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops);
- if (ret)
- goto out_dev_unreg;
- }
-
ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
if (ret)
- goto out_bus_set_null;
+ goto out_dev_unreg;
return ret;
-out_bus_set_null:
- bus_set_iommu(&platform_bus_type, NULL);
out_dev_unreg:
iommu_device_unregister(&data->iommu);
out_sysfs_remove:
@@ -718,9 +710,6 @@ static int mtk_iommu_v1_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
- if (iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, NULL);
-
clk_disable_unprepare(data->bclk);
devm_free_irq(&pdev->dev, data->irq, data);
component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index a99afb5d9011..259f65291d90 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -32,12 +32,12 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
+ if (len < maxcol) \
+ goto out; \
+ bytes = scnprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
- if (len < maxcol) \
- goto out; \
} while (0)
static ssize_t
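
A minimal sketch (not from the patch) of the return-value difference the hunk above relies on: snprintf() returns the length the output would have had, which can exceed the space left and drive a remaining-length counter negative, while scnprintf() returns the number of characters actually written, so "len -= bytes" stays sane once the available space has been checked first.

#include <linux/kernel.h>

static size_t example_dump(char *buf, size_t len)
{
	char *p = buf;
	ssize_t bytes;

	if (len < 32)				/* check space before writing */
		goto out;

	bytes = scnprintf(p, 32, "%20s: %08x\n", "REG", 0x12345678u);
	p += bytes;				/* bytes <= 31, never overshoots */
	len -= bytes;
out:
	return p - buf;
}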
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d9cf2820c02e..07ee2600113c 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1776,14 +1776,8 @@ static int __init omap_iommu_init(void)
goto fail_driver;
}
- ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
- if (ret)
- goto fail_bus;
-
return 0;
-fail_bus:
- platform_driver_unregister(&omap_iommu_driver);
fail_driver:
kmem_cache_destroy(iopte_cachep);
return ret;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ab57c4b8fade..a3fc59b814ab 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1300,8 +1300,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (!dma_dev)
dma_dev = &pdev->dev;
- bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
-
pm_runtime_enable(dev);
for (i = 0; i < iommu->num_irq; i++) {
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index c898bcbbce11..3c071782f6f1 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -39,7 +39,7 @@ static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
return container_of(dom, struct s390_domain, domain);
}
-static bool s390_iommu_capable(enum iommu_cap cap)
+static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -185,7 +185,12 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
- struct zpci_dev *zdev = to_zpci_dev(dev);
+ struct zpci_dev *zdev;
+
+ if (!dev_is_pci(dev))
+ return ERR_PTR(-ENODEV);
+
+ zdev = to_zpci_dev(dev);
return &zdev->iommu_dev;
}
@@ -385,9 +390,3 @@ static const struct iommu_ops s390_iommu_ops = {
.free = s390_domain_free,
}
};
-
-static int __init s390_iommu_init(void)
-{
- return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
-}
-subsys_initcall(s390_iommu_init);
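
The ->capable() callback now receives the device being queried, so a driver can answer per device rather than globally. The snippet below is an illustrative implementation of the new prototype with invented names, not code from this series.

#include <linux/iommu.h>

static bool example_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;		/* could inspect dev here */
	default:
		return false;
	}
}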
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 511959c8a14d..fadd2c907222 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -496,9 +496,6 @@ static int sprd_iommu_probe(struct platform_device *pdev)
if (ret)
goto remove_sysfs;
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &sprd_iommu_ops);
-
ret = sprd_iommu_clk_enable(sdev);
if (ret)
goto unregister_iommu;
@@ -534,8 +531,6 @@ static int sprd_iommu_remove(struct platform_device *pdev)
iommu_group_put(sdev->group);
sdev->group = NULL;
- bus_set_iommu(&platform_bus_type, NULL);
-
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&sdev->iommu);
iommu_device_unregister(&sdev->iommu);
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index a84c63518773..cd9b74ee24de 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -965,8 +965,6 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
if (ret < 0)
goto err_unregister;
- bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);
-
return 0;
err_unregister:
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 2a8de975fe63..5b1af40221ec 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1083,8 +1083,8 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
/*
* This is a bit of a hack. Ideally we'd want to simply return this
- * value. However the IOMMU registration process will attempt to add
- * all devices to the IOMMU when bus_set_iommu() is called. In order
+ * value. However iommu_device_register() will attempt to add
+ * all devices to the IOMMU before we get that far. In order
* not to rely on global variables to track the IOMMU instance, we
* set it here so that it can be looked up from the .probe_device()
* callback via the IOMMU device's .drvdata field.
@@ -1138,32 +1138,15 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
return ERR_PTR(err);
err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
- if (err)
- goto remove_sysfs;
-
- err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
- if (err < 0)
- goto unregister;
-
-#ifdef CONFIG_PCI
- err = bus_set_iommu(&pci_bus_type, &tegra_smmu_ops);
- if (err < 0)
- goto unset_platform_bus;
-#endif
+ if (err) {
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return ERR_PTR(err);
+ }
if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_smmu_debugfs_init(smmu);
return smmu;
-
-unset_platform_bus: __maybe_unused;
- bus_set_iommu(&platform_bus_type, NULL);
-unregister:
- iommu_device_unregister(&smmu->iommu);
-remove_sysfs:
- iommu_device_sysfs_remove(&smmu->iommu);
-
- return ERR_PTR(err);
}
void tegra_smmu_remove(struct tegra_smmu *smmu)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 80151176ba12..b7c22802f57c 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -7,9 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/amba/bus.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
@@ -17,7 +15,6 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
@@ -25,6 +22,8 @@
#include <uapi/linux/virtio_iommu.h>
+#include "dma-iommu.h"
+
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
@@ -925,7 +924,7 @@ static struct virtio_driver virtio_iommu_drv;
static int viommu_match_node(struct device *dev, const void *data)
{
- return dev->parent->fwnode == data;
+ return device_match_fwnode(dev->parent, data);
}
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
@@ -1006,7 +1005,7 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
-static bool viommu_capable(enum iommu_cap cap)
+static bool viommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -1156,26 +1155,6 @@ static int viommu_probe(struct virtio_device *vdev)
iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
-#ifdef CONFIG_PCI
- if (pci_bus_type.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-#endif
-#ifdef CONFIG_ARM_AMBA
- if (amba_bustype.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&amba_bustype, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-#endif
- if (platform_bus_type.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-
vdev->priv = viommu;
dev_info(dev, "input address: %u bits\n",
@@ -1184,9 +1163,6 @@ static int viommu_probe(struct virtio_device *vdev)
return 0;
-err_unregister:
- iommu_device_sysfs_remove(&viommu->iommu);
- iommu_device_unregister(&viommu->iommu);
err_free_vqs:
vdev->config->del_vqs(vdev);
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 20d2b9ec1227..fc00274070b6 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -497,7 +497,7 @@ static unsigned int ipoctal_chars_in_buffer(struct tty_struct *tty)
}
static void ipoctal_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
unsigned int cflag;
unsigned char mr1 = 0;
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index b1c3198355e7..74d449858a61 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -429,8 +429,11 @@ int ipack_device_init(struct ipack_device *dev)
dev->dev.bus = &ipack_bus_type;
dev->dev.release = ipack_device_release;
dev->dev.parent = dev->bus->parent;
- dev_set_name(&dev->dev,
+ ret = dev_set_name(&dev->dev,
"ipack-dev.%u.%u", dev->bus->bus_nr, dev->slot);
+ if (ret)
+ return ret;
+
device_initialize(&dev->dev);
if (dev->bus->ops->set_clockrate(dev, 8))
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index eb5ea5b69cfa..7ef9f5e696d3 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -3,7 +3,7 @@ menu "IRQ chip support"
config IRQCHIP
def_bool y
- depends on OF_IRQ
+ depends on (OF_IRQ || ACPI_GENERIC_GSI)
config ARM_GIC
bool
@@ -481,6 +481,21 @@ config IMX_INTMUX
help
Support for the i.MX INTMUX interrupt multiplexer.
+config IMX_MU_MSI
+ tristate "i.MX MU used as MSI controller"
+ depends on OF && HAS_IOMEM
+ depends on ARCH_MXC || COMPILE_TEST
+ default m if ARCH_MXC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+ Provide a driver for the i.MX Messaging Unit block used as a
+ CPU-to-CPU MSI controller. This requires a specially crafted DT
+ to make use of this driver.
+
+	  If unsure, say N.
+
config LS1X_IRQ
bool "Loongson-1 Interrupt Controller"
depends on MACH_LOONGSON32
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b6acbca2248b..87b49a10962c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
+obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index b249d4df899e..6e1ac330d7a6 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -13,7 +13,7 @@
#define pr_fmt(fmt) "GICv2m: " fmt
#include <linux/acpi.h>
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index beead1a0191c..973ede0197e3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -11,9 +11,9 @@
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index a2163d32f17d..e1efdec9e9ac 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -6,7 +6,7 @@
#define pr_fmt(fmt) "GICv3: " fmt
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 262658fd5f9e..34d58567b78d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -978,7 +978,7 @@ static int __gic_update_rdist_properties(struct redist_region *region,
u64 typer = gic_read_typer(ptr + GICR_TYPER);
u32 ctlr = readl_relaxed(ptr + GICR_CTLR);
- /* Boot-time cleanip */
+ /* Boot-time cleanup */
if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
u64 val;
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
new file mode 100644
index 000000000000..229039eda1b1
--- /dev/null
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Freescale MU used as MSI controller
+ *
+ * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ * Copyright 2022 NXP
+ * Frank Li <Frank.Li@nxp.com>
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Based on drivers/mailbox/imx-mailbox.c
+ */
+
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/spinlock.h>
+
+#define IMX_MU_CHANS 4
+
+enum imx_mu_xcr {
+ IMX_MU_GIER,
+ IMX_MU_GCR,
+ IMX_MU_TCR,
+ IMX_MU_RCR,
+ IMX_MU_xCR_MAX,
+};
+
+enum imx_mu_xsr {
+ IMX_MU_SR,
+ IMX_MU_GSR,
+ IMX_MU_TSR,
+ IMX_MU_RSR,
+ IMX_MU_xSR_MAX
+};
+
+enum imx_mu_type {
+ IMX_MU_V2 = BIT(1),
+};
+
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_RFn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+
+struct imx_mu_dcfg {
+ enum imx_mu_type type;
+ u32 xTR; /* Transmit Register0 */
+ u32 xRR; /* Receive Register0 */
+ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
+ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
+};
+
+struct imx_mu_msi {
+ raw_spinlock_t lock;
+ struct irq_domain *msi_domain;
+ void __iomem *regs;
+ phys_addr_t msiir_addr;
+ const struct imx_mu_dcfg *cfg;
+ unsigned long used;
+ struct clk *clk;
+};
+
+static void imx_mu_write(struct imx_mu_msi *msi_data, u32 val, u32 offs)
+{
+ iowrite32(val, msi_data->regs + offs);
+}
+
+static u32 imx_mu_read(struct imx_mu_msi *msi_data, u32 offs)
+{
+ return ioread32(msi_data->regs + offs);
+}
+
+static u32 imx_mu_xcr_rmw(struct imx_mu_msi *msi_data, enum imx_mu_xcr type, u32 set, u32 clr)
+{
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ val = imx_mu_read(msi_data, msi_data->cfg->xCR[type]);
+ val &= ~clr;
+ val |= set;
+ imx_mu_write(msi_data, val, msi_data->cfg->xCR[type]);
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+
+ return val;
+}
+
+static void imx_mu_msi_parent_mask_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(msi_data, data->hwirq));
+}
+
+static void imx_mu_msi_parent_unmask_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, IMX_MU_xCR_RIEn(msi_data, data->hwirq), 0);
+}
+
+static void imx_mu_msi_parent_ack_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4);
+}
+
+static struct irq_chip imx_mu_msi_irq_chip = {
+ .name = "MU-MSI",
+ .irq_ack = irq_chip_ack_parent,
+};
+
+static struct msi_domain_ops imx_mu_msi_irq_ops = {
+};
+
+static struct msi_domain_info imx_mu_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &imx_mu_msi_irq_ops,
+ .chip = &imx_mu_msi_irq_chip,
+};
+
+static void imx_mu_msi_parent_compose_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+ u64 addr = msi_data->msiir_addr + 4 * data->hwirq;
+
+ msg->address_hi = upper_32_bits(addr);
+ msg->address_lo = lower_32_bits(addr);
+ msg->data = data->hwirq;
+}
+
+static int imx_mu_msi_parent_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip imx_mu_msi_parent_chip = {
+ .name = "MU",
+ .irq_mask = imx_mu_msi_parent_mask_irq,
+ .irq_unmask = imx_mu_msi_parent_unmask_irq,
+ .irq_ack = imx_mu_msi_parent_ack_irq,
+ .irq_compose_msi_msg = imx_mu_msi_parent_compose_msg,
+ .irq_set_affinity = imx_mu_msi_parent_set_affinity,
+};
+
+static int imx_mu_msi_domain_irq_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct imx_mu_msi *msi_data = domain->host_data;
+ unsigned long flags;
+ int pos, err = 0;
+
+ WARN_ON(nr_irqs != 1);
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ pos = find_first_zero_bit(&msi_data->used, IMX_MU_CHANS);
+ if (pos < IMX_MU_CHANS)
+ __set_bit(pos, &msi_data->used);
+ else
+ err = -ENOSPC;
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, pos,
+ &imx_mu_msi_parent_chip, msi_data,
+ handle_edge_irq, NULL, NULL);
+ return 0;
+}
+
+static void imx_mu_msi_domain_irq_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ __clear_bit(d->hwirq, &msi_data->used);
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+}
+
+static const struct irq_domain_ops imx_mu_msi_domain_ops = {
+ .alloc = imx_mu_msi_domain_irq_alloc,
+ .free = imx_mu_msi_domain_irq_free,
+};
+
+static void imx_mu_msi_irq_handler(struct irq_desc *desc)
+{
+ struct imx_mu_msi *msi_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 status;
+ int i;
+
+ status = imx_mu_read(msi_data, msi_data->cfg->xSR[IMX_MU_RSR]);
+
+ chained_irq_enter(chip, desc);
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ if (status & IMX_MU_xSR_RFn(msi_data, i))
+ generic_handle_domain_irq(msi_data->msi_domain, i);
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev)
+{
+ struct fwnode_handle *fwnodes = dev_fwnode(dev);
+ struct irq_domain *parent;
+
+ /* Initialize MSI domain parent */
+ parent = irq_domain_create_linear(fwnodes,
+ IMX_MU_CHANS,
+ &imx_mu_msi_domain_ops,
+ msi_data);
+ if (!parent) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes,
+ &imx_mu_msi_domain_info,
+ parent);
+
+ if (!msi_data->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ irq_domain_set_pm_device(msi_data->msi_domain, dev);
+
+ return 0;
+}
+
+/* Register offsets for the different MU IP versions */
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .type = 0,
+ .xTR = 0x0,
+ .xRR = 0x10,
+ .xSR = {
+ [IMX_MU_SR] = 0x20,
+ [IMX_MU_GSR] = 0x20,
+ [IMX_MU_TSR] = 0x20,
+ [IMX_MU_RSR] = 0x20,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x24,
+ [IMX_MU_GCR] = 0x24,
+ [IMX_MU_TCR] = 0x24,
+ [IMX_MU_RCR] = 0x24,
+ },
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .type = 0,
+ .xTR = 0x20,
+ .xRR = 0x40,
+ .xSR = {
+ [IMX_MU_SR] = 0x60,
+ [IMX_MU_GSR] = 0x60,
+ [IMX_MU_TSR] = 0x60,
+ [IMX_MU_RSR] = 0x60,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x64,
+ [IMX_MU_GCR] = 0x64,
+ [IMX_MU_TCR] = 0x64,
+ [IMX_MU_RCR] = 0x64,
+ },
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
+ .type = IMX_MU_V2,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {
+ [IMX_MU_SR] = 0xC,
+ [IMX_MU_GSR] = 0x118,
+ [IMX_MU_TSR] = 0x124,
+ [IMX_MU_RSR] = 0x12C,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x110,
+ [IMX_MU_GCR] = 0x114,
+ [IMX_MU_TCR] = 0x120,
+ [IMX_MU_RCR] = 0x128
+ },
+};
+
+static int __init imx_mu_of_init(struct device_node *dn,
+ struct device_node *parent,
+ const struct imx_mu_dcfg *cfg)
+{
+ struct platform_device *pdev = of_find_device_by_node(dn);
+ struct device_link *pd_link_a;
+ struct device_link *pd_link_b;
+ struct imx_mu_msi *msi_data;
+ struct resource *res;
+ struct device *pd_a;
+ struct device *pd_b;
+ struct device *dev;
+ int ret;
+ int irq;
+
+ dev = &pdev->dev;
+
+ msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ msi_data->cfg = cfg;
+
+ msi_data->regs = devm_platform_ioremap_resource_byname(pdev, "processor-a-side");
+ if (IS_ERR(msi_data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize 'regs'\n");
+ return PTR_ERR(msi_data->regs);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "processor-b-side");
+ if (!res)
+ return -EIO;
+
+ msi_data->msiir_addr = res->start + msi_data->cfg->xTR;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, msi_data);
+
+ msi_data->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(msi_data->clk))
+ return PTR_ERR(msi_data->clk);
+
+ pd_a = dev_pm_domain_attach_by_name(dev, "processor-a-side");
+ if (IS_ERR(pd_a))
+ return PTR_ERR(pd_a);
+
+ pd_b = dev_pm_domain_attach_by_name(dev, "processor-b-side");
+ if (IS_ERR(pd_b))
+ return PTR_ERR(pd_b);
+
+ pd_link_a = device_link_add(dev, pd_a,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+
+ if (!pd_link_a) {
+ dev_err(dev, "Failed to add device_link to mu a.\n");
+ goto err_pd_a;
+ }
+
+ pd_link_b = device_link_add(dev, pd_b,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+
+ if (!pd_link_b) {
+		dev_err(dev, "Failed to add device_link to mu b.\n");
+ goto err_pd_b;
+ }
+
+ ret = imx_mu_msi_domains_init(msi_data, dev);
+ if (ret)
+ goto err_dm_init;
+
+ pm_runtime_enable(dev);
+
+ irq_set_chained_handler_and_data(irq,
+ imx_mu_msi_irq_handler,
+ msi_data);
+
+ return 0;
+
+err_dm_init:
+ device_link_remove(dev, pd_b);
+err_pd_b:
+ device_link_remove(dev, pd_a);
+err_pd_a:
+ return -EINVAL;
+}
+
+static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
+{
+ struct imx_mu_msi *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
+{
+ struct imx_mu_msi *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+ imx_mu_runtime_resume, NULL)
+};
+
+static int __init imx_mu_imx7ulp_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp);
+}
+
+static int __init imx_mu_imx6sx_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx);
+}
+
+static int __init imx_mu_imx8ulp_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp);
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi)
+IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init)
+IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init)
+IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops)
+
+
+MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
+MODULE_DESCRIPTION("Freescale MU MSI controller driver");
+MODULE_LICENSE("GPL");
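
A worked example of the bit layout computed by IMX_MU_xCR_RIEn() in the new file above (derived from the macro itself, not stated in the patch): on MU v2 parts the receive-interrupt-enable bits sit at the bottom of the register, on v1 parts they occupy bits 27..24 in reverse channel order. The helper names below are invented for illustration.

#include <linux/bits.h>

static inline u32 example_rien_v1(unsigned int chan)
{
	return BIT(24 + (3 - chan));	/* chan 0 -> BIT(27), chan 3 -> BIT(24) */
}

static inline u32 example_rien_v2(unsigned int chan)
{
	return BIT(chan);		/* chan n -> BIT(n) */
}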
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
index 853b3972dbe7..d8d48b1f7c29 100644
--- a/drivers/irqchip/irq-ls-extirq.c
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -6,8 +6,7 @@
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -16,13 +15,41 @@
#define LS1021A_SCFGREVCR 0x200
struct ls_extirq_data {
- struct regmap *syscon;
- u32 intpcr;
+ void __iomem *intpcr;
+ raw_spinlock_t lock;
+ bool big_endian;
bool is_ls1021a_or_ls1043a;
u32 nirq;
struct irq_fwspec map[MAXIRQ];
};
+static void ls_extirq_intpcr_rmw(struct ls_extirq_data *priv, u32 mask,
+ u32 value)
+{
+ u32 intpcr;
+
+ /*
+ * Serialize concurrent calls to ls_extirq_set_type() from multiple
+ * IRQ descriptors, making sure the read-modify-write is atomic.
+ */
+ raw_spin_lock(&priv->lock);
+
+ if (priv->big_endian)
+ intpcr = ioread32be(priv->intpcr);
+ else
+ intpcr = ioread32(priv->intpcr);
+
+ intpcr &= ~mask;
+ intpcr |= value;
+
+ if (priv->big_endian)
+ iowrite32be(intpcr, priv->intpcr);
+ else
+ iowrite32(intpcr, priv->intpcr);
+
+ raw_spin_unlock(&priv->lock);
+}
+
static int
ls_extirq_set_type(struct irq_data *data, unsigned int type)
{
@@ -51,7 +78,8 @@ ls_extirq_set_type(struct irq_data *data, unsigned int type)
default:
return -EINVAL;
}
- regmap_update_bits(priv->syscon, priv->intpcr, mask, value);
+
+ ls_extirq_intpcr_rmw(priv, mask, value);
return irq_chip_set_type_parent(data, type);
}
@@ -143,7 +171,6 @@ ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
static int __init
ls_extirq_of_init(struct device_node *node, struct device_node *parent)
{
-
struct irq_domain *domain, *parent_domain;
struct ls_extirq_data *priv;
int ret;
@@ -151,40 +178,52 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent)
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("Cannot find parent domain\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_irq_find_host;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->syscon = syscon_node_to_regmap(node->parent);
- if (IS_ERR(priv->syscon)) {
- ret = PTR_ERR(priv->syscon);
- pr_err("Failed to lookup parent regmap\n");
- goto out;
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_alloc_priv;
}
- ret = of_property_read_u32(node, "reg", &priv->intpcr);
- if (ret) {
- pr_err("Missing INTPCR offset value\n");
- goto out;
+
+ /*
+ * All extirq OF nodes are under a scfg/syscon node with
+ * the 'ranges' property
+ */
+ priv->intpcr = of_iomap(node, 0);
+ if (!priv->intpcr) {
+ pr_err("Cannot ioremap OF node %pOF\n", node);
+ ret = -ENOMEM;
+ goto err_iomap;
}
ret = ls_extirq_parse_map(priv, node);
if (ret)
- goto out;
+ goto err_parse_map;
+ priv->big_endian = of_device_is_big_endian(parent);
priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") ||
of_device_is_compatible(node, "fsl,ls1043a-extirq");
+ raw_spin_lock_init(&priv->lock);
domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
&extirq_domain_ops, priv);
- if (!domain)
+ if (!domain) {
ret = -ENOMEM;
+ goto err_add_hierarchy;
+ }
-out:
- if (ret)
- kfree(priv);
+ return 0;
+
+err_add_hierarchy:
+err_parse_map:
+ iounmap(priv->intpcr);
+err_iomap:
+ kfree(priv);
+err_alloc_priv:
+err_irq_find_host:
return ret;
}
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index b4927e425f7b..527c90e0920e 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
@@ -18,7 +19,6 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
-#include <linux/dma-iommu.h>
#define MSI_IRQS_PER_MSIR 32
#define MSI_MSIR_OFFSET 4
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index 56bf502d9c67..2a349082af81 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -21,11 +21,33 @@
#define RTL_ICTL_IRR2 0x10
#define RTL_ICTL_IRR3 0x14
+#define RTL_ICTL_NUM_INPUTS 32
+
#define REG(x) (realtek_ictl_base + x)
static DEFINE_RAW_SPINLOCK(irq_lock);
static void __iomem *realtek_ictl_base;
+/*
+ * IRR0-IRR3 store 4 bits per interrupt, but Realtek uses inverted numbering,
+ * placing IRQ 31 in the first four bits. A routing value of '0' means the
+ * interrupt is left disconnected. Routing values {1..15} connect to output
+ * lines {0..14}.
+ */
+#define IRR_OFFSET(idx) (4 * (3 - (idx * 4) / 32))
+#define IRR_SHIFT(idx) ((idx * 4) % 32)
+
+static void write_irr(void __iomem *irr0, int idx, u32 value)
+{
+ unsigned int offset = IRR_OFFSET(idx);
+ unsigned int shift = IRR_SHIFT(idx);
+ u32 irr;
+
+ irr = readl(irr0 + offset) & ~(0xf << shift);
+ irr |= (value & 0xf) << shift;
+ writel(irr, irr0 + offset);
+}
+
static void realtek_ictl_unmask_irq(struct irq_data *i)
{
unsigned long flags;
@@ -62,8 +84,14 @@ static struct irq_chip realtek_ictl_irq = {
static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
+ unsigned long flags;
+
irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq);
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ write_irr(REG(RTL_ICTL_IRR0), hw, 1);
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+
return 0;
}
@@ -95,90 +123,50 @@ out:
chained_irq_exit(chip, desc);
}
-/*
- * SoC interrupts are cascaded to MIPS CPU interrupts according to the
- * interrupt-map in the device tree. Each SoC interrupt gets 4 bits for
- * the CPU interrupt in an Interrupt Routing Register. Max 32 SoC interrupts
- * thus go into 4 IRRs. A routing value of '0' means the interrupt is left
- * disconnected. Routing values {1..15} connect to output lines {0..14}.
- */
-static int __init map_interrupts(struct device_node *node, struct irq_domain *domain)
-{
- struct device_node *cpu_ictl;
- const __be32 *imap;
- u32 imaplen, soc_int, cpu_int, tmp, regs[4];
- int ret, i, irr_regs[] = {
- RTL_ICTL_IRR3,
- RTL_ICTL_IRR2,
- RTL_ICTL_IRR1,
- RTL_ICTL_IRR0,
- };
- u8 mips_irqs_set;
-
- ret = of_property_read_u32(node, "#address-cells", &tmp);
- if (ret || tmp)
- return -EINVAL;
-
- imap = of_get_property(node, "interrupt-map", &imaplen);
- if (!imap || imaplen % 3)
- return -EINVAL;
-
- mips_irqs_set = 0;
- memset(regs, 0, sizeof(regs));
- for (i = 0; i < imaplen; i += 3 * sizeof(u32)) {
- soc_int = be32_to_cpup(imap);
- if (soc_int > 31)
- return -EINVAL;
-
- cpu_ictl = of_find_node_by_phandle(be32_to_cpup(imap + 1));
- if (!cpu_ictl)
- return -EINVAL;
- ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp);
- of_node_put(cpu_ictl);
- if (ret || tmp != 1)
- return -EINVAL;
-
- cpu_int = be32_to_cpup(imap + 2);
- if (cpu_int > 7 || cpu_int < 2)
- return -EINVAL;
-
- if (!(mips_irqs_set & BIT(cpu_int))) {
- irq_set_chained_handler_and_data(cpu_int, realtek_irq_dispatch,
- domain);
- mips_irqs_set |= BIT(cpu_int);
- }
-
- /* Use routing values (1..6) for CPU interrupts (2..7) */
- regs[(soc_int * 4) / 32] |= (cpu_int - 1) << (soc_int * 4) % 32;
- imap += 3;
- }
-
- for (i = 0; i < 4; i++)
- writel(regs[i], REG(irr_regs[i]));
-
- return 0;
-}
-
static int __init realtek_rtl_of_init(struct device_node *node, struct device_node *parent)
{
+ struct of_phandle_args oirq;
struct irq_domain *domain;
- int ret;
+ unsigned int soc_irq;
+ int parent_irq;
realtek_ictl_base = of_iomap(node, 0);
if (!realtek_ictl_base)
return -ENXIO;
- /* Disable all cascaded interrupts */
+ /* Disable all cascaded interrupts and clear routing */
writel(0, REG(RTL_ICTL_GIMR));
+ for (soc_irq = 0; soc_irq < RTL_ICTL_NUM_INPUTS; soc_irq++)
+ write_irr(REG(RTL_ICTL_IRR0), soc_irq, 0);
+
+ if (WARN_ON(!of_irq_count(node))) {
+ /*
+ * If DT contains no parent interrupts, assume MIPS CPU IRQ 2
+ * (HW0) is connected to the first output. This is the case for
+ * all known hardware anyway. "interrupt-map" is deprecated, so
+ * don't bother trying to parse that.
+ */
+ oirq.np = of_find_compatible_node(NULL, NULL, "mti,cpu-interrupt-controller");
+ oirq.args_count = 1;
+ oirq.args[0] = 2;
+
+ parent_irq = irq_create_of_mapping(&oirq);
+
+ of_node_put(oirq.np);
+ } else {
+ parent_irq = of_irq_get(node, 0);
+ }
- domain = irq_domain_add_simple(node, 32, 0,
- &irq_domain_ops, NULL);
+ if (parent_irq < 0)
+ return parent_irq;
+ else if (!parent_irq)
+ return -ENODEV;
- ret = map_interrupts(node, domain);
- if (ret) {
- pr_err("invalid interrupt map\n");
- return ret;
- }
+ domain = irq_domain_add_linear(node, RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL);
+ if (!domain)
+ return -ENOMEM;
+
+ irq_set_chained_handler_and_data(parent_irq, realtek_irq_dispatch, domain);
return 0;
}
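
A worked example of where IRR_OFFSET()/IRR_SHIFT() above place a routing nibble, derived directly from the macros (the specific indices are only illustrations):

/*
 *   idx = 0  -> offset 0x0c from IRR0 (i.e. IRR3), shift 0   -> bits  3:0
 *   idx = 7  -> offset 0x0c from IRR0 (i.e. IRR3), shift 28  -> bits 31:28
 *   idx = 8  -> offset 0x08 from IRR0 (i.e. IRR2), shift 0   -> bits  3:0
 *   idx = 31 -> offset 0x00 (IRR0 itself),         shift 28  -> bits 31:28
 *
 * so write_irr(REG(RTL_ICTL_IRR0), 31, 1) routes SoC input 31 to output
 * line 0, matching the "inverted numbering" described in the comment.
 */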
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index af17459c1a5c..e964a8dd8512 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2345,8 +2345,7 @@ HFC_init(void)
static void __exit
HFC_cleanup(void)
{
- if (timer_pending(&hfc_tl))
- del_timer_sync(&hfc_tl);
+ del_timer_sync(&hfc_tl);
pci_unregister_driver(&hfc_driver);
}
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 00aecd67e348..a7e052c1db53 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -101,6 +101,7 @@ struct pca963x_led {
struct pca963x *chip;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
+ bool blinking;
u8 gdc;
u8 gfrq;
};
@@ -129,12 +130,21 @@ static int pca963x_brightness(struct pca963x_led *led,
switch (brightness) {
case LED_FULL:
- val = (ledout & ~mask) | (PCA963X_LED_ON << shift);
+ if (led->blinking) {
+ val = (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift);
+ ret = i2c_smbus_write_byte_data(client,
+ PCA963X_PWM_BASE +
+ led->led_num,
+ LED_FULL);
+ } else {
+ val = (ledout & ~mask) | (PCA963X_LED_ON << shift);
+ }
ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
break;
case LED_OFF:
val = ledout & ~mask;
ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
+ led->blinking = false;
break;
default:
ret = i2c_smbus_write_byte_data(client,
@@ -144,7 +154,11 @@ static int pca963x_brightness(struct pca963x_led *led,
if (ret < 0)
return ret;
- val = (ledout & ~mask) | (PCA963X_LED_PWM << shift);
+ if (led->blinking)
+ val = (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift);
+ else
+ val = (ledout & ~mask) | (PCA963X_LED_PWM << shift);
+
ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
break;
}
@@ -181,6 +195,7 @@ static void pca963x_blink(struct pca963x_led *led)
}
mutex_unlock(&led->chip->mutex);
+ led->blinking = true;
}
static int pca963x_power_state(struct pca963x_led *led)
@@ -275,6 +290,8 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
led->gfrq = gfrq;
pca963x_blink(led);
+ led->led_cdev.brightness = LED_FULL;
+ pca963x_led_set(led_cdev, LED_FULL);
*delay_on = time_on;
*delay_off = time_off;
@@ -337,6 +354,7 @@ static int pca963x_register_leds(struct i2c_client *client,
led->led_cdev.brightness_set_blocking = pca963x_led_set;
if (hw_blink)
led->led_cdev.blink_set = pca963x_blink_set;
+ led->blinking = false;
init_data.fwnode = child;
/* for backwards compatibility */
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 61fe2ab910b8..b8228ca40454 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -317,19 +317,21 @@ static void do_attach(struct i2c_adapter *adapter)
if (x.running || strncmp(adapter->name, "uni-n", 5))
return;
+ of_node_get(adapter->dev.of_node);
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
if (np) {
of_node_put(np);
} else {
- strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
+ strscpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
i2c_new_scanned_device(adapter, &info, scan_ds1775, NULL);
}
+ of_node_get(adapter->dev.of_node);
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
if (np) {
of_node_put(np);
} else {
- strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
+ strscpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
i2c_new_scanned_device(adapter, &info, scan_adm1030, NULL);
}
}
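
The added of_node_get() calls above compensate for a property of the OF search API: of_find_compatible_node() drops a reference on its 'from' argument, so a caller that still needs that node must pin it first. A minimal sketch (invented function name, not from the patch):

#include <linux/of.h>

static bool example_has_child_compat(struct device_node *start, const char *compat)
{
	struct device_node *np;

	of_node_get(start);		/* balance the put done by the search */
	np = of_find_compatible_node(start, NULL, compat);
	of_node_put(np);		/* we only care whether it exists */

	return np != NULL;
}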
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
index 496c4951ccb1..2a3e8d8ff8b5 100644
--- a/drivers/mailbox/apple-mailbox.c
+++ b/drivers/mailbox/apple-mailbox.c
@@ -17,6 +17,7 @@
*/
#include <linux/apple-mailbox.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
@@ -25,6 +26,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
@@ -100,6 +102,7 @@ struct apple_mbox {
struct device *dev;
struct mbox_controller controller;
+ spinlock_t rx_lock;
};
static const struct of_device_id apple_mbox_of_match[];
@@ -112,6 +115,14 @@ static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
return !(mbox_ctrl & apple_mbox->hw->control_full);
}
+static bool apple_mbox_hw_send_empty(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
+
+ return mbox_ctrl & apple_mbox->hw->control_empty;
+}
+
static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
struct apple_mbox_msg *msg)
{
@@ -195,13 +206,15 @@ static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+static int apple_mbox_poll(struct apple_mbox *apple_mbox)
{
- struct apple_mbox *apple_mbox = data;
struct apple_mbox_msg msg;
+ int ret = 0;
- while (apple_mbox_hw_recv(apple_mbox, &msg) == 0)
+ while (apple_mbox_hw_recv(apple_mbox, &msg) == 0) {
mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
+ ret++;
+ }
/*
* The interrupt will keep firing even if there are no more messages
@@ -216,9 +229,50 @@ static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
apple_mbox->regs + apple_mbox->hw->irq_ack);
}
+ return ret;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+
+ spin_lock(&apple_mbox->rx_lock);
+ apple_mbox_poll(apple_mbox);
+ spin_unlock(&apple_mbox->rx_lock);
+
return IRQ_HANDLED;
}
+static bool apple_mbox_chan_peek_data(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&apple_mbox->rx_lock, flags);
+ ret = apple_mbox_poll(apple_mbox);
+ spin_unlock_irqrestore(&apple_mbox->rx_lock, flags);
+
+ return ret > 0;
+}
+
+static int apple_mbox_chan_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ unsigned long deadline = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, deadline)) {
+ if (apple_mbox_hw_send_empty(apple_mbox)) {
+ mbox_chan_txdone(&apple_mbox->chan, 0);
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
static int apple_mbox_chan_startup(struct mbox_chan *chan)
{
struct apple_mbox *apple_mbox = chan->con_priv;
@@ -250,6 +304,8 @@ static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
static const struct mbox_chan_ops apple_mbox_ops = {
.send_data = apple_mbox_chan_send_data,
+ .peek_data = apple_mbox_chan_peek_data,
+ .flush = apple_mbox_chan_flush,
.startup = apple_mbox_chan_startup,
.shutdown = apple_mbox_chan_shutdown,
};
@@ -304,6 +360,7 @@ static int apple_mbox_probe(struct platform_device *pdev)
mbox->controller.txdone_irq = true;
mbox->controller.of_xlate = apple_mbox_of_xlate;
mbox->chan.con_priv = mbox;
+ spin_lock_init(&mbox->rx_lock);
irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
if (!irqname)
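
With ->flush() and ->peek_data() implemented above, a mailbox client that cannot sleep can busy-wait for its message to leave the hardware FIFO and poll for replies instead of relying on the IRQ. The snippet below is a hedged client-side sketch using the generic mailbox client API; the function name and the 100 ms budget are invented.

#include <linux/mailbox_client.h>

static int example_send_atomic(struct mbox_chan *chan, void *msg)
{
	int ret = mbox_send_message(chan, msg);

	if (ret < 0)
		return ret;

	return mbox_flush(chan, 100);	/* spin up to 100 ms for completion */
}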
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index fda16f76401e..bf6e86b0ed09 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -622,15 +622,15 @@ static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- if (rc < 0)
- return rc;
+ if (!rc)
+ return -EIO;
rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
DMA_FROM_DEVICE);
- if (rc < 0) {
+ if (!rc) {
dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- return rc;
+ return -EIO;
}
return 0;
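
The fix above relies on the dma_map_sg() convention: it returns the number of mapped entries and 0 on failure, never a negative errno, so error checks must test for zero. A minimal sketch of the idiomatic pattern (invented function name, not from the patch):

#include <linux/dma-mapping.h>

static int example_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)			/* 0 == failure for dma_map_sg() */
		return -EIO;

	/* ... use the mapping ... */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}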
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 02922073c9ef..20f2ec880ad6 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -904,7 +904,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
.xTR = 0x20,
.xRR = 0x40,
.xSR = {0x60, 0x60, 0x60, 0x60},
- .xCR = {0x64, 0x64, 0x64, 0x64},
+ .xCR = {0x64, 0x64, 0x64, 0x64, 0x64},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
@@ -927,7 +927,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
- .xCR = {0x110, 0x114, 0x120, 0x128},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
@@ -938,7 +938,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
- .xCR = {0x110, 0x114, 0x120, 0x128},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
@@ -949,7 +949,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
- .xCR = {0x24, 0x24, 0x24, 0x24},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
@@ -960,7 +960,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
- .xCR = {0x24, 0x24, 0x24, 0x24},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct of_device_id imx_mu_dt_ids[] = {
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 4e34854d1238..cfacb3f320a6 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -62,6 +62,7 @@ struct mpfs_mbox {
struct mbox_controller controller;
struct device *dev;
int irq;
+ void __iomem *ctrl_base;
void __iomem *mbox_base;
void __iomem *int_reg;
struct mbox_chan chans[1];
@@ -73,7 +74,7 @@ static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
{
u32 status;
- status = readl_relaxed(mbox->mbox_base + SERVICES_SR_OFFSET);
+ status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
return status & SCB_STATUS_BUSY_MASK;
}
@@ -99,29 +100,27 @@ static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
for (index = 0; index < (msg->cmd_data_size / 4); index++)
writel_relaxed(word_buf[index],
- mbox->mbox_base + MAILBOX_REG_OFFSET + index * 0x4);
+ mbox->mbox_base + msg->mbox_offset + index * 0x4);
if (extra_bits) {
u8 i;
u8 byte_off = ALIGN_DOWN(msg->cmd_data_size, 4);
u8 *byte_buf = msg->cmd_data + byte_off;
- val = readl_relaxed(mbox->mbox_base +
- MAILBOX_REG_OFFSET + index * 0x4);
+ val = readl_relaxed(mbox->mbox_base + msg->mbox_offset + index * 0x4);
for (i = 0u; i < extra_bits; i++) {
val &= ~(0xffu << (i * 8u));
val |= (byte_buf[i] << (i * 8u));
}
- writel_relaxed(val,
- mbox->mbox_base + MAILBOX_REG_OFFSET + index * 0x4);
+ writel_relaxed(val, mbox->mbox_base + msg->mbox_offset + index * 0x4);
}
}
opt_sel = ((msg->mbox_offset << 7u) | (msg->cmd_opcode & 0x7fu));
tx_trigger = (opt_sel << SCB_CTRL_POS) & SCB_CTRL_MASK;
tx_trigger |= SCB_CTRL_REQ_MASK | SCB_STATUS_NOTIFY_MASK;
- writel_relaxed(tx_trigger, mbox->mbox_base + SERVICES_CR_OFFSET);
+ writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET);
return 0;
}
@@ -141,7 +140,7 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan)
if (!mpfs_mbox_busy(mbox)) {
for (i = 0; i < num_words; i++) {
response->resp_msg[i] =
- readl_relaxed(mbox->mbox_base + MAILBOX_REG_OFFSET
+ readl_relaxed(mbox->mbox_base
+ mbox->resp_offset + i * 0x4);
}
}
@@ -200,14 +199,18 @@ static int mpfs_mbox_probe(struct platform_device *pdev)
if (!mbox)
return -ENOMEM;
- mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
- if (IS_ERR(mbox->mbox_base))
- return PTR_ERR(mbox->mbox_base);
+ mbox->ctrl_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
+ if (IS_ERR(mbox->ctrl_base))
+ return PTR_ERR(mbox->ctrl_base);
mbox->int_reg = devm_platform_get_and_ioremap_resource(pdev, 1, &regs);
if (IS_ERR(mbox->int_reg))
return PTR_ERR(mbox->int_reg);
+ mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 2, &regs);
+ if (IS_ERR(mbox->mbox_base)) // account for the old dt-binding w/ 2 regs
+ mbox->mbox_base = mbox->ctrl_base + MAILBOX_REG_OFFSET;
+
mbox->irq = platform_get_irq(pdev, 0);
if (mbox->irq < 0)
return mbox->irq;
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index ebfa33a40fce..3c2bc0ca454c 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -676,7 +676,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
if (pcct_entry->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE &&
!pcc_mbox_ctrl->txdone_irq) {
- pr_err("Plaform Interrupt flag must be set to 1");
+ pr_err("Platform Interrupt flag must be set to 1");
rc = -EINVAL;
goto err;
}
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 80a54d81412e..f1f0e87a79e6 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -142,7 +142,7 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 31d58b7d55fe..7e27acf6c0cc 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -308,7 +308,8 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
goto err_mbox;
ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
- IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, name, ipcc);
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND |
+ IRQF_NO_THREAD, name, ipcc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
goto err_req_irq;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f2c5a7e06fa9..3427555b0cca 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -401,7 +401,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
}
if (bypass_torture_test(dc)) {
- if ((get_random_int() & 3) == 3)
+ if (prandom_u32_max(4) == 3)
goto skip;
else
goto rescale;
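The bcache conversion above preserves the original odds: prandom_u32_max(4) returns a value uniformly distributed in [0, 4), so comparing it against 3 still skips roughly one request in four, exactly like the old (get_random_int() & 3) == 3 test, while using the dedicated bounded-random helper. Illustrative sketch only:

    /* Both forms bypass ~25% of torture-test requests. */
    if (prandom_u32_max(4) == 3)        /* uniform over {0, 1, 2, 3} */
            goto skip;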
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 09c7ed2650ca..9c5ef818ca36 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b)
{
BUG_ON(b->hold_count);
- if (!b->state) /* fast case */
+ /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
+ if (!smp_load_acquire(&b->state)) /* fast case */
return;
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
BUG_ON(test_bit(B_DIRTY, &b->state));
if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
- unlikely(test_bit(B_READING, &b->state)))
+ unlikely(test_bit_acquire(B_READING, &b->state)))
continue;
if (!b->hold_count) {
@@ -1058,7 +1059,7 @@ found_buffer:
* If the user called both dm_bufio_prefetch and dm_bufio_get on
* the same buffer, it would deadlock if we waited.
*/
- if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
+ if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
return NULL;
b->hold_count++;
@@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b)
* invalid buffer.
*/
if ((b->read_error || b->write_error) &&
- !test_bit(B_READING, &b->state) &&
+ !test_bit_acquire(B_READING, &b->state) &&
!test_bit(B_WRITING, &b->state) &&
!test_bit(B_DIRTY, &b->state)) {
__unlink_buffer(b);
@@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move);
static void forget_buffer_locked(struct dm_buffer *b)
{
- if (likely(!b->hold_count) && likely(!b->state)) {
+ if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
__unlink_buffer(b);
__free_buffer_wake(b);
}
@@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
if (!(gfp & __GFP_FS) ||
(static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
- if (test_bit(B_READING, &b->state) ||
+ if (test_bit_acquire(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
return false;
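The dm-bufio hunks above replace plain test_bit()/reads of b->state with acquire-ordered accessors so a reader cannot observe B_READING clear while still seeing stale buffer contents; the acquire pairs with the smp_mb__before_atomic() that the read completion path issues before clearing the bit. A simplified sketch of the pairing, with field and helper names invented for illustration rather than taken from the driver:

    /* writer: I/O completion path */
    buf->data_ready = 1;
    smp_mb__before_atomic();                 /* order the data before the flag */
    clear_bit(B_READING, &buf->state);

    /* reader */
    if (!test_bit_acquire(B_READING, &buf->state))
            consume(buf->data_ready);        /* guaranteed to see data_ready == 1 */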
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index c05fc3436cef..06eb31af626f 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -166,7 +166,7 @@ struct dm_cache_policy_type {
struct dm_cache_policy_type *real;
/*
- * Policies may store a hint for each each cache block.
+ * Policies may store a hint for each cache block.
* Currently the size of this hint must be 0 or 4 bytes but we
* expect to relax this in future.
*/
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 811b0a5379d0..2f1cc66d2641 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
reason = "max discard sectors smaller than a region";
if (reason) {
- DMWARN("Destination device (%pd) %s: Disabling discard passdown.",
+ DMWARN("Destination device (%pg) %s: Disabling discard passdown.",
dest_dev, reason);
clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 98976aaa9db9..6b3f867d0b70 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
hc = __get_name_cell(new);
if (hc) {
- DMWARN("Unable to change %s on mapped device %s to one that "
- "already exists: %s",
- change_uuid ? "uuid" : "name",
- param->name, new);
+ DMERR("Unable to change %s on mapped device %s to one that "
+ "already exists: %s",
+ change_uuid ? "uuid" : "name",
+ param->name, new);
dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_data);
@@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
*/
hc = __get_name_cell(param->name);
if (!hc) {
- DMWARN("Unable to rename non-existent device, %s to %s%s",
- param->name, change_uuid ? "uuid " : "", new);
+ DMERR("Unable to rename non-existent device, %s to %s%s",
+ param->name, change_uuid ? "uuid " : "", new);
up_write(&_hash_lock);
kfree(new_data);
return ERR_PTR(-ENXIO);
@@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
* Does this device already have a uuid?
*/
if (change_uuid && hc->uuid) {
- DMWARN("Unable to change uuid of mapped device %s to %s "
- "because uuid is already set to %s",
- param->name, new, hc->uuid);
+ DMERR("Unable to change uuid of mapped device %s to %s "
+ "because uuid is already set to %s",
+ param->name, new, hc->uuid);
dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_data);
@@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t
static int check_name(const char *name)
{
if (strchr(name, '/')) {
- DMWARN("invalid device name");
+ DMERR("invalid device name");
return -EINVAL;
}
@@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src
down_read(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
- DMWARN("device has been removed from the dev hash table.");
+ DMERR("device has been removed from the dev hash table.");
goto out;
}
@@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
if (new_data < param->data ||
invalid_str(new_data, (void *) param + param_size) || !*new_data ||
strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
- DMWARN("Invalid new mapped device name or uuid string supplied.");
+ DMERR("Invalid new mapped device name or uuid string supplied.");
return -EINVAL;
}
@@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
if (geostr < param->data ||
invalid_str(geostr, (void *) param + param_size)) {
- DMWARN("Invalid geometry supplied.");
+ DMERR("Invalid geometry supplied.");
goto out;
}
@@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
indata + 1, indata + 2, indata + 3, &dummy);
if (x != 4) {
- DMWARN("Unable to interpret geometry settings.");
+ DMERR("Unable to interpret geometry settings.");
goto out;
}
if (indata[0] > 65535 || indata[1] > 255 ||
indata[2] > 255 || indata[3] > ULONG_MAX) {
- DMWARN("Geometry exceeds range limits.");
+ DMERR("Geometry exceeds range limits.");
goto out;
}
@@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table,
char *target_params;
if (!param->target_count) {
- DMWARN("populate_table: no targets specified");
+ DMERR("populate_table: no targets specified");
return -EINVAL;
}
@@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table,
r = next_target(spec, next, end, &spec, &target_params);
if (r) {
- DMWARN("unable to find target");
+ DMERR("unable to find target");
return r;
}
@@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table,
(sector_t) spec->length,
target_params);
if (r) {
- DMWARN("error adding target to table");
+ DMERR("error adding target to table");
return r;
}
@@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
if (immutable_target_type &&
(immutable_target_type != dm_table_get_immutable_target_type(t)) &&
!dm_table_get_wildcard_target(t)) {
- DMWARN("can't replace immutable target type %s",
- immutable_target_type->name);
+ DMERR("can't replace immutable target type %s",
+ immutable_target_type->name);
r = -EINVAL;
goto err_unlock_md_type;
}
@@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t);
if (r) {
- DMWARN("unable to set up device queue for new table.");
+ DMERR("unable to set up device queue for new table.");
goto err_unlock_md_type;
}
} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
- DMWARN("can't change device type (old=%u vs new=%u) after initial table load.",
- dm_get_md_type(md), dm_table_get_type(t));
+ DMERR("can't change device type (old=%u vs new=%u) after initial table load.",
+ dm_get_md_type(md), dm_table_get_type(t));
r = -EINVAL;
goto err_unlock_md_type;
}
@@ -1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
- DMWARN("device has been removed from the dev hash table.");
+ DMERR("device has been removed from the dev hash table.");
up_write(&_hash_lock);
r = -ENXIO;
goto err_destroy_table;
@@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
if (tmsg < (struct dm_target_msg *) param->data ||
invalid_str(tmsg->message, (void *) param + param_size)) {
- DMWARN("Invalid target message parameters.");
+ DMERR("Invalid target message parameters.");
r = -EINVAL;
goto out;
}
r = dm_split_args(&argc, &argv, tmsg->message);
if (r) {
- DMWARN("Failed to split target message parameters");
+ DMERR("Failed to split target message parameters");
goto out;
}
if (!argc) {
- DMWARN("Empty message received.");
+ DMERR("Empty message received.");
r = -EINVAL;
goto out_argv;
}
@@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
ti = dm_table_find_target(table, tmsg->sector);
if (!ti) {
- DMWARN("Target message sector outside device.");
+ DMERR("Target message sector outside device.");
r = -EINVAL;
} else if (ti->type->message)
r = ti->type->message(ti, argc, argv, result, maxlen);
else {
- DMWARN("Target type does not support messages");
+ DMERR("Target type does not support messages");
r = -EINVAL;
}
@@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
if ((DM_VERSION_MAJOR != version[0]) ||
(DM_VERSION_MINOR < version[1])) {
- DMWARN("ioctl interface mismatch: "
- "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
- DM_VERSION_MAJOR, DM_VERSION_MINOR,
- DM_VERSION_PATCHLEVEL,
- version[0], version[1], version[2], cmd);
+ DMERR("ioctl interface mismatch: "
+ "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
+ DM_VERSION_MAJOR, DM_VERSION_MINOR,
+ DM_VERSION_PATCHLEVEL,
+ version[0], version[1], version[2], cmd);
r = -EINVAL;
}
@@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
if (cmd == DM_DEV_CREATE_CMD) {
if (!*param->name) {
- DMWARN("name not supplied when creating device");
+ DMERR("name not supplied when creating device");
return -EINVAL;
}
} else if (*param->uuid && *param->name) {
- DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
+ DMERR("only supply one of name or uuid, cmd(%u)", cmd);
return -EINVAL;
}
@@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
fn = lookup_ioctl(cmd, &ioctl_flags);
if (!fn) {
- DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
+ DMERR("dm_ctl_ioctl: unknown command 0x%x", command);
return -ENOTTY;
}
@@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
(sector_t) spec_array[i]->length,
target_params_array[i]);
if (r) {
- DMWARN("error adding target to table");
+ DMERR("error adding target to table");
goto err_destroy_table;
}
}
@@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t);
if (r) {
- DMWARN("unable to set up device queue for new table.");
+ DMERR("unable to set up device queue for new table.");
goto err_destroy_table;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c640be453313..54263679a7b1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
* of the "sync" directive.
*
* With reshaping capability added, we must ensure that
- * that the "sync" directive is disallowed during the reshape.
+ * the "sync" directive is disallowed during the reshape.
*/
if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue;
@@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
/*
* Adjust data_offset and new_data_offset on all disk members of @rs
- * for out of place reshaping if requested by contructor
+ * for out of place reshaping if requested by constructor
*
* We need free space at the beginning of each raid disk for forward
* and at the end for backward reshapes which userspace has to provide
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 3001b10a3fbf..a41209a43506 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
dm_requeue_original_request(tio, true);
break;
default:
- DMWARN("unimplemented target endio return value: %d", r);
+ DMCRIT("unimplemented target endio return value: %d", r);
BUG();
}
}
@@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio)
dm_kill_unmapped_request(rq, BLK_STS_IOERR);
break;
default:
- DMWARN("unimplemented target map return value: %d", r);
+ DMCRIT("unimplemented target map return value: %d", r);
BUG();
}
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8326f9fe0e91..f105a71915ab 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
return 2; /* this wasn't a stats message */
if (r == -EINVAL)
- DMWARN("Invalid parameters for message %s", argv[0]);
+ DMCRIT("Invalid parameters for message %s", argv[0]);
return r;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d8034ff0cb24..078da18bb86d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0;
if ((start >= dev_size) || (start + len > dev_size)) {
- DMWARN("%s: %pg too small for target: "
- "start=%llu, len=%llu, dev_size=%llu",
- dm_device_name(ti->table->md), bdev,
- (unsigned long long)start,
- (unsigned long long)len,
- (unsigned long long)dev_size);
+ DMERR("%s: %pg too small for target: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdev,
+ (unsigned long long)start,
+ (unsigned long long)len,
+ (unsigned long long)dev_size);
return 1;
}
@@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
unsigned int zone_sectors = bdev_zone_sectors(bdev);
if (start & (zone_sectors - 1)) {
- DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg",
- dm_device_name(ti->table->md),
- (unsigned long long)start,
- zone_sectors, bdev);
+ DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ zone_sectors, bdev);
return 1;
}
@@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* the sector range.
*/
if (len & (zone_sectors - 1)) {
- DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg",
- dm_device_name(ti->table->md),
- (unsigned long long)len,
- zone_sectors, bdev);
+ DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
+ dm_device_name(ti->table->md),
+ (unsigned long long)len,
+ zone_sectors, bdev);
return 1;
}
}
@@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0;
if (start & (logical_block_size_sectors - 1)) {
- DMWARN("%s: start=%llu not aligned to h/w "
- "logical block size %u of %pg",
- dm_device_name(ti->table->md),
- (unsigned long long)start,
- limits->logical_block_size, bdev);
+ DMERR("%s: start=%llu not aligned to h/w "
+ "logical block size %u of %pg",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ limits->logical_block_size, bdev);
return 1;
}
if (len & (logical_block_size_sectors - 1)) {
- DMWARN("%s: len=%llu not aligned to h/w "
- "logical block size %u of %pg",
- dm_device_name(ti->table->md),
- (unsigned long long)len,
- limits->logical_block_size, bdev);
+ DMERR("%s: len=%llu not aligned to h/w "
+ "logical block size %u of %pg",
+ dm_device_name(ti->table->md),
+ (unsigned long long)len,
+ limits->logical_block_size, bdev);
return 1;
}
@@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
}
}
if (!found) {
- DMWARN("%s: device %s not in table devices list",
- dm_device_name(ti->table->md), d->name);
+ DMERR("%s: device %s not in table devices list",
+ dm_device_name(ti->table->md), d->name);
return;
}
if (refcount_dec_and_test(&dd->count)) {
@@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
}
if (remaining) {
- DMWARN("%s: table line %u (start sect %llu len %llu) "
- "not aligned to h/w logical block size %u",
- dm_device_name(t->md), i,
- (unsigned long long) ti->begin,
- (unsigned long long) ti->len,
- limits->logical_block_size);
+ DMERR("%s: table line %u (start sect %llu len %llu) "
+ "not aligned to h/w logical block size %u",
+ dm_device_name(t->md), i,
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len,
+ limits->logical_block_size);
return -EINVAL;
}
@@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
struct dm_md_mempools *pools;
if (unlikely(type == DM_TYPE_NONE)) {
- DMWARN("no table type is set, can't allocate mempools");
+ DMERR("no table type is set, can't allocate mempools");
return -EINVAL;
}
@@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk)
* Get a disk whose integrity profile reflects the table's profile.
* Returns NULL if integrity support was inconsistent or unavailable.
*/
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
+static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *dd = NULL;
@@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t)
* profile the new profile should not conflict.
*/
if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
- DMWARN("%s: conflict with existing integrity profile: "
- "%s profile mismatch",
- dm_device_name(t->md),
- template_disk->disk_name);
+ DMERR("%s: conflict with existing integrity profile: "
+ "%s profile mismatch",
+ dm_device_name(t->md),
+ template_disk->disk_name);
return 1;
}
@@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
if (t->md->queue &&
!blk_crypto_has_capabilities(profile,
t->md->queue->crypto_profile)) {
- DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
+ DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
dm_destroy_crypto_profile(profile);
return -EINVAL;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 8a00cc42e498..ccf5b852fbf7 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
- if (v->use_tasklet) {
- /*
- * Allow verify_wq to preempt softirq since verification in
- * tasklet will fall-back to using it for error handling
- * (or if the bufio cache doesn't have required hashes).
- */
- wq_flags |= WQ_HIGHPRI;
- }
+ /*
+ * Using WQ_HIGHPRI improves throughput and completion latency by
+ * reducing wait times when reading from a dm-verity device.
+ *
+ * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
+ * allows verify_wq to preempt softirq since verification in tasklet
+ * will fall-back to using it for error handling (or if the bufio cache
+ * doesn't have required hashes).
+ */
+ wq_flags |= WQ_HIGHPRI;
v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 60549b65c799..95a1ee3d314e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
if (geo->start > sz) {
- DMWARN("Start sector is beyond the geometry limits.");
+ DMERR("Start sector is beyond the geometry limits.");
return -EINVAL;
}
@@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio)
/* The target will handle the io */
return;
default:
- DMWARN("unimplemented target endio return value: %d", r);
+ DMCRIT("unimplemented target endio return value: %d", r);
BUG();
}
}
@@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone)
dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
- DMWARN("unimplemented target map return value: %d", r);
+ DMCRIT("unimplemented target map return value: %d", r);
BUG();
}
}
@@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor)
md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) {
- DMWARN("unable to allocate device, out of memory.");
+ DMERR("unable to allocate device, out of memory.");
return NULL;
}
@@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->minors = 1;
md->disk->flags |= GENHD_FL_NO_PART;
md->disk->fops = &dm_blk_dops;
- md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 79c73330020b..832d8566e165 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -2994,7 +2994,7 @@ static int r5l_load_log(struct r5l_log *log)
}
create:
if (create_super) {
- log->last_cp_seq = prandom_u32();
+ log->last_cp_seq = get_random_u32();
cp = 0;
r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
/*
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 9b7bcdce6e44..303d02b1d71c 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -870,7 +870,7 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g = tpg_colors[col].g;
b = tpg_colors[col].b;
} else if (tpg->pattern == TPG_PAT_NOISE) {
- r = g = b = prandom_u32_max(256);
+ r = g = b = get_random_u8();
} else if (k == TPG_COLOR_RANDOM) {
r = g = b = tpg->qual_offset + prandom_u32_max(196);
} else if (k >= TPG_COLOR_RAMP) {
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 0d51bdf01f43..f6deac85962e 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -445,8 +445,8 @@ static int pt3_fetch_thread(void *data)
pt3_proc_dma(adap);
delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC);
- set_current_state(TASK_UNINTERRUPTIBLE);
- freezable_schedule_hrtimeout_range(&delay,
+ set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
+ schedule_hrtimeout_range(&delay,
PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-rx.c b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
index 232cab508f48..8bd09589fb15 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-rx.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
@@ -104,8 +104,8 @@ retry:
break;
case 2:
rds.block |= V4L2_RDS_BLOCK_ERROR;
- rds.lsb = prandom_u32_max(256);
- rds.msb = prandom_u32_max(256);
+ rds.lsb = get_random_u8();
+ rds.msb = get_random_u8();
break;
case 3: /* Skip block altogether */
if (i)
diff --git a/drivers/media/test-drivers/vivid/vivid-touch-cap.c b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
index 64e3e4cb30c2..6cc32eb54f9d 100644
--- a/drivers/media/test-drivers/vivid/vivid-touch-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
@@ -210,7 +210,7 @@ static void vivid_fill_buff_noise(__s16 *tch_buf, int size)
/* Fill 10% of the values within range -3 and 3, zero the others */
for (i = 0; i < size; i++) {
- unsigned int rand = get_random_int();
+ unsigned int rand = get_random_u32();
if (rand % 10)
tch_buf[i] = 0;
@@ -221,7 +221,7 @@ static void vivid_fill_buff_noise(__s16 *tch_buf, int size)
static inline int get_random_pressure(void)
{
- return get_random_int() % VIVID_PRESSURE_LIMIT;
+ return prandom_u32_max(VIVID_PRESSURE_LIMIT);
}
static void vivid_tch_buf_set(struct v4l2_pix_format *f,
@@ -272,7 +272,7 @@ void vivid_fillbuff_tch(struct vivid_dev *dev, struct vivid_buffer *buf)
return;
if (test_pat_idx == 0)
- dev->tch_pat_random = get_random_int();
+ dev->tch_pat_random = get_random_u32();
rand = dev->tch_pat_random;
switch (test_pattern) {
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index e012b21c4fd7..790787f0eba8 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -425,12 +425,14 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb)
static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
{
- u16 frame_size = le16_to_cpu(
- fc_usb->uintf->cur_altsetting->endpoint[0].desc.wMaxPacketSize);
- int bufsize = B2C2_USB_NUM_ISO_URB * B2C2_USB_FRAMES_PER_ISO *
- frame_size, i, j, ret;
+ struct usb_host_interface *alt = fc_usb->uintf->cur_altsetting;
+ u16 frame_size;
+ int bufsize, i, j, ret;
int buffer_offset = 0;
+ frame_size = usb_endpoint_maxp(&alt->endpoint[0].desc);
+ bufsize = B2C2_USB_NUM_ISO_URB * B2C2_USB_FRAMES_PER_ISO * frame_size;
+
deb_ts("creating %d iso-urbs with %d frames each of %d bytes size = %d.\n",
B2C2_USB_NUM_ISO_URB,
B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
@@ -501,17 +503,21 @@ urb_error:
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
- /* use the alternate setting with the larges buffer */
- int ret = usb_set_interface(fc_usb->udev, 0, 1);
+ struct usb_host_interface *alt;
+ int ret;
+ /* use the alternate setting with the largest buffer */
+ ret = usb_set_interface(fc_usb->udev, 0, 1);
if (ret) {
err("set interface failed.");
return ret;
}
- if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
+ alt = fc_usb->uintf->cur_altsetting;
+
+ if (alt->desc.bNumEndpoints < 1)
return -ENODEV;
- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
+ if (!usb_endpoint_is_isoc_in(&alt->endpoint[0].desc))
return -ENODEV;
switch (fc_usb->udev->speed) {
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 97f312020516..c95a2229f4fa 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-uvc.h>
#include "uvcvideo.h"
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 744051b52e12..215fb483efb0 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -20,6 +20,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-uvc.h>
#include "uvcvideo.h"
@@ -35,198 +36,6 @@ unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
/* ------------------------------------------------------------------------
- * Video formats
- */
-
-static struct uvc_format_desc uvc_fmts[] = {
- {
- .name = "YUV 4:2:2 (YUYV)",
- .guid = UVC_GUID_FORMAT_YUY2,
- .fcc = V4L2_PIX_FMT_YUYV,
- },
- {
- .name = "YUV 4:2:2 (YUYV)",
- .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
- .fcc = V4L2_PIX_FMT_YUYV,
- },
- {
- .name = "YUV 4:2:0 (NV12)",
- .guid = UVC_GUID_FORMAT_NV12,
- .fcc = V4L2_PIX_FMT_NV12,
- },
- {
- .name = "MJPEG",
- .guid = UVC_GUID_FORMAT_MJPEG,
- .fcc = V4L2_PIX_FMT_MJPEG,
- },
- {
- .name = "YVU 4:2:0 (YV12)",
- .guid = UVC_GUID_FORMAT_YV12,
- .fcc = V4L2_PIX_FMT_YVU420,
- },
- {
- .name = "YUV 4:2:0 (I420)",
- .guid = UVC_GUID_FORMAT_I420,
- .fcc = V4L2_PIX_FMT_YUV420,
- },
- {
- .name = "YUV 4:2:0 (M420)",
- .guid = UVC_GUID_FORMAT_M420,
- .fcc = V4L2_PIX_FMT_M420,
- },
- {
- .name = "YUV 4:2:2 (UYVY)",
- .guid = UVC_GUID_FORMAT_UYVY,
- .fcc = V4L2_PIX_FMT_UYVY,
- },
- {
- .name = "Greyscale 8-bit (Y800)",
- .guid = UVC_GUID_FORMAT_Y800,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 8-bit (Y8 )",
- .guid = UVC_GUID_FORMAT_Y8,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 8-bit (D3DFMT_L8)",
- .guid = UVC_GUID_FORMAT_D3DFMT_L8,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "IR 8-bit (L8_IR)",
- .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 10-bit (Y10 )",
- .guid = UVC_GUID_FORMAT_Y10,
- .fcc = V4L2_PIX_FMT_Y10,
- },
- {
- .name = "Greyscale 12-bit (Y12 )",
- .guid = UVC_GUID_FORMAT_Y12,
- .fcc = V4L2_PIX_FMT_Y12,
- },
- {
- .name = "Greyscale 16-bit (Y16 )",
- .guid = UVC_GUID_FORMAT_Y16,
- .fcc = V4L2_PIX_FMT_Y16,
- },
- {
- .name = "BGGR Bayer (BY8 )",
- .guid = UVC_GUID_FORMAT_BY8,
- .fcc = V4L2_PIX_FMT_SBGGR8,
- },
- {
- .name = "BGGR Bayer (BA81)",
- .guid = UVC_GUID_FORMAT_BA81,
- .fcc = V4L2_PIX_FMT_SBGGR8,
- },
- {
- .name = "GBRG Bayer (GBRG)",
- .guid = UVC_GUID_FORMAT_GBRG,
- .fcc = V4L2_PIX_FMT_SGBRG8,
- },
- {
- .name = "GRBG Bayer (GRBG)",
- .guid = UVC_GUID_FORMAT_GRBG,
- .fcc = V4L2_PIX_FMT_SGRBG8,
- },
- {
- .name = "RGGB Bayer (RGGB)",
- .guid = UVC_GUID_FORMAT_RGGB,
- .fcc = V4L2_PIX_FMT_SRGGB8,
- },
- {
- .name = "RGB565",
- .guid = UVC_GUID_FORMAT_RGBP,
- .fcc = V4L2_PIX_FMT_RGB565,
- },
- {
- .name = "BGR 8:8:8 (BGR3)",
- .guid = UVC_GUID_FORMAT_BGR3,
- .fcc = V4L2_PIX_FMT_BGR24,
- },
- {
- .name = "H.264",
- .guid = UVC_GUID_FORMAT_H264,
- .fcc = V4L2_PIX_FMT_H264,
- },
- {
- .name = "H.265",
- .guid = UVC_GUID_FORMAT_H265,
- .fcc = V4L2_PIX_FMT_HEVC,
- },
- {
- .name = "Greyscale 8 L/R (Y8I)",
- .guid = UVC_GUID_FORMAT_Y8I,
- .fcc = V4L2_PIX_FMT_Y8I,
- },
- {
- .name = "Greyscale 12 L/R (Y12I)",
- .guid = UVC_GUID_FORMAT_Y12I,
- .fcc = V4L2_PIX_FMT_Y12I,
- },
- {
- .name = "Depth data 16-bit (Z16)",
- .guid = UVC_GUID_FORMAT_Z16,
- .fcc = V4L2_PIX_FMT_Z16,
- },
- {
- .name = "Bayer 10-bit (SRGGB10P)",
- .guid = UVC_GUID_FORMAT_RW10,
- .fcc = V4L2_PIX_FMT_SRGGB10P,
- },
- {
- .name = "Bayer 16-bit (SBGGR16)",
- .guid = UVC_GUID_FORMAT_BG16,
- .fcc = V4L2_PIX_FMT_SBGGR16,
- },
- {
- .name = "Bayer 16-bit (SGBRG16)",
- .guid = UVC_GUID_FORMAT_GB16,
- .fcc = V4L2_PIX_FMT_SGBRG16,
- },
- {
- .name = "Bayer 16-bit (SRGGB16)",
- .guid = UVC_GUID_FORMAT_RG16,
- .fcc = V4L2_PIX_FMT_SRGGB16,
- },
- {
- .name = "Bayer 16-bit (SGRBG16)",
- .guid = UVC_GUID_FORMAT_GR16,
- .fcc = V4L2_PIX_FMT_SGRBG16,
- },
- {
- .name = "Depth data 16-bit (Z16)",
- .guid = UVC_GUID_FORMAT_INVZ,
- .fcc = V4L2_PIX_FMT_Z16,
- },
- {
- .name = "Greyscale 10-bit (Y10 )",
- .guid = UVC_GUID_FORMAT_INVI,
- .fcc = V4L2_PIX_FMT_Y10,
- },
- {
- .name = "IR:Depth 26-bit (INZI)",
- .guid = UVC_GUID_FORMAT_INZI,
- .fcc = V4L2_PIX_FMT_INZI,
- },
- {
- .name = "4-bit Depth Confidence (Packed)",
- .guid = UVC_GUID_FORMAT_CNF4,
- .fcc = V4L2_PIX_FMT_CNF4,
- },
- {
- .name = "HEVC",
- .guid = UVC_GUID_FORMAT_HEVC,
- .fcc = V4L2_PIX_FMT_HEVC,
- },
-};
-
-/* ------------------------------------------------------------------------
* Utility functions
*/
@@ -245,19 +54,6 @@ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
return NULL;
}
-static struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
-{
- unsigned int len = ARRAY_SIZE(uvc_fmts);
- unsigned int i;
-
- for (i = 0; i < len; ++i) {
- if (memcmp(guid, uvc_fmts[i].guid, 16) == 0)
- return &uvc_fmts[i];
- }
-
- return NULL;
-}
-
static enum v4l2_colorspace uvc_colorspace(const u8 primaries)
{
static const enum v4l2_colorspace colorprimaries[] = {
@@ -329,90 +125,6 @@ static enum v4l2_ycbcr_encoding uvc_ycbcr_enc(const u8 matrix_coefficients)
return V4L2_YCBCR_ENC_DEFAULT; /* Reserved */
}
-/*
- * Simplify a fraction using a simple continued fraction decomposition. The
- * idea here is to convert fractions such as 333333/10000000 to 1/30 using
- * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
- * arbitrary parameters to remove non-significative terms from the simple
- * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
- * respectively seems to give nice results.
- */
-void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
- unsigned int n_terms, unsigned int threshold)
-{
- u32 *an;
- u32 x, y, r;
- unsigned int i, n;
-
- an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
- if (an == NULL)
- return;
-
- /*
- * Convert the fraction to a simple continued fraction. See
- * https://en.wikipedia.org/wiki/Continued_fraction
- * Stop if the current term is bigger than or equal to the given
- * threshold.
- */
- x = *numerator;
- y = *denominator;
-
- for (n = 0; n < n_terms && y != 0; ++n) {
- an[n] = x / y;
- if (an[n] >= threshold) {
- if (n < 2)
- n++;
- break;
- }
-
- r = x - an[n] * y;
- x = y;
- y = r;
- }
-
- /* Expand the simple continued fraction back to an integer fraction. */
- x = 0;
- y = 1;
-
- for (i = n; i > 0; --i) {
- r = y;
- y = an[i-1] * y + x;
- x = r;
- }
-
- *numerator = y;
- *denominator = x;
- kfree(an);
-}
-
-/*
- * Convert a fraction to a frame interval in 100ns multiples. The idea here is
- * to compute numerator / denominator * 10000000 using 32 bit fixed point
- * arithmetic only.
- */
-u32 uvc_fraction_to_interval(u32 numerator, u32 denominator)
-{
- u32 multiplier;
-
- /* Saturate the result if the operation would overflow. */
- if (denominator == 0 ||
- numerator/denominator >= ((u32)-1)/10000000)
- return (u32)-1;
-
- /*
- * Divide both the denominator and the multiplier by two until
- * numerator * multiplier doesn't overflow. If anyone knows a better
- * algorithm please let me know.
- */
- multiplier = 10000000;
- while (numerator > ((u32)-1)/multiplier) {
- multiplier /= 2;
- denominator /= 2;
- }
-
- return denominator ? numerator * multiplier / denominator : 0;
-}
-
/* ------------------------------------------------------------------------
* Terminal and unit management
*/
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 4cc3fa6b8c98..f4d4c33b6dfb 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -386,7 +386,7 @@ static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream,
mutex_unlock(&stream->mutex);
denominator = 10000000;
- uvc_simplify_fraction(&numerator, &denominator, 8, 333);
+ v4l2_simplify_fraction(&numerator, &denominator, 8, 333);
memset(parm, 0, sizeof(*parm));
parm->type = stream->type;
@@ -427,7 +427,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
else
timeperframe = parm->parm.output.timeperframe;
- interval = uvc_fraction_to_interval(timeperframe.numerator,
+ interval = v4l2_fraction_to_interval(timeperframe.numerator,
timeperframe.denominator);
uvc_dbg(stream->dev, FORMAT, "Setting frame interval to %u/%u (%u)\n",
timeperframe.numerator, timeperframe.denominator, interval);
@@ -481,7 +481,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
/* Return the actual frame period. */
timeperframe.numerator = probe.dwFrameInterval;
timeperframe.denominator = 10000000;
- uvc_simplify_fraction(&timeperframe.numerator,
+ v4l2_simplify_fraction(&timeperframe.numerator,
&timeperframe.denominator, 8, 333);
if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -1275,7 +1275,7 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
fival->discrete.numerator =
frame->dwFrameInterval[index];
fival->discrete.denominator = 10000000;
- uvc_simplify_fraction(&fival->discrete.numerator,
+ v4l2_simplify_fraction(&fival->discrete.numerator,
&fival->discrete.denominator, 8, 333);
} else {
fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
@@ -1285,11 +1285,11 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
fival->stepwise.max.denominator = 10000000;
fival->stepwise.step.numerator = frame->dwFrameInterval[2];
fival->stepwise.step.denominator = 10000000;
- uvc_simplify_fraction(&fival->stepwise.min.numerator,
+ v4l2_simplify_fraction(&fival->stepwise.min.numerator,
&fival->stepwise.min.denominator, 8, 333);
- uvc_simplify_fraction(&fival->stepwise.max.numerator,
+ v4l2_simplify_fraction(&fival->stepwise.max.numerator,
&fival->stepwise.max.denominator, 8, 333);
- uvc_simplify_fraction(&fival->stepwise.step.numerator,
+ v4l2_simplify_fraction(&fival->stepwise.step.numerator,
&fival->stepwise.step.denominator, 8, 333);
}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 24c911aeebce..df93db259312 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -42,144 +42,6 @@
#define UVC_EXT_GPIO_UNIT_ID 0x100
/* ------------------------------------------------------------------------
- * GUIDs
- */
-#define UVC_GUID_UVC_CAMERA \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
-#define UVC_GUID_UVC_OUTPUT \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}
-#define UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
-#define UVC_GUID_UVC_PROCESSING \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}
-#define UVC_GUID_UVC_SELECTOR \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
-#define UVC_GUID_EXT_GPIO_CONTROLLER \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
-
-#define UVC_GUID_FORMAT_MJPEG \
- { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_YUY2 \
- { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_YUY2_ISIGHT \
- { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_NV12 \
- { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_YV12 \
- { 'Y', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_I420 \
- { 'I', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_UYVY \
- { 'U', 'Y', 'V', 'Y', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y800 \
- { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y8 \
- { 'Y', '8', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y10 \
- { 'Y', '1', '0', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y12 \
- { 'Y', '1', '2', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y16 \
- { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_BY8 \
- { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_BA81 \
- { 'B', 'A', '8', '1', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_GBRG \
- { 'G', 'B', 'R', 'G', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_GRBG \
- { 'G', 'R', 'B', 'G', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_RGGB \
- { 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_BG16 \
- { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_GB16 \
- { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_RG16 \
- { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_GR16 \
- { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_RGBP \
- { 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_BGR3 \
- { 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
- 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
-#define UVC_GUID_FORMAT_M420 \
- { 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
-#define UVC_GUID_FORMAT_H264 \
- { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_H265 \
- { 'H', '2', '6', '5', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y8I \
- { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y12I \
- { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Z16 \
- { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_RW10 \
- { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_INVZ \
- { 'I', 'N', 'V', 'Z', 0x90, 0x2d, 0x58, 0x4a, \
- 0x92, 0x0b, 0x77, 0x3f, 0x1f, 0x2c, 0x55, 0x6b}
-#define UVC_GUID_FORMAT_INZI \
- { 'I', 'N', 'Z', 'I', 0x66, 0x1a, 0x42, 0xa2, \
- 0x90, 0x65, 0xd0, 0x18, 0x14, 0xa8, 0xef, 0x8a}
-#define UVC_GUID_FORMAT_INVI \
- { 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \
- 0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f}
-#define UVC_GUID_FORMAT_CNF4 \
- { 'C', ' ', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
-#define UVC_GUID_FORMAT_D3DFMT_L8 \
- {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
- {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
-#define UVC_GUID_FORMAT_HEVC \
- { 'H', 'E', 'V', 'C', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
-
-/* ------------------------------------------------------------------------
* Driver specific constants.
*/
@@ -283,12 +145,6 @@ struct uvc_control {
struct uvc_fh *handle; /* File handle that last changed the control. */
};
-struct uvc_format_desc {
- char *name;
- u8 guid[16];
- u32 fcc;
-};
-
/*
* The term 'entity' refers to both UVC units and UVC terminals.
*
@@ -911,9 +767,6 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
/* Utility functions */
-void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
- unsigned int n_terms, unsigned int threshold);
-u32 uvc_fraction_to_interval(u32 numerator, u32 denominator);
struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
u8 epaddr);
u16 uvc_endpoint_max_bpi(struct usb_device *dev, struct usb_host_endpoint *ep);
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index e0fbe6ba4b6c..40f56e044640 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -484,3 +484,89 @@ s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
+
+/*
+ * Simplify a fraction using a simple continued fraction decomposition. The
+ * idea here is to convert fractions such as 333333/10000000 to 1/30 using
+ * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
+ * arbitrary parameters to remove non-significative terms from the simple
+ * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
+ * respectively seems to give nice results.
+ */
+void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
+ unsigned int n_terms, unsigned int threshold)
+{
+ u32 *an;
+ u32 x, y, r;
+ unsigned int i, n;
+
+ an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
+ if (an == NULL)
+ return;
+
+ /*
+ * Convert the fraction to a simple continued fraction. See
+ * https://en.wikipedia.org/wiki/Continued_fraction
+ * Stop if the current term is bigger than or equal to the given
+ * threshold.
+ */
+ x = *numerator;
+ y = *denominator;
+
+ for (n = 0; n < n_terms && y != 0; ++n) {
+ an[n] = x / y;
+ if (an[n] >= threshold) {
+ if (n < 2)
+ n++;
+ break;
+ }
+
+ r = x - an[n] * y;
+ x = y;
+ y = r;
+ }
+
+ /* Expand the simple continued fraction back to an integer fraction. */
+ x = 0;
+ y = 1;
+
+ for (i = n; i > 0; --i) {
+ r = y;
+ y = an[i-1] * y + x;
+ x = r;
+ }
+
+ *numerator = y;
+ *denominator = x;
+ kfree(an);
+}
+EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
+
+/*
+ * Convert a fraction to a frame interval in 100ns multiples. The idea here is
+ * to compute numerator / denominator * 10000000 using 32 bit fixed point
+ * arithmetic only.
+ */
+u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
+{
+ u32 multiplier;
+
+ /* Saturate the result if the operation would overflow. */
+ if (denominator == 0 ||
+ numerator/denominator >= ((u32)-1)/10000000)
+ return (u32)-1;
+
+ /*
+ * Divide both the denominator and the multiplier by two until
+ * numerator * multiplier doesn't overflow. If anyone knows a better
+ * algorithm please let me know.
+ */
+ multiplier = 10000000;
+ while (numerator > ((u32)-1)/multiplier) {
+ multiplier /= 2;
+ denominator /= 2;
+ }
+
+ return denominator ? numerator * multiplier / denominator : 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
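The two helpers moved into v4l2-common.c above are easiest to follow with a worked example. A frame interval of 333333 (in 100 ns units) is the fraction 333333/10000000 s; its continued fraction expansion begins [0; 30, ...], and truncating it at the 333 threshold leaves 1/30 s, i.e. a 30 fps frame period. The reverse helper turns 1/30 back into 100 ns units. Sketch of the expected values only, not driver code:

    u32 num = 333333, den = 10000000;

    v4l2_simplify_fraction(&num, &den, 8, 333);      /* num == 1, den == 30 */

    u32 interval = v4l2_fraction_to_interval(1, 30); /* 10000000 / 30 == 333333 */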
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index f9ee957072c3..52c7020c9d19 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -620,7 +620,6 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
mpt_ioctl_header __user *uhdr = (void __user *) arg;
mpt_ioctl_header khdr;
- int iocnum;
unsigned iocnumX;
int nonblock = (file->f_flags & O_NONBLOCK);
int ret;
@@ -634,12 +633,11 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
ret = -ENXIO; /* (-6) No such device or address */
- /* Verify intended MPT adapter - set iocnum and the adapter
+ /* Verify intended MPT adapter - set iocnumX and the adapter
* pointer (iocp)
*/
iocnumX = khdr.iocnum & 0xFF;
- if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
- (iocp == NULL))
+ if ((mpt_verify_adapter(iocnumX, &iocp) < 0) || (iocp == NULL))
return -ENODEV;
if (!iocp->active) {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index c3dd1fe8d8c9..8b93856de432 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -589,8 +589,8 @@ config LPC_SCH
config INTEL_SOC_PMIC
bool "Support for Crystal Cove PMIC"
- depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
- depends on X86 || COMPILE_TEST
+ depends on HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
+ depends on (X86 && ACPI) || COMPILE_TEST
depends on I2C_DESIGNWARE_PLATFORM=y
select MFD_CORE
select REGMAP_I2C
@@ -938,6 +938,22 @@ config MFD_MT6360
PMIC part includes 2-channel BUCKs and 2-channel LDOs
LDO part includes 4-channel LDOs
+config MFD_MT6370
+ tristate "MediaTek MT6370 SubPMIC"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C
+ help
+ Say Y here to enable MT6370 SubPMIC functional support.
+ It consists of a single cell battery charger with ADC monitoring, RGB
+ LEDs, dual channel flashlight, WLED backlight driver, display bias
+ voltage supply, one general purpose LDO, and a USB Type-C & PD
+ controller that complies with the latest USB Type-C and PD standards.
+
+ This driver can also be built as a module. If so, the module
+ will be called "mt6370".
+
config MFD_MT6397
tristate "MediaTek MT6397 PMIC Support"
select MFD_CORE
@@ -1117,6 +1133,16 @@ config MFD_SPMI_PMIC
Say M here if you want to include support for the SPMI PMIC
series as a module. The module will be called "qcom-spmi-pmic".
+config MFD_SY7636A
+ tristate "Silergy SY7636A voltage regulator"
+ depends on I2C
+ select MFD_SIMPLE_MFD_I2C
+ help
+ Enable support for Silergy SY7636A voltage regulator.
+
+ To enable support for building sub-devices as modules,
+ choose M here.
+
config MFD_RDC321X
tristate "RDC R-321x southbridge"
select MFD_CORE
@@ -1149,6 +1175,18 @@ config MFD_RT5033
sub-devices like charger, fuel gauge, flash LED, current source,
LDO and Buck.
+config MFD_RT5120
+ tristate "Richtek RT5120 Power Management IC"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ This enables support for the Richtek RT5120 PMIC. It includes four high
+ efficiency buck converters and one LDO voltage regulator. The device
+ is targeted at providing the CPU voltage, memory, I/O and peripheral
+ power rails in home entertainment devices.
+
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
depends on I2C=y
@@ -1224,7 +1262,7 @@ config MFD_SI476X_CORE
module will be called si476x-core.
config MFD_SIMPLE_MFD_I2C
- tristate "Simple Multi-Functional Device support (I2C)"
+ tristate
depends on I2C
select MFD_CORE
select REGMAP_I2C
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0004b7e86220..7ed3ef4a698c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -175,6 +175,11 @@ obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o
obj-$(CONFIG_MFD_MP2629) += mp2629.o
+obj-$(CONFIG_MFD_MT6360) += mt6360-core.o
+obj-$(CONFIG_MFD_MT6370) += mt6370.o
+mt6397-objs := mt6397-core.o mt6397-irq.o mt6358-irq.o
+obj-$(CONFIG_MFD_MT6397) += mt6397.o
+
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
@@ -237,16 +242,13 @@ obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o
obj-$(CONFIG_MFD_DLN2) += dln2.o
obj-$(CONFIG_MFD_RT4831) += rt4831.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
+obj-$(CONFIG_MFD_RT5120) += rt5120.o
obj-$(CONFIG_MFD_SKY81452) += sky81452.o
-intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
-obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
+obj-$(CONFIG_INTEL_SOC_PMIC) += intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
-obj-$(CONFIG_MFD_MT6360) += mt6360-core.o
-mt6397-objs := mt6397-core.o mt6397-irq.o mt6358-irq.o
-obj-$(CONFIG_MFD_MT6397) += mt6397.o
obj-$(CONFIG_INTEL_SOC_PMIC_MRFLD) += intel_soc_pmic_mrfld.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 0a80d82c6858..a26e473507c7 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -453,6 +453,7 @@ static const struct regmap_range da9061_aa_writeable_ranges[] = {
regmap_reg_range(DA9062AA_VBUCK1_B, DA9062AA_VBUCK4_B),
regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B),
regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B),
+ regmap_reg_range(DA9062AA_CONFIG_J, DA9062AA_CONFIG_J),
regmap_reg_range(DA9062AA_GP_ID_0, DA9062AA_GP_ID_19),
};
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 37e5e02a1d05..823595bcc9b7 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -69,7 +69,7 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
int irq;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
+ if (irq < 0)
return irq;
tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops,
@@ -84,6 +84,19 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
return 0;
}
+static int mx25_tsadc_unset_irq(struct platform_device *pdev)
+{
+ struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0) {
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(tsadc->domain);
+ }
+
+ return 0;
+}
+
static void mx25_tsadc_setup_clk(struct platform_device *pdev,
struct mx25_tsadc *tsadc)
{
@@ -171,18 +184,21 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tsadc);
- return devm_of_platform_populate(dev);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ goto err_irq;
+
+ return 0;
+
+err_irq:
+ mx25_tsadc_unset_irq(pdev);
+
+ return ret;
}
static int mx25_tsadc_remove(struct platform_device *pdev)
{
- struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (irq) {
- irq_set_chained_handler_and_data(irq, NULL, NULL);
- irq_domain_remove(tsadc->domain);
- }
+ mx25_tsadc_unset_irq(pdev);
return 0;
}
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 417b0355d904..b45b1346ab54 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -20,7 +20,9 @@
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/htcpld.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
struct htcpld_chip {
@@ -58,8 +60,8 @@ struct htcpld_data {
uint irq_start;
int nirqs;
uint chained_irq;
- unsigned int int_reset_gpio_hi;
- unsigned int int_reset_gpio_lo;
+ struct gpio_desc *int_reset_gpio_hi;
+ struct gpio_desc *int_reset_gpio_lo;
/* htcpld info */
struct htcpld_chip *chip;
@@ -196,9 +198,9 @@ static irqreturn_t htcpld_handler(int irq, void *dev)
* be asserted.
*/
if (htcpld->int_reset_gpio_hi)
- gpio_set_value(htcpld->int_reset_gpio_hi, 1);
+ gpiod_set_value(htcpld->int_reset_gpio_hi, 1);
if (htcpld->int_reset_gpio_lo)
- gpio_set_value(htcpld->int_reset_gpio_lo, 0);
+ gpiod_set_value(htcpld->int_reset_gpio_lo, 0);
return IRQ_HANDLED;
}
@@ -352,7 +354,7 @@ static int htcpld_register_chip_i2c(
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = plat_chip_data->addr;
- strlcpy(info.type, "htcpld-chip", I2C_NAME_SIZE);
+ strscpy(info.type, "htcpld-chip", I2C_NAME_SIZE);
info.platform_data = chip;
/* Add the I2C device. This calls the probe() function. */
@@ -562,34 +564,28 @@ static int htcpld_core_probe(struct platform_device *pdev)
return ret;
/* Request the GPIO(s) for the int reset and set them up */
- if (pdata->int_reset_gpio_hi) {
- ret = gpio_request(pdata->int_reset_gpio_hi, "htcpld-core");
- if (ret) {
- /*
- * If it failed, that sucks, but we can probably
- * continue on without it.
- */
- dev_warn(dev, "Unable to request int_reset_gpio_hi -- interrupts may not work\n");
- htcpld->int_reset_gpio_hi = 0;
- } else {
- htcpld->int_reset_gpio_hi = pdata->int_reset_gpio_hi;
- gpio_set_value(htcpld->int_reset_gpio_hi, 1);
- }
+ htcpld->int_reset_gpio_hi = gpiochip_request_own_desc(&htcpld->chip[2].chip_out,
+ 7, "htcpld-core", GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(htcpld->int_reset_gpio_hi)) {
+ /*
+ * If it failed, that sucks, but we can probably
+ * continue on without it.
+ */
+ htcpld->int_reset_gpio_hi = NULL;
+ dev_warn(dev, "Unable to request int_reset_gpio_hi -- interrupts may not work\n");
}
- if (pdata->int_reset_gpio_lo) {
- ret = gpio_request(pdata->int_reset_gpio_lo, "htcpld-core");
- if (ret) {
- /*
- * If it failed, that sucks, but we can probably
- * continue on without it.
- */
- dev_warn(dev, "Unable to request int_reset_gpio_lo -- interrupts may not work\n");
- htcpld->int_reset_gpio_lo = 0;
- } else {
- htcpld->int_reset_gpio_lo = pdata->int_reset_gpio_lo;
- gpio_set_value(htcpld->int_reset_gpio_lo, 0);
- }
+ htcpld->int_reset_gpio_lo = gpiochip_request_own_desc(&htcpld->chip[2].chip_out,
+ 0, "htcpld-core", GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(htcpld->int_reset_gpio_lo)) {
+ /*
+ * If it failed, that sucks, but we can probably
+ * continue on without it.
+ */
+ htcpld->int_reset_gpio_lo = NULL;
+ dev_warn(dev, "Unable to request int_reset_gpio_lo -- interrupts may not work\n");
}
dev_info(dev, "Initialized successfully\n");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index bb08b7a73fe1..dde31c50a632 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -14,6 +14,7 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/pxa2xx_ssp.h>
#include "intel-lpss.h"
@@ -73,8 +74,18 @@ static void intel_lpss_pci_remove(struct pci_dev *pdev)
static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
+static const struct property_entry spt_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_SPT_SSP),
+ { }
+};
+
+static const struct software_node spt_spi_node = {
+ .properties = spt_spi_properties,
+};
+
static const struct intel_lpss_platform_info spt_info = {
.clk_rate = 120000000,
+ .swnode = &spt_spi_node,
};
static const struct property_entry spt_i2c_properties[] = {
@@ -108,8 +119,18 @@ static const struct intel_lpss_platform_info spt_uart_info = {
.swnode = &uart_node,
};
+static const struct property_entry bxt_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BXT_SSP),
+ { }
+};
+
+static const struct software_node bxt_spi_node = {
+ .properties = bxt_spi_properties,
+};
+
static const struct intel_lpss_platform_info bxt_info = {
.clk_rate = 100000000,
+ .swnode = &bxt_spi_node,
};
static const struct intel_lpss_platform_info bxt_uart_info = {
@@ -166,6 +187,20 @@ static const struct intel_lpss_platform_info glk_i2c_info = {
.swnode = &glk_i2c_node,
};
+static const struct property_entry cnl_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_CNL_SSP),
+ { }
+};
+
+static const struct software_node cnl_spi_node = {
+ .properties = cnl_spi_properties,
+};
+
+static const struct intel_lpss_platform_info cnl_info = {
+ .clk_rate = 120000000,
+ .swnode = &cnl_spi_node,
+};
+
static const struct intel_lpss_platform_info cnl_i2c_info = {
.clk_rate = 216000000,
.swnode = &spt_i2c_node,
@@ -176,12 +211,26 @@ static const struct intel_lpss_platform_info ehl_i2c_info = {
.swnode = &bxt_i2c_node,
};
+static const struct property_entry tgl_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_CNL_SSP),
+ { }
+};
+
+static const struct software_node tgl_spi_node = {
+ .properties = tgl_spi_properties,
+};
+
+static const struct intel_lpss_platform_info tgl_info = {
+ .clk_rate = 100000000,
+ .swnode = &tgl_spi_node,
+};
+
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CML-LP */
{ PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x02ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x02ab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x02c5), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02c6), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02c7), (kernel_ulong_t)&spt_uart_info },
@@ -189,18 +238,18 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02e9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02ea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&cnl_info },
/* CML-H */
{ PCI_VDEVICE(INTEL, 0x06a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x06a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x06c7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x06e8), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06e9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06ea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06eb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&cnl_info },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
@@ -255,8 +304,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* ICL-LP */
{ PCI_VDEVICE(INTEL, 0x34a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x34a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x34aa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x34ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x34aa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x34ab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x34c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34c7), (kernel_ulong_t)&spt_uart_info },
@@ -264,15 +313,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x34e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&cnl_info },
/* ICL-N */
{ PCI_VDEVICE(INTEL, 0x38a8), (kernel_ulong_t)&spt_uart_info },
/* TGL-H */
{ PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info },
@@ -281,8 +330,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&tgl_info },
/* EHL */
{ PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info },
@@ -301,8 +350,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* JSL */
{ PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4dc5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dc6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dc7), (kernel_ulong_t)&spt_uart_info },
@@ -310,12 +359,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4de9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4deb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&cnl_info },
/* ADL-P */
{ PCI_VDEVICE(INTEL, 0x51a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x51a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x51aa), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x51ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x51aa), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x51ab), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x51c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51c7), (kernel_ulong_t)&bxt_uart_info },
@@ -325,12 +374,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x51e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x51fb), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x51fb), (kernel_ulong_t)&tgl_info },
/* ADL-M */
{ PCI_VDEVICE(INTEL, 0x54a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x54a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x54aa), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x54ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x54aa), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x54ab), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x54c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54c7), (kernel_ulong_t)&bxt_uart_info },
@@ -338,7 +387,7 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x54e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x54fb), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x54fb), (kernel_ulong_t)&tgl_info },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
@@ -358,39 +407,39 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* RPL-S */
{ PCI_VDEVICE(INTEL, 0x7a28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7a29), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7a2a), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7a2b), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7a2a), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7a2b), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7a4c), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4d), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4e), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4f), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a5c), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7a79), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7a7b), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7a79), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7a7b), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7a7c), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a7d), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a7e), (kernel_ulong_t)&bxt_uart_info },
/* ADL-S */
{ PCI_VDEVICE(INTEL, 0x7aa8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7aa9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7aaa), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7aab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7aaa), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7aab), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7acc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7acd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7ace), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7acf), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7adc), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7af9), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7afb), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7af9), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7afb), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7afc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afe), (kernel_ulong_t)&bxt_uart_info },
/* MTL-P */
{ PCI_VDEVICE(INTEL, 0x7e25), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7e26), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7e50), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7e51), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7e52), (kernel_ulong_t)&bxt_uart_info },
@@ -424,8 +473,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CNL-LP */
{ PCI_VDEVICE(INTEL, 0x9da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9da9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info },
@@ -433,12 +482,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&cnl_info },
/* TGL-LP */
{ PCI_VDEVICE(INTEL, 0xa0a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa0aa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa0ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa0aa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0ab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0c5), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0c6), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0c7), (kernel_ulong_t)&bxt_uart_info },
@@ -448,15 +497,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa0db), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0dc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0dd), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa0de), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa0df), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa0de), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0df), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0e8), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0e9), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0ea), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0eb), (kernel_ulong_t)&spt_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa0fb), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa0fd), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa0fe), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa0fb), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0fd), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0fe), (kernel_ulong_t)&cnl_info },
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
@@ -479,14 +528,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CNL-H */
{ PCI_VDEVICE(INTEL, 0xa328), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa329), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&cnl_info },
/* CML-V */
{ PCI_VDEVICE(INTEL, 0xa3a7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa3a8), (kernel_ulong_t)&spt_uart_info },
diff --git a/drivers/mfd/intel-m10-bmc.c b/drivers/mfd/intel-m10-bmc.c
index f4d0d72573c8..7e3319e5b22f 100644
--- a/drivers/mfd/intel-m10-bmc.c
+++ b/drivers/mfd/intel-m10-bmc.c
@@ -21,6 +21,7 @@ enum m10bmc_type {
static struct mfd_cell m10bmc_d5005_subdevs[] = {
{ .name = "d5005bmc-hwmon" },
+ { .name = "d5005bmc-sec-update" }
};
static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 1c7577b881ff..282b8fd08009 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -140,7 +140,7 @@ static void chtdc_ti_shutdown(struct i2c_client *i2c)
disable_irq(pmic->irq);
}
-static int __maybe_unused chtdc_ti_suspend(struct device *dev)
+static int chtdc_ti_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -149,7 +149,7 @@ static int __maybe_unused chtdc_ti_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused chtdc_ti_resume(struct device *dev)
+static int chtdc_ti_resume(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -158,7 +158,7 @@ static int __maybe_unused chtdc_ti_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(chtdc_ti_pm_ops, chtdc_ti_suspend, chtdc_ti_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(chtdc_ti_pm_ops, chtdc_ti_suspend, chtdc_ti_resume);
static const struct acpi_device_id chtdc_ti_acpi_ids[] = {
{ "INT33F5" },
@@ -169,7 +169,7 @@ MODULE_DEVICE_TABLE(acpi, chtdc_ti_acpi_ids);
static struct i2c_driver chtdc_ti_i2c_driver = {
.driver = {
.name = "intel_soc_pmic_chtdc_ti",
- .pm = &chtdc_ti_pm_ops,
+ .pm = pm_sleep_ptr(&chtdc_ti_pm_ops),
.acpi_match_table = chtdc_ti_acpi_ids,
},
.probe_new = chtdc_ti_probe,
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
deleted file mode 100644
index b824e15f4d22..000000000000
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel SoC PMIC MFD Driver
- *
- * Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
- *
- * Author: Yang, Bin <bin.yang@intel.com>
- * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/intel_soc_pmic.h>
-#include <linux/platform_data/x86/soc.h>
-#include <linux/pwm.h>
-#include <linux/regmap.h>
-
-#include "intel_soc_pmic_core.h"
-
-/* PWM consumed by the Intel GFX */
-static struct pwm_lookup crc_pwm_lookup[] = {
- PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
-};
-
-static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
-{
- struct device *dev = &i2c->dev;
- struct intel_soc_pmic_config *config;
- struct intel_soc_pmic *pmic;
- int ret;
-
- if (soc_intel_is_byt())
- config = &intel_soc_pmic_config_byt_crc;
- else
- config = &intel_soc_pmic_config_cht_crc;
-
- pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic)
- return -ENOMEM;
-
- dev_set_drvdata(dev, pmic);
-
- pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
- if (IS_ERR(pmic->regmap))
- return PTR_ERR(pmic->regmap);
-
- pmic->irq = i2c->irq;
-
- ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
- config->irq_flags | IRQF_ONESHOT,
- 0, config->irq_chip,
- &pmic->irq_chip_data);
- if (ret)
- return ret;
-
- ret = enable_irq_wake(pmic->irq);
- if (ret)
- dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
-
- /* Add lookup table for crc-pwm */
- pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
-
- /* To distuingish this domain from the GPIO/charger's irqchip domains */
- irq_domain_update_bus_token(regmap_irq_get_domain(pmic->irq_chip_data),
- DOMAIN_BUS_NEXUS);
-
- ret = mfd_add_devices(dev, -1, config->cell_dev,
- config->n_cell_devs, NULL, 0,
- regmap_irq_get_domain(pmic->irq_chip_data));
- if (ret)
- goto err_del_irq_chip;
-
- return 0;
-
-err_del_irq_chip:
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
- return ret;
-}
-
-static void intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
-{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev);
-
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
-
- /* remove crc-pwm lookup table */
- pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
-
- mfd_remove_devices(&i2c->dev);
-}
-
-static void intel_soc_pmic_shutdown(struct i2c_client *i2c)
-{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev);
-
- disable_irq(pmic->irq);
-
- return;
-}
-
-#if defined(CONFIG_PM_SLEEP)
-static int intel_soc_pmic_suspend(struct device *dev)
-{
- struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
-
- disable_irq(pmic->irq);
-
- return 0;
-}
-
-static int intel_soc_pmic_resume(struct device *dev)
-{
- struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
-
- enable_irq(pmic->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(intel_soc_pmic_pm_ops, intel_soc_pmic_suspend,
- intel_soc_pmic_resume);
-
-static const struct i2c_device_id intel_soc_pmic_i2c_id[] = {
- { }
-};
-MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id);
-
-#if defined(CONFIG_ACPI)
-static const struct acpi_device_id intel_soc_pmic_acpi_match[] = {
- { "INT33FD" },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, intel_soc_pmic_acpi_match);
-#endif
-
-static struct i2c_driver intel_soc_pmic_i2c_driver = {
- .driver = {
- .name = "intel_soc_pmic_i2c",
- .pm = &intel_soc_pmic_pm_ops,
- .acpi_match_table = ACPI_PTR(intel_soc_pmic_acpi_match),
- },
- .probe = intel_soc_pmic_i2c_probe,
- .remove = intel_soc_pmic_i2c_remove,
- .id_table = intel_soc_pmic_i2c_id,
- .shutdown = intel_soc_pmic_shutdown,
-};
-
-module_i2c_driver(intel_soc_pmic_i2c_driver);
-
-MODULE_DESCRIPTION("I2C driver for Intel SoC PMIC");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Yang, Bin <bin.yang@intel.com>");
-MODULE_AUTHOR("Zhu, Lejun <lejun.zhu@linux.intel.com>");
diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h
deleted file mode 100644
index d490685845eb..000000000000
--- a/drivers/mfd/intel_soc_pmic_core.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Intel SoC PMIC MFD Driver
- *
- * Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
- *
- * Author: Yang, Bin <bin.yang@intel.com>
- * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
- */
-
-#ifndef __INTEL_SOC_PMIC_CORE_H__
-#define __INTEL_SOC_PMIC_CORE_H__
-
-struct intel_soc_pmic_config {
- unsigned long irq_flags;
- struct mfd_cell *cell_dev;
- int n_cell_devs;
- const struct regmap_config *regmap_config;
- const struct regmap_irq_chip *irq_chip;
-};
-
-extern struct intel_soc_pmic_config intel_soc_pmic_config_byt_crc;
-extern struct intel_soc_pmic_config intel_soc_pmic_config_cht_crc;
-
-#endif /* __INTEL_SOC_PMIC_CORE_H__ */
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 5bb0367bd974..b1548a933dc3 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -2,18 +2,21 @@
/*
* Device access for Crystal Cove PMIC
*
- * Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2012-2014, 2022 Intel Corporation. All rights reserved.
*
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
+#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/regmap.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
-
-#include "intel_soc_pmic_core.h"
+#include <linux/platform_data/x86/soc.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
#define CRYSTAL_COVE_MAX_REGISTER 0xC6
@@ -132,7 +135,20 @@ static const struct regmap_irq_chip crystal_cove_irq_chip = {
.mask_base = CRYSTAL_COVE_REG_MIRQLVL1,
};
-struct intel_soc_pmic_config intel_soc_pmic_config_byt_crc = {
+/* PWM consumed by the Intel GFX */
+static struct pwm_lookup crc_pwm_lookup[] = {
+ PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
+};
+
+struct crystal_cove_config {
+ unsigned long irq_flags;
+ struct mfd_cell *cell_dev;
+ int n_cell_devs;
+ const struct regmap_config *regmap_config;
+ const struct regmap_irq_chip *irq_chip;
+};
+
+static const struct crystal_cove_config crystal_cove_config_byt_crc = {
.irq_flags = IRQF_TRIGGER_RISING,
.cell_dev = crystal_cove_byt_dev,
.n_cell_devs = ARRAY_SIZE(crystal_cove_byt_dev),
@@ -140,10 +156,121 @@ struct intel_soc_pmic_config intel_soc_pmic_config_byt_crc = {
.irq_chip = &crystal_cove_irq_chip,
};
-struct intel_soc_pmic_config intel_soc_pmic_config_cht_crc = {
+static const struct crystal_cove_config crystal_cove_config_cht_crc = {
.irq_flags = IRQF_TRIGGER_RISING,
.cell_dev = crystal_cove_cht_dev,
.n_cell_devs = ARRAY_SIZE(crystal_cove_cht_dev),
.regmap_config = &crystal_cove_regmap_config,
.irq_chip = &crystal_cove_irq_chip,
};
+
+static int crystal_cove_i2c_probe(struct i2c_client *i2c)
+{
+ const struct crystal_cove_config *config;
+ struct device *dev = &i2c->dev;
+ struct intel_soc_pmic *pmic;
+ int ret;
+
+ if (soc_intel_is_byt())
+ config = &crystal_cove_config_byt_crc;
+ else
+ config = &crystal_cove_config_cht_crc;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, pmic);
+
+ pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
+
+ pmic->irq = i2c->irq;
+
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
+ config->irq_flags | IRQF_ONESHOT,
+ 0, config->irq_chip, &pmic->irq_chip_data);
+ if (ret)
+ return ret;
+
+ ret = enable_irq_wake(pmic->irq);
+ if (ret)
+ dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
+
+ /* Add lookup table for crc-pwm */
+ pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
+	/* To distinguish this domain from the GPIO/charger's irqchip domains */
+ irq_domain_update_bus_token(regmap_irq_get_domain(pmic->irq_chip_data),
+ DOMAIN_BUS_NEXUS);
+
+ ret = mfd_add_devices(dev, PLATFORM_DEVID_NONE, config->cell_dev,
+ config->n_cell_devs, NULL, 0,
+ regmap_irq_get_domain(pmic->irq_chip_data));
+ if (ret)
+ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
+ return ret;
+}
+
+static void crystal_cove_i2c_remove(struct i2c_client *i2c)
+{
+ /* remove crc-pwm lookup table */
+ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
+ mfd_remove_devices(&i2c->dev);
+}
+
+static void crystal_cove_shutdown(struct i2c_client *i2c)
+{
+ struct intel_soc_pmic *pmic = i2c_get_clientdata(i2c);
+
+ disable_irq(pmic->irq);
+
+ return;
+}
+
+static int crystal_cove_suspend(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ disable_irq(pmic->irq);
+
+ return 0;
+}
+
+static int crystal_cove_resume(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ enable_irq(pmic->irq);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(crystal_cove_pm_ops, crystal_cove_suspend, crystal_cove_resume);
+
+static const struct acpi_device_id crystal_cove_acpi_match[] = {
+ { "INT33FD" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, crystal_cove_acpi_match);
+
+static struct i2c_driver crystal_cove_i2c_driver = {
+ .driver = {
+ .name = "crystal_cove_i2c",
+ .pm = pm_sleep_ptr(&crystal_cove_pm_ops),
+ .acpi_match_table = crystal_cove_acpi_match,
+ },
+ .probe_new = crystal_cove_i2c_probe,
+ .remove = crystal_cove_i2c_remove,
+ .shutdown = crystal_cove_shutdown,
+};
+
+module_i2c_driver(crystal_cove_i2c_driver);
+
+MODULE_DESCRIPTION("I2C driver for Intel SoC PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yang, Bin <bin.yang@intel.com>");
+MODULE_AUTHOR("Zhu, Lejun <lejun.zhu@linux.intel.com>");
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index 348439a3fbbd..39006297f3d2 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -175,6 +175,7 @@ int lp8788_irq_init(struct lp8788 *lp, int irq)
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"lp8788-irq", irqd);
if (ret) {
+ irq_domain_remove(lp->irqdm);
dev_err(lp->dev, "failed to create a thread for IRQ_N\n");
return ret;
}
@@ -188,4 +189,6 @@ void lp8788_irq_exit(struct lp8788 *lp)
{
if (lp->irq)
free_irq(lp->irq, lp->irqdm);
+ if (lp->irqdm)
+ irq_domain_remove(lp->irqdm);
}
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index e7c601bca9ef..724a5712b36b 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -195,8 +195,16 @@ static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id)
if (ret)
return ret;
- return mfd_add_devices(lp->dev, -1, lp8788_devs,
- ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
+ ret = mfd_add_devices(lp->dev, -1, lp8788_devs,
+ ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
+ if (ret)
+ goto err_exit_irq;
+
+ return 0;
+
+err_exit_irq:
+ lp8788_irq_exit(lp);
+ return ret;
}
static void lp8788_remove(struct i2c_client *cl)
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 650951f89f1c..7b1c597b6879 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -959,7 +959,7 @@ static int lpc_ich_finalize_wdt_cell(struct pci_dev *dev)
info = &lpc_chipset_info[priv->chipset];
pdata->version = info->iTCO_version;
- strlcpy(pdata->name, info->name, sizeof(pdata->name));
+ strscpy(pdata->name, info->name, sizeof(pdata->name));
cell->platform_data = pdata;
cell->pdata_size = sizeof(*pdata);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 8b058200d5ad..16d1861e9682 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -105,7 +105,7 @@ static void mfd_acpi_add_device(const struct mfd_cell *cell,
.ids = ids,
};
- strlcpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
+ strscpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
acpi_dev_for_each_child(parent, match_device_ids, &wd);
adev = wd.adev;
} else {
@@ -368,6 +368,7 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
{
struct platform_device *pdev;
const struct mfd_cell *cell;
+ struct mfd_of_node_entry *of_entry, *tmp;
int *level = data;
if (dev->type != &mfd_dev_type)
@@ -382,6 +383,12 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
if (cell->swnode)
device_remove_software_node(&pdev->dev);
+ list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
+ if (of_entry->dev == &pdev->dev) {
+ list_del(&of_entry->list);
+ kfree(of_entry);
+ }
+
regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
cell->num_parent_supplies);
diff --git a/drivers/mfd/mt6370.c b/drivers/mfd/mt6370.c
new file mode 100644
index 000000000000..cf19cce2fdc0
--- /dev/null
+++ b/drivers/mfd/mt6370.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "mt6370.h"
+
+#define MT6370_REG_DEV_INFO 0x100
+#define MT6370_REG_CHG_IRQ1 0x1C0
+#define MT6370_REG_CHG_MASK1 0x1E0
+#define MT6370_REG_MAXADDR 0x1FF
+
+#define MT6370_VENID_MASK GENMASK(7, 4)
+
+#define MT6370_NUM_IRQREGS 16
+#define MT6370_USBC_I2CADDR 0x4E
+#define MT6370_MAX_ADDRLEN 2
+
+#define MT6370_VENID_RT5081 0x8
+#define MT6370_VENID_RT5081A 0xA
+#define MT6370_VENID_MT6370 0xE
+#define MT6370_VENID_MT6371 0xF
+#define MT6370_VENID_MT6372P 0x9
+#define MT6370_VENID_MT6372CP 0xB
+
+static const struct regmap_irq mt6370_irqs[] = {
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHGON, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_TREG, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_AICR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_MIVR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_PWR_RDY, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FL_CHG_VINOVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_VSYSUV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_VSYSOV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_VBATOV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_VINOVPCHG, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_BAT_COLD, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_BAT_COOL, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_BAT_WARM, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_BAT_HOT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_STATC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_FAULT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_STATC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_TMR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_BATABS, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_ADPBAD, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_RVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TSHUTDOWN, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_IINMEAS, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_ICCMEAS, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHGDET_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_WDTMR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_SSFINISH, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_RECHG, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_TERM, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_IEOC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_ADC_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_PUMPX_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BST_BATUV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BST_MIDOV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BST_OLP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_ATTACH, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DETACH, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_HVDCP_STPDONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_HVDCP_VBUSDET_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_HVDCP_DET, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHGDET, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DCDT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_VGOK, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_WDTMR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_UC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_OC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_OV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_SWON, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_UVP_D, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_UVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_OVP_D, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_OVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED_STRBPIN, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED_TORPIN, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED_TX, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED_LVF, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_SHORT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_SHORT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_STRB, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_STRB, 8),
+	REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_STRB_TO, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_STRB_TO, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_TOR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_TOR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OTP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_VDDA_OVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_VDDA_UV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_LDO_OC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BLED_OCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BLED_OVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_VNEG_OCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_VPOS_OCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_BST_OCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_VNEG_SCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_VPOS_SCP, 8),
+};
+
+static const struct regmap_irq_chip mt6370_irq_chip = {
+ .name = "mt6370-irqs",
+ .status_base = MT6370_REG_CHG_IRQ1,
+ .mask_base = MT6370_REG_CHG_MASK1,
+ .num_regs = MT6370_NUM_IRQREGS,
+ .irqs = mt6370_irqs,
+ .num_irqs = ARRAY_SIZE(mt6370_irqs),
+};
+
+static const struct resource mt6370_regulator_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_VPOS_SCP, "db_vpos_scp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_VNEG_SCP, "db_vneg_scp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_BST_OCP, "db_vbst_ocp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_VPOS_OCP, "db_vpos_ocp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_VNEG_OCP, "db_vneg_ocp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_LDO_OC, "ldo_oc"),
+};
+
+static const struct mfd_cell mt6370_devices[] = {
+ MFD_CELL_OF("mt6370-adc",
+ NULL, NULL, 0, 0, "mediatek,mt6370-adc"),
+ MFD_CELL_OF("mt6370-charger",
+ NULL, NULL, 0, 0, "mediatek,mt6370-charger"),
+ MFD_CELL_OF("mt6370-flashlight",
+ NULL, NULL, 0, 0, "mediatek,mt6370-flashlight"),
+ MFD_CELL_OF("mt6370-indicator",
+ NULL, NULL, 0, 0, "mediatek,mt6370-indicator"),
+ MFD_CELL_OF("mt6370-tcpc",
+ NULL, NULL, 0, 0, "mediatek,mt6370-tcpc"),
+ MFD_CELL_RES("mt6370-regulator", mt6370_regulator_irqs),
+};
+
+static const struct mfd_cell mt6370_exclusive_devices[] = {
+ MFD_CELL_OF("mt6370-backlight",
+ NULL, NULL, 0, 0, "mediatek,mt6370-backlight"),
+};
+
+static const struct mfd_cell mt6372_exclusive_devices[] = {
+ MFD_CELL_OF("mt6370-backlight",
+ NULL, NULL, 0, 0, "mediatek,mt6372-backlight"),
+};
+
+static int mt6370_check_vendor_info(struct device *dev, struct regmap *rmap,
+ int *vid)
+{
+ unsigned int devinfo;
+ int ret;
+
+ ret = regmap_read(rmap, MT6370_REG_DEV_INFO, &devinfo);
+ if (ret)
+ return ret;
+
+ *vid = FIELD_GET(MT6370_VENID_MASK, devinfo);
+ switch (*vid) {
+ case MT6370_VENID_RT5081:
+ case MT6370_VENID_RT5081A:
+ case MT6370_VENID_MT6370:
+ case MT6370_VENID_MT6371:
+ case MT6370_VENID_MT6372P:
+ case MT6370_VENID_MT6372CP:
+ return 0;
+ default:
+ dev_err(dev, "Unknown Vendor ID 0x%02x\n", devinfo);
+ return -ENODEV;
+ }
+}
+
+static int mt6370_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
+{
+ struct mt6370_info *info = context;
+ const u8 *u8_buf = reg_buf;
+ u8 bank_idx, bank_addr;
+ int ret;
+
+ bank_idx = u8_buf[0];
+ bank_addr = u8_buf[1];
+
+ ret = i2c_smbus_read_i2c_block_data(info->i2c[bank_idx], bank_addr,
+ val_size, val_buf);
+ if (ret < 0)
+ return ret;
+
+ if (ret != val_size)
+ return -EIO;
+
+ return 0;
+}
+
+static int mt6370_regmap_write(void *context, const void *data, size_t count)
+{
+ struct mt6370_info *info = context;
+ const u8 *u8_buf = data;
+ u8 bank_idx, bank_addr;
+ int len = count - MT6370_MAX_ADDRLEN;
+
+ bank_idx = u8_buf[0];
+ bank_addr = u8_buf[1];
+
+ return i2c_smbus_write_i2c_block_data(info->i2c[bank_idx], bank_addr,
+ len, data + MT6370_MAX_ADDRLEN);
+}
+
+static const struct regmap_bus mt6370_regmap_bus = {
+ .read = mt6370_regmap_read,
+ .write = mt6370_regmap_write,
+};
+
+static const struct regmap_config mt6370_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .max_register = MT6370_REG_MAXADDR,
+};
+
+static int mt6370_probe(struct i2c_client *i2c)
+{
+ struct mt6370_info *info;
+ struct i2c_client *usbc_i2c;
+ struct regmap *regmap;
+ struct device *dev = &i2c->dev;
+ int ret, vid;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ usbc_i2c = devm_i2c_new_dummy_device(dev, i2c->adapter,
+ MT6370_USBC_I2CADDR);
+ if (IS_ERR(usbc_i2c))
+ return dev_err_probe(dev, PTR_ERR(usbc_i2c),
+ "Failed to register USBC I2C client\n");
+
+ /* Assign I2C client for PMU and TypeC */
+ info->i2c[MT6370_PMU_I2C] = i2c;
+ info->i2c[MT6370_USBC_I2C] = usbc_i2c;
+
+ regmap = devm_regmap_init(dev, &mt6370_regmap_bus,
+ info, &mt6370_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to init regmap\n");
+
+ ret = mt6370_check_vendor_info(dev, regmap, &vid);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to check vendor info\n");
+
+ ret = devm_regmap_add_irq_chip(dev, regmap, i2c->irq,
+ IRQF_ONESHOT, -1, &mt6370_irq_chip,
+ &info->irq_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add irq chip\n");
+
+ switch (vid) {
+ case MT6370_VENID_MT6372P:
+ case MT6370_VENID_MT6372CP:
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ mt6372_exclusive_devices,
+ ARRAY_SIZE(mt6372_exclusive_devices),
+ NULL, 0,
+ regmap_irq_get_domain(info->irq_data));
+ break;
+ default:
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ mt6370_exclusive_devices,
+ ARRAY_SIZE(mt6370_exclusive_devices),
+ NULL, 0,
+ regmap_irq_get_domain(info->irq_data));
+ break;
+ }
+
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add the exclusive devices\n");
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ mt6370_devices, ARRAY_SIZE(mt6370_devices),
+ NULL, 0,
+ regmap_irq_get_domain(info->irq_data));
+}
+
+static const struct of_device_id mt6370_match_table[] = {
+ { .compatible = "mediatek,mt6370" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6370_match_table);
+
+static struct i2c_driver mt6370_driver = {
+ .driver = {
+ .name = "mt6370",
+ .of_match_table = mt6370_match_table,
+ },
+ .probe_new = mt6370_probe,
+};
+module_i2c_driver(mt6370_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("MediaTek MT6370 SubPMIC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mt6370.h b/drivers/mfd/mt6370.h
new file mode 100644
index 000000000000..094e59e4af4e
--- /dev/null
+++ b/drivers/mfd/mt6370.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#ifndef __MFD_MT6370_H__
+#define __MFD_MT6370_H__
+
+/* IRQ definitions */
+#define MT6370_IRQ_DIRCHGON 0
+#define MT6370_IRQ_CHG_TREG 4
+#define MT6370_IRQ_CHG_AICR 5
+#define MT6370_IRQ_CHG_MIVR 6
+#define MT6370_IRQ_PWR_RDY 7
+#define MT6370_IRQ_FL_CHG_VINOVP 11
+#define MT6370_IRQ_CHG_VSYSUV 12
+#define MT6370_IRQ_CHG_VSYSOV 13
+#define MT6370_IRQ_CHG_VBATOV 14
+#define MT6370_IRQ_CHG_VINOVPCHG 15
+#define MT6370_IRQ_TS_BAT_COLD 20
+#define MT6370_IRQ_TS_BAT_COOL 21
+#define MT6370_IRQ_TS_BAT_WARM 22
+#define MT6370_IRQ_TS_BAT_HOT 23
+#define MT6370_IRQ_TS_STATC 24
+#define MT6370_IRQ_CHG_FAULT 25
+#define MT6370_IRQ_CHG_STATC 26
+#define MT6370_IRQ_CHG_TMR 27
+#define MT6370_IRQ_CHG_BATABS 28
+#define MT6370_IRQ_CHG_ADPBAD 29
+#define MT6370_IRQ_CHG_RVP 30
+#define MT6370_IRQ_TSHUTDOWN 31
+#define MT6370_IRQ_CHG_IINMEAS 32
+#define MT6370_IRQ_CHG_ICCMEAS 33
+#define MT6370_IRQ_CHGDET_DONE 34
+#define MT6370_IRQ_WDTMR 35
+#define MT6370_IRQ_SSFINISH 36
+#define MT6370_IRQ_CHG_RECHG 37
+#define MT6370_IRQ_CHG_TERM 38
+#define MT6370_IRQ_CHG_IEOC 39
+#define MT6370_IRQ_ADC_DONE 40
+#define MT6370_IRQ_PUMPX_DONE 41
+#define MT6370_IRQ_BST_BATUV 45
+#define MT6370_IRQ_BST_MIDOV 46
+#define MT6370_IRQ_BST_OLP 47
+#define MT6370_IRQ_ATTACH 48
+#define MT6370_IRQ_DETACH 49
+#define MT6370_IRQ_HVDCP_STPDONE 51
+#define MT6370_IRQ_HVDCP_VBUSDET_DONE 52
+#define MT6370_IRQ_HVDCP_DET 53
+#define MT6370_IRQ_CHGDET 54
+#define MT6370_IRQ_DCDT 55
+#define MT6370_IRQ_DIRCHG_VGOK 59
+#define MT6370_IRQ_DIRCHG_WDTMR 60
+#define MT6370_IRQ_DIRCHG_UC 61
+#define MT6370_IRQ_DIRCHG_OC 62
+#define MT6370_IRQ_DIRCHG_OV 63
+#define MT6370_IRQ_OVPCTRL_SWON 67
+#define MT6370_IRQ_OVPCTRL_UVP_D 68
+#define MT6370_IRQ_OVPCTRL_UVP 69
+#define MT6370_IRQ_OVPCTRL_OVP_D 70
+#define MT6370_IRQ_OVPCTRL_OVP 71
+#define MT6370_IRQ_FLED_STRBPIN 72
+#define MT6370_IRQ_FLED_TORPIN 73
+#define MT6370_IRQ_FLED_TX 74
+#define MT6370_IRQ_FLED_LVF 75
+#define MT6370_IRQ_FLED2_SHORT 78
+#define MT6370_IRQ_FLED1_SHORT 79
+#define MT6370_IRQ_FLED2_STRB 80
+#define MT6370_IRQ_FLED1_STRB 81
+#define MT6370_IRQ_FLED2_STRB_TO	82
+#define MT6370_IRQ_FLED1_STRB_TO 83
+#define MT6370_IRQ_FLED2_TOR 84
+#define MT6370_IRQ_FLED1_TOR 85
+#define MT6370_IRQ_OTP 93
+#define MT6370_IRQ_VDDA_OVP 94
+#define MT6370_IRQ_VDDA_UV 95
+#define MT6370_IRQ_LDO_OC 103
+#define MT6370_IRQ_BLED_OCP 118
+#define MT6370_IRQ_BLED_OVP 119
+#define MT6370_IRQ_DSV_VNEG_OCP 123
+#define MT6370_IRQ_DSV_VPOS_OCP 124
+#define MT6370_IRQ_DSV_BST_OCP 125
+#define MT6370_IRQ_DSV_VNEG_SCP 126
+#define MT6370_IRQ_DSV_VPOS_SCP 127
+
+enum {
+ MT6370_USBC_I2C = 0,
+ MT6370_PMU_I2C,
+ MT6370_MAX_I2C
+};
+
+struct mt6370_info {
+ struct i2c_client *i2c[MT6370_MAX_I2C];
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* __MFD_MT6370_H__ */
diff --git a/drivers/mfd/ocelot-spi.c b/drivers/mfd/ocelot-spi.c
index 0f097f4829d1..2ecd271de2fb 100644
--- a/drivers/mfd/ocelot-spi.c
+++ b/drivers/mfd/ocelot-spi.c
@@ -276,6 +276,7 @@ static const struct spi_device_id ocelot_spi_ids[] = {
{ "vsc7512", 0 },
{ }
};
+MODULE_DEVICE_TABLE(spi, ocelot_spi_ids);
static const struct of_device_id ocelot_spi_of_match[] = {
{ .compatible = "mscc,vsc7512" },
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 00003a868d28..7e2cd79d17eb 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -60,6 +60,7 @@ static const struct of_device_id pmic_spmi_id_table[] = {
{ .compatible = "qcom,pmi8994", .data = N_USIDS(2) },
{ .compatible = "qcom,pmi8998", .data = N_USIDS(2) },
{ .compatible = "qcom,pmk8002", .data = N_USIDS(2) },
+ { .compatible = "qcom,pmp8074", .data = N_USIDS(2) },
{ .compatible = "qcom,smb2351", .data = N_USIDS(2) },
{ .compatible = "qcom,spmi-pmic", .data = N_USIDS(1) },
{ }
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index d5d641efa077..e00da7c7e3b1 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -67,6 +67,10 @@ static bool rk817_is_volatile_reg(struct device *dev, unsigned int reg)
case RK817_SECONDS_REG ... RK817_WEEKS_REG:
case RK817_RTC_STATUS_REG:
case RK817_CODEC_DTOP_LPT_SRST:
+ case RK817_GAS_GAUGE_ADC_CONFIG0 ... RK817_GAS_GAUGE_CUR_ADC_K0:
+ case RK817_PMIC_CHRG_STS:
+ case RK817_PMIC_CHRG_OUT:
+ case RK817_PMIC_CHRG_IN:
case RK817_INT_STS_REG0:
case RK817_INT_STS_REG1:
case RK817_INT_STS_REG2:
@@ -74,7 +78,7 @@ static bool rk817_is_volatile_reg(struct device *dev, unsigned int reg)
return true;
}
- return true;
+ return false;
}
static const struct regmap_config rk818_regmap_config = {
@@ -127,6 +131,11 @@ static const struct resource rk817_pwrkey_resources[] = {
DEFINE_RES_IRQ(RK817_IRQ_PWRON_FALL),
};
+static const struct resource rk817_charger_resources[] = {
+ DEFINE_RES_IRQ(RK817_IRQ_PLUG_IN),
+ DEFINE_RES_IRQ(RK817_IRQ_PLUG_OUT),
+};
+
static const struct mfd_cell rk805s[] = {
{ .name = "rk808-clkout", },
{ .name = "rk808-regulator", },
@@ -166,6 +175,11 @@ static const struct mfd_cell rk817s[] = {
.resources = &rk817_rtc_resources[0],
},
{ .name = "rk817-codec",},
+ {
+ .name = "rk817-charger",
+ .num_resources = ARRAY_SIZE(rk817_charger_resources),
+ .resources = &rk817_charger_resources[0],
+ },
};
static const struct mfd_cell rk818s[] = {
diff --git a/drivers/mfd/rt5120.c b/drivers/mfd/rt5120.c
new file mode 100644
index 000000000000..8046e383bc92
--- /dev/null
+++ b/drivers/mfd/rt5120.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+
+#define RT5120_REG_INTENABLE 0x1D
+#define RT5120_REG_INTSTAT 0x1E
+#define RT5120_REG_FZCMODE 0x44
+
+#define RT5120_INT_HOTDIE 0
+#define RT5120_INT_PWRKEY_REL 5
+#define RT5120_INT_PWRKEY_PRESS 6
+
+static const struct regmap_range rt5120_rd_yes_ranges[] = {
+ regmap_reg_range(0x03, 0x13),
+ regmap_reg_range(0x1c, 0x20),
+ regmap_reg_range(0x44, 0x44),
+};
+
+static const struct regmap_range rt5120_wr_yes_ranges[] = {
+ regmap_reg_range(0x06, 0x13),
+ regmap_reg_range(0x1c, 0x20),
+ regmap_reg_range(0x44, 0x44),
+};
+
+static const struct regmap_access_table rt5120_rd_table = {
+ .yes_ranges = rt5120_rd_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rt5120_rd_yes_ranges),
+};
+
+static const struct regmap_access_table rt5120_wr_table = {
+ .yes_ranges = rt5120_wr_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rt5120_wr_yes_ranges),
+};
+
+static const struct regmap_config rt5120_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT5120_REG_FZCMODE,
+
+ .wr_table = &rt5120_wr_table,
+ .rd_table = &rt5120_rd_table,
+};
+
+static const struct regmap_irq rt5120_irqs[] = {
+ REGMAP_IRQ_REG_LINE(RT5120_INT_HOTDIE, 8),
+ REGMAP_IRQ_REG_LINE(RT5120_INT_PWRKEY_REL, 8),
+ REGMAP_IRQ_REG_LINE(RT5120_INT_PWRKEY_PRESS, 8),
+};
+
+static const struct regmap_irq_chip rt5120_irq_chip = {
+ .name = "rt5120-pmic",
+ .status_base = RT5120_REG_INTSTAT,
+ .mask_base = RT5120_REG_INTENABLE,
+ .ack_base = RT5120_REG_INTSTAT,
+ .mask_invert = true,
+ .use_ack = true,
+ .num_regs = 1,
+ .irqs = rt5120_irqs,
+ .num_irqs = ARRAY_SIZE(rt5120_irqs),
+};
+
+static const struct resource rt5120_regulator_resources[] = {
+ DEFINE_RES_IRQ(RT5120_INT_HOTDIE),
+};
+
+static const struct resource rt5120_pwrkey_resources[] = {
+ DEFINE_RES_IRQ_NAMED(RT5120_INT_PWRKEY_PRESS, "pwrkey-press"),
+ DEFINE_RES_IRQ_NAMED(RT5120_INT_PWRKEY_REL, "pwrkey-release"),
+};
+
+static const struct mfd_cell rt5120_devs[] = {
+ MFD_CELL_RES("rt5120-regulator", rt5120_regulator_resources),
+ MFD_CELL_OF("rt5120-pwrkey", rt5120_pwrkey_resources, NULL, 0, 0, "richtek,rt5120-pwrkey"),
+};
+
+static int rt5120_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(i2c, &rt5120_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to init regmap\n");
+
+ ret = devm_regmap_add_irq_chip(dev, regmap, i2c->irq, IRQF_ONESHOT, 0,
+ &rt5120_irq_chip, &irq_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, rt5120_devs,
+ ARRAY_SIZE(rt5120_devs), NULL, 0,
+ regmap_irq_get_domain(irq_data));
+}
+
+static const struct of_device_id rt5120_device_match_table[] = {
+ { .compatible = "richtek,rt5120" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt5120_device_match_table);
+
+static struct i2c_driver rt5120_driver = {
+ .driver = {
+ .name = "rt5120",
+ .of_match_table = rt5120_device_match_table,
+ },
+ .probe_new = rt5120_probe,
+};
+module_i2c_driver(rt5120_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT5120 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index bc0a2c38653e..3ac4508a6742 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1720,7 +1720,12 @@ static struct platform_driver sm501_plat_driver = {
static int __init sm501_base_init(void)
{
- platform_driver_register(&sm501_plat_driver);
+ int ret;
+
+ ret = platform_driver_register(&sm501_plat_driver);
+ if (ret < 0)
+ return ret;
+
return pci_register_driver(&sm501_pci_driver);
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index aeb9ea55f97d..0c4f74197d3e 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -8,14 +8,13 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
@@ -30,17 +29,12 @@
* @irq_trigger: IRQ trigger to use for the interrupt to the host
* @autosleep: bool to enable/disable stmpe autosleep
* @autosleep_timeout: inactivity timeout in milliseconds for autosleep
- * @irq_over_gpio: true if gpio is used to get irq
- * @irq_gpio: gpio number over which irq will be requested (significant only if
- * irq_over_gpio is true)
*/
struct stmpe_platform_data {
int id;
unsigned int blocks;
unsigned int irq_trigger;
bool autosleep;
- bool irq_over_gpio;
- int irq_gpio;
int autosleep_timeout;
};
@@ -1349,32 +1343,22 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
if (pdata->id < 0)
pdata->id = -1;
- pdata->irq_gpio = of_get_named_gpio_flags(np, "irq-gpio", 0,
- &pdata->irq_trigger);
- if (gpio_is_valid(pdata->irq_gpio))
- pdata->irq_over_gpio = 1;
- else
- pdata->irq_trigger = IRQF_TRIGGER_NONE;
-
of_property_read_u32(np, "st,autosleep-timeout",
&pdata->autosleep_timeout);
pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
for_each_available_child_of_node(np, child) {
- if (of_node_name_eq(child, "stmpe_gpio")) {
+ if (of_device_is_compatible(child, stmpe_gpio_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_GPIO;
- } else if (of_node_name_eq(child, "stmpe_keypad")) {
+ else if (of_device_is_compatible(child, stmpe_keypad_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_KEYPAD;
- } else if (of_node_name_eq(child, "stmpe_touchscreen")) {
+ else if (of_device_is_compatible(child, stmpe_ts_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
- } else if (of_node_name_eq(child, "stmpe_adc")) {
+ else if (of_device_is_compatible(child, stmpe_adc_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_ADC;
- } else if (of_node_name_eq(child, "stmpe_pwm")) {
+ else if (of_device_is_compatible(child, stmpe_pwm_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_PWM;
- } else if (of_node_name_eq(child, "stmpe_rotator")) {
- pdata->blocks |= STMPE_BLOCK_ROTATOR;
- }
}
}
@@ -1384,6 +1368,7 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
struct stmpe_platform_data *pdata;
struct device_node *np = ci->dev->of_node;
struct stmpe *stmpe;
+ struct gpio_desc *irq_gpio;
int ret;
u32 val;
@@ -1437,18 +1422,20 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
if (ci->init)
ci->init(stmpe);
- if (pdata->irq_over_gpio) {
- ret = devm_gpio_request_one(ci->dev, pdata->irq_gpio,
- GPIOF_DIR_IN, "stmpe");
- if (ret) {
- dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
- ret);
- return ret;
- }
+ irq_gpio = devm_gpiod_get_optional(ci->dev, "irq", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(irq_gpio);
+ if (ret) {
+ dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n", ret);
+ return ret;
+ }
- stmpe->irq = gpio_to_irq(pdata->irq_gpio);
+ if (irq_gpio) {
+ stmpe->irq = gpiod_to_irq(irq_gpio);
+ pdata->irq_trigger = gpiod_is_active_low(irq_gpio) ?
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
} else {
stmpe->irq = ci->irq;
+ pdata->irq_trigger = IRQF_TRIGGER_NONE;
}
if (stmpe->irq < 0) {
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index bdb2ce7ff03b..9489e80e905a 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -66,14 +66,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
goto err_map;
}
- /* Parse the device's DT node for an endianness specification */
- if (of_property_read_bool(np, "big-endian"))
- syscon_config.val_format_endian = REGMAP_ENDIAN_BIG;
- else if (of_property_read_bool(np, "little-endian"))
- syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE;
- else if (of_property_read_bool(np, "native-endian"))
- syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE;
-
/*
* search for reg-io-width property in DT. If it is not provided,
* default to 4 bytes. regmap_init_mmio will return an error if values
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 2679c41232e6..f6b4b9d94bbd 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -882,7 +882,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
* SR_I2C_SCL_CTRL_PU(bit 4)=0 and SR_I2C_SDA_CTRL_PU(bit 6)=0.
*
* Also, always enable SmartReflex bit as that's needed for omaps to
- * to do anything over I2C4 for voltage scaling even if SmartReflex
+ * do anything over I2C4 for voltage scaling even if SmartReflex
* is disabled. Without the SmartReflex bit omap sys_clkreq idle
* signal will never trigger for retention idle.
*/
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 4f576f0160a9..87496c1cb8bc 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -14,6 +14,7 @@
* by syed khasim <x0khasim@ti.com>
*/
+#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 94e9fb4cdd76..358ad56f6524 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -513,4 +513,5 @@ source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
source "drivers/misc/uacce/Kconfig"
source "drivers/misc/pvpanic/Kconfig"
+source "drivers/misc/mchp_pci1xxxx/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2be8542616dd..ac9b3e757ba1 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -60,4 +60,5 @@ obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
obj-$(CONFIG_HI6421V600_IRQ) += hi6421v600-irq.o
obj-$(CONFIG_OPEN_DICE) += open-dice.o
-obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o
\ No newline at end of file
+obj-$(CONFIG_GP_PCI1XXXX) += mchp_pci1xxxx/
+obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index 075f3a36d512..a58b7cb81d98 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -1014,7 +1014,7 @@ exit_done:
* ...argument 0 is string ID
*/
count = strlen(msg_buff);
- strlcpy(&msg_buff[count],
+ strscpy(&msg_buff[count],
&p[str_table + args[0]],
ALTERA_MESSAGE_LENGTH - count);
break;
@@ -2146,7 +2146,7 @@ static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
&p[note_table + (8 * i) + 4])];
if (value != NULL)
- strlcpy(value, value_ptr, vallen);
+ strscpy(value, value_ptr, vallen);
}
}
@@ -2162,13 +2162,13 @@ static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
status = 0;
if (key != NULL)
- strlcpy(key, &p[note_strings +
+ strscpy(key, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])],
keylen);
if (value != NULL)
- strlcpy(value, &p[note_strings +
+ strscpy(value, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i) + 4])],
vallen);
diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c
index a16b99bdaa13..d4a96137728d 100644
--- a/drivers/misc/bcm-vk/bcm_vk_dev.c
+++ b/drivers/misc/bcm-vk/bcm_vk_dev.c
@@ -1339,7 +1339,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, vk);
irq = pci_alloc_irq_vectors(pdev,
- 1,
+ VK_MSIX_IRQ_MIN_REQ,
VK_MSIX_IRQ_MAX,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
@@ -1401,7 +1401,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bcm_vk_tty_set_irq_enabled(vk, i);
}
- id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&bcm_vk_ida, GFP_KERNEL);
if (id < 0) {
err = id;
dev_err(dev, "unable to get id\n");
@@ -1500,7 +1500,7 @@ err_kfree_name:
misc_device->name = NULL;
err_ida_remove:
- ida_simple_remove(&bcm_vk_ida, id);
+ ida_free(&bcm_vk_ida, id);
err_irq:
for (i = 0; i < vk->num_irqs; i++)
@@ -1573,7 +1573,7 @@ static void bcm_vk_remove(struct pci_dev *pdev)
if (misc_device->name) {
misc_deregister(misc_device);
kfree(misc_device->name);
- ida_simple_remove(&bcm_vk_ida, vk->devid);
+ ida_free(&bcm_vk_ida, vk->devid);
}
for (i = 0; i < vk->num_irqs; i++)
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 60c829113299..2c64f55cf01f 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -280,22 +280,6 @@ void cxl_handle_fault(struct work_struct *fault_work)
mmput(mm);
}
-static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
-{
- struct mm_struct *mm;
-
- mm = get_mem_context(ctx);
- if (mm == NULL) {
- pr_devel("cxl_prefault_one unable to get mm %i\n",
- pid_nr(ctx->pid));
- return;
- }
-
- cxl_fault_segment(ctx, mm, ea);
-
- mmput(mm);
-}
-
static u64 next_segment(u64 ea, u64 vsid)
{
if (vsid & SLB_VSID_B_1T)
@@ -306,23 +290,16 @@ static u64 next_segment(u64 ea, u64 vsid)
return ea + 1;
}
-static void cxl_prefault_vma(struct cxl_context *ctx)
+static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
{
u64 ea, last_esid = 0;
struct copro_slb slb;
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
int rc;
- struct mm_struct *mm;
-
- mm = get_mem_context(ctx);
- if (mm == NULL) {
- pr_devel("cxl_prefault_vm unable to get mm %i\n",
- pid_nr(ctx->pid));
- return;
- }
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
for (ea = vma->vm_start; ea < vma->vm_end;
ea = next_segment(ea, slb.vsid)) {
rc = copro_calculate_slb(mm, ea, &slb);
@@ -337,20 +314,28 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
}
}
mmap_read_unlock(mm);
-
- mmput(mm);
}
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
+ struct mm_struct *mm = get_mem_context(ctx);
+
+ if (mm == NULL) {
+ pr_devel("cxl_prefault unable to get mm %i\n",
+ pid_nr(ctx->pid));
+ return;
+ }
+
switch (ctx->afu->prefault_mode) {
case CXL_PREFAULT_WED:
- cxl_prefault_one(ctx, wed);
+ cxl_fault_segment(ctx, mm, wed);
break;
case CXL_PREFAULT_ALL:
- cxl_prefault_vma(ctx);
+ cxl_prefault_vma(ctx, mm);
break;
default:
break;
}
+
+ mmput(mm);
}
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 4a9445fea93d..8a841a75d893 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -136,7 +136,7 @@ static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
&& !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -ENODEV;
- strlcpy(info->type, "eeprom", I2C_NAME_SIZE);
+ strscpy(info->type, "eeprom", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index ada2a3af36d7..bb3ed352b95f 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -1075,7 +1075,7 @@ static const struct i2c_device_id *idt_ee_match_id(struct fwnode_handle *fwnode)
return NULL;
p = strchr(compatible, ',');
- strlcpy(devname, p ? p + 1 : compatible, sizeof(devname));
+ strscpy(devname, p ? p + 1 : compatible, sizeof(devname));
/* Search through the device name */
while (id->name[0]) {
if (strcmp(devname, id->name) == 0)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 5d9e3483b89d..7ff0b63c25e3 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1515,7 +1515,7 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
args[1].length = dsp_attr_buf_len;
args[1].fd = -1;
- fl->pd = 1;
+ fl->pd = USER_PD;
return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
FASTRPC_SCALARS(0, 1, 1), args);
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
index 861c81006c6d..bd01d0d940c0 100644
--- a/drivers/misc/habanalabs/Kconfig
+++ b/drivers/misc/habanalabs/Kconfig
@@ -10,6 +10,7 @@ config HABANA_AI
select HWMON
select DMA_SHARED_BUFFER
select CRC32
+ select FW_LOADER
help
Enables PCIe card driver for Habana's AI Processors (AIP) that are
designed to accelerate Deep Learning inference and training workloads.
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index b35d7000c86b..a48a9e0969ed 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -8,13 +8,13 @@ obj-$(CONFIG_HABANA_AI) := habanalabs.o
include $(src)/common/Makefile
habanalabs-y += $(HL_COMMON_FILES)
-include $(src)/goya/Makefile
-habanalabs-y += $(HL_GOYA_FILES)
+include $(src)/gaudi2/Makefile
+habanalabs-y += $(HL_GAUDI2_FILES)
include $(src)/gaudi/Makefile
habanalabs-y += $(HL_GAUDI_FILES)
-include $(src)/gaudi2/Makefile
-habanalabs-y += $(HL_GAUDI2_FILES)
+include $(src)/goya/Makefile
+habanalabs-y += $(HL_GOYA_FILES)
habanalabs-$(CONFIG_DEBUG_FS) += common/debugfs.o
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index b027f66f8bd4..2b332991ac6a 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -12,20 +12,18 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
+#define CB_VA_POOL_SIZE (4UL * SZ_1G)
+
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct hl_vm_va_block *va_block, *tmp;
- dma_addr_t bus_addr;
- u64 virt_addr;
u32 page_size = prop->pmmu.page_size;
- s32 offset;
int rc;
if (!hdev->supports_cb_mapping) {
dev_err_ratelimited(hdev->dev,
- "Cannot map CB because no VA range is allocated for CB mapping\n");
+ "Mapping a CB to the device's MMU is not supported\n");
return -EINVAL;
}
@@ -35,106 +33,45 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
return -EINVAL;
}
- INIT_LIST_HEAD(&cb->va_block_list);
-
- for (bus_addr = cb->bus_address;
- bus_addr < cb->bus_address + cb->size;
- bus_addr += page_size) {
-
- virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
- if (!virt_addr) {
- dev_err(hdev->dev,
- "Failed to allocate device virtual address for CB\n");
- rc = -ENOMEM;
- goto err_va_pool_free;
- }
+ if (cb->is_mmu_mapped)
+ return 0;
- va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
- if (!va_block) {
- rc = -ENOMEM;
- gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
- goto err_va_pool_free;
- }
+ cb->roundup_size = roundup(cb->size, page_size);
- va_block->start = virt_addr;
- va_block->end = virt_addr + page_size - 1;
- va_block->size = page_size;
- list_add_tail(&va_block->node, &cb->va_block_list);
+ cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
+ if (!cb->virtual_addr) {
+ dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
+ return -ENOMEM;
}
- mutex_lock(&ctx->mmu_lock);
-
- bus_addr = cb->bus_address;
- offset = 0;
- list_for_each_entry(va_block, &cb->va_block_list, node) {
- rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
- va_block->size, list_is_last(&va_block->node,
- &cb->va_block_list));
- if (rc) {
- dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
- va_block->start);
- goto err_va_umap;
- }
-
- bus_addr += va_block->size;
- offset += va_block->size;
+ mutex_lock(&hdev->mmu_lock);
+ rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
+ goto err_va_umap;
}
-
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
-
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
cb->is_mmu_mapped = true;
-
return rc;
err_va_umap:
- list_for_each_entry(va_block, &cb->va_block_list, node) {
- if (offset <= 0)
- break;
- hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
- offset <= va_block->size);
- offset -= va_block->size;
- }
-
- rc = hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
-
- mutex_unlock(&ctx->mmu_lock);
-
-err_va_pool_free:
- list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
- gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
- list_del(&va_block->node);
- kfree(va_block);
- }
-
+ mutex_unlock(&hdev->mmu_lock);
+ gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
return rc;
}
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
struct hl_device *hdev = ctx->hdev;
- struct hl_vm_va_block *va_block, *tmp;
-
- mutex_lock(&ctx->mmu_lock);
-
- list_for_each_entry(va_block, &cb->va_block_list, node)
- if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
- list_is_last(&va_block->node,
- &cb->va_block_list)))
- dev_warn_ratelimited(hdev->dev,
- "Failed to unmap CB's va 0x%llx\n",
- va_block->start);
+ mutex_lock(&hdev->mmu_lock);
+ hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
+ mutex_unlock(&hdev->mmu_lock);
- mutex_unlock(&ctx->mmu_lock);
-
- list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
- gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
- list_del(&va_block->node);
- kfree(va_block);
- }
+ gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
}
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
@@ -376,7 +313,6 @@ int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
static int hl_cb_info(struct hl_mem_mgr *mmg,
u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
- struct hl_vm_va_block *va_block;
struct hl_cb *cb;
int rc = 0;
@@ -388,9 +324,8 @@ static int hl_cb_info(struct hl_mem_mgr *mmg,
}
if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
- va_block = list_first_entry(&cb->va_block_list, struct hl_vm_va_block, node);
- if (va_block) {
- *device_va = va_block->start;
+ if (cb->is_mmu_mapped) {
+ *device_va = cb->virtual_addr;
} else {
dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
rc = -EINVAL;
@@ -566,16 +501,23 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx)
return -ENOMEM;
}
- rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
- prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
+ ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
+ CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
+ if (!ctx->cb_va_pool_base) {
+ rc = -ENOMEM;
+ goto err_pool_destroy;
+ }
+ rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to VA gen pool for CB mapping\n");
- goto err_pool_destroy;
+ goto err_unreserve_va_block;
}
return 0;
+err_unreserve_va_block:
+ hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
err_pool_destroy:
gen_pool_destroy(ctx->cb_va_pool);
@@ -590,4 +532,5 @@ void hl_cb_va_pool_fini(struct hl_ctx *ctx)
return;
gen_pool_destroy(ctx->cb_va_pool);
+ hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 90a4574cbe2d..fa05770865c6 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -12,7 +12,9 @@
#include <linux/slab.h>
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
- HL_CS_FLAGS_COLLECTIVE_WAIT)
+ HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
+ HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND)
+
#define MAX_TS_ITER_NUM 10
@@ -824,10 +826,10 @@ static void cs_timedout(struct work_struct *work)
}
/* Save only the first CS timeout parameters */
- rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_enable, 1, 0);
+ rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
if (rc) {
- hdev->last_error.cs_timeout.timestamp = ktime_get();
- hdev->last_error.cs_timeout.seq = cs->sequence;
+ hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
+ hdev->captured_err_info.cs_timeout.seq = cs->sequence;
event_mask = device_reset ? (HL_NOTIFIER_EVENT_CS_TIMEOUT |
HL_NOTIFIER_EVENT_DEVICE_RESET) : HL_NOTIFIER_EVENT_CS_TIMEOUT;
@@ -1242,6 +1244,8 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_RESERVE_SIGNALS;
else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
return CS_UNRESERVE_SIGNALS;
+ else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
+ return CS_TYPE_ENGINE_CORE;
else
return CS_TYPE_DEFAULT;
}
@@ -1253,6 +1257,7 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
u32 cs_type_flags, num_chunks;
enum hl_device_status status;
enum hl_cs_type cs_type;
+ bool is_sync_stream;
if (!hl_device_operational(hdev, &status)) {
return -EBUSY;
@@ -1276,9 +1281,10 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
cs_type = hl_cs_get_cs_type(cs_type_flags);
num_chunks = args->in.num_chunks_execute;
- if (unlikely((cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
- cs_type == CS_TYPE_COLLECTIVE_WAIT) &&
- !hdev->supports_sync_stream)) {
+ is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
+ cs_type == CS_TYPE_COLLECTIVE_WAIT);
+
+ if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
dev_err(hdev->dev, "Sync stream CS is not supported\n");
return -EINVAL;
}
@@ -1288,7 +1294,7 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
return -EINVAL;
}
- } else if (num_chunks != 1) {
+ } else if (is_sync_stream && num_chunks != 1) {
dev_err(hdev->dev,
"Sync stream CS mandates one chunk only, context %d\n",
ctx->asid);
@@ -1584,13 +1590,14 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
bool need_soft_reset = false;
- int rc = 0, do_ctx_switch;
+ int rc = 0, do_ctx_switch = 0;
void __user *chunks;
u32 num_chunks, tmp;
u16 sob_count;
int ret;
- do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
+ if (hdev->supports_ctx_switch)
+ do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
mutex_lock(&hpriv->restore_phase_mutex);
@@ -1661,9 +1668,10 @@ wait_again:
}
}
- ctx->thread_ctx_switch_wait_token = 1;
+ if (hdev->supports_ctx_switch)
+ ctx->thread_ctx_switch_wait_token = 1;
- } else if (!ctx->thread_ctx_switch_wait_token) {
+ } else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
rc = hl_poll_timeout_memory(hdev,
&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
100, jiffies_to_usecs(hdev->timeout_jiffies), false);
@@ -2351,6 +2359,41 @@ out:
return rc;
}
+static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
+ u32 num_engine_cores, u32 core_command)
+{
+ int rc;
+ struct hl_device *hdev = hpriv->hdev;
+ void __user *engine_cores_arr;
+ u32 *cores;
+
+ if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
+ dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
+ return -EINVAL;
+ }
+
+ if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
+ dev_err(hdev->dev, "Engine core command is invalid\n");
+ return -EINVAL;
+ }
+
+ engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
+ cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
+ if (!cores)
+ return -ENOMEM;
+
+ if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
+ dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
+ kfree(cores);
+ return -EFAULT;
+ }
+
+ rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
+ kfree(cores);
+
+ return rc;
+}
+
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cs_args *args = data;
@@ -2403,6 +2446,10 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_unreserve_signals(hpriv,
args->in.encaps_sig_handle_id);
break;
+ case CS_TYPE_ENGINE_CORE:
+ rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
+ args->in.num_engine_cores, args->in.core_command);
+ break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
args->in.cs_flags,
@@ -2524,7 +2571,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
ktime_t max_ktime, first_cs_time;
enum hl_cs_wait_status status;
- memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
+ memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
/* get all fences under the same lock */
rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
@@ -2826,7 +2873,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
}
/* allocate array for the fences */
- fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
+ fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
if (!fence_arr) {
rc = -ENOMEM;
goto free_seq_arr;
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 64439f33a19b..48d3ec8b5c82 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -291,14 +291,16 @@ static int vm_show(struct seq_file *s, void *data)
if (ctx->asid != HL_KERNEL_ASID_ID &&
!list_empty(&ctx->hw_block_mem_list)) {
seq_puts(s, "\nhw_block mappings:\n\n");
- seq_puts(s, " virtual address size HW block id\n");
- seq_puts(s, "-------------------------------------------\n");
+ seq_puts(s,
+ " virtual address block size mapped size HW block id\n");
+ seq_puts(s,
+ "---------------------------------------------------------------\n");
mutex_lock(&ctx->hw_block_list_lock);
- list_for_each_entry(lnode, &ctx->hw_block_mem_list,
- node) {
+ list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
seq_printf(s,
- " 0x%-14lx %-6u %-9u\n",
- lnode->vaddr, lnode->size, lnode->id);
+ " 0x%-14lx %-6u %-6u %-9u\n",
+ lnode->vaddr, lnode->block_size, lnode->mapped_size,
+ lnode->id);
}
mutex_unlock(&ctx->hw_block_list_lock);
}
@@ -591,6 +593,7 @@ static int engines_show(struct seq_file *s, void *data)
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ struct engines_data eng_data;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
@@ -598,7 +601,25 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
- hdev->asic_funcs->is_device_idle(hdev, NULL, 0, s);
+ eng_data.actual_size = 0;
+ eng_data.allocated_buf_size = HL_ENGINES_DATA_MAX_SIZE;
+ eng_data.buf = vmalloc(eng_data.allocated_buf_size);
+ if (!eng_data.buf)
+ return -ENOMEM;
+
+ hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);
+
+ if (eng_data.actual_size > eng_data.allocated_buf_size) {
+ dev_err(hdev->dev,
+ "Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
+ eng_data.actual_size, eng_data.allocated_buf_size);
+ vfree(eng_data.buf);
+ return -ENOMEM;
+ }
+
+ seq_write(s, eng_data.buf, eng_data.actual_size);
+
+ vfree(eng_data.buf);
return 0;
}
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index b30aeb1c657f..233d8b46c831 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -13,6 +13,8 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
+#include <trace/events/habanalabs.h>
+
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
enum dma_alloc_type {
@@ -26,8 +28,9 @@ enum dma_alloc_type {
/*
* hl_set_dram_bar- sets the bar to allow later access to address
*
- * @hdev: pointer to habanalabs device structure
+ * @hdev: pointer to habanalabs device structure.
* @addr: the address the caller wants to access.
+ * @region: the PCI region.
*
* @return: the old BAR base address on success, U64_MAX for failure.
* The caller should set it back to the old address after use.
@@ -37,58 +40,64 @@ enum dma_alloc_type {
* This function can be called also if the bar doesn't need to be set,
* in that case it just won't change the base.
*/
-static uint64_t hl_set_dram_bar(struct hl_device *hdev, u64 addr)
+static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 bar_base_addr;
+ u64 bar_base_addr, old_base;
- bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
+ if (is_power_of_2(prop->dram_pci_bar_size))
+ bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
+ else
+ bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
+ prop->dram_pci_bar_size;
- return hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
-}
+ old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
+ /* in case of success we need to update the new BAR base */
+ if (old_base != U64_MAX)
+ region->region_base = bar_base_addr;
+
+ return old_base;
+}
static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type, enum pci_region region_type)
{
struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
+ void __iomem *acc_addr;
u64 old_base = 0, rc;
if (region_type == PCI_REGION_DRAM) {
- old_base = hl_set_dram_bar(hdev, addr);
+ old_base = hl_set_dram_bar(hdev, addr, region);
if (old_base == U64_MAX)
return -EIO;
}
+ acc_addr = hdev->pcie_bar[region->bar_id] + addr - region->region_base +
+ region->offset_in_bar;
switch (acc_type) {
case DEBUGFS_READ8:
- *val = readb(hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ *val = readb(acc_addr);
break;
case DEBUGFS_WRITE8:
- writeb(*val, hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ writeb(*val, acc_addr);
break;
case DEBUGFS_READ32:
- *val = readl(hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ *val = readl(acc_addr);
break;
case DEBUGFS_WRITE32:
- writel(*val, hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ writel(*val, acc_addr);
break;
case DEBUGFS_READ64:
- *val = readq(hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ *val = readq(acc_addr);
break;
case DEBUGFS_WRITE64:
- writeq(*val, hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ writeq(*val, acc_addr);
break;
}
if (region_type == PCI_REGION_DRAM) {
- rc = hl_set_dram_bar(hdev, old_base);
+ rc = hl_set_dram_bar(hdev, old_base, region);
if (rc == U64_MAX)
return -EIO;
}
@@ -97,9 +106,10 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val
}
static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag, enum dma_alloc_type alloc_type)
+ gfp_t flag, enum dma_alloc_type alloc_type,
+ const char *caller)
{
- void *ptr;
+ void *ptr = NULL;
switch (alloc_type) {
case DMA_ALLOC_COHERENT:
@@ -113,11 +123,16 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t
break;
}
+ if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
+ trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
+ caller);
+
return ptr;
}
static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, enum dma_alloc_type alloc_type)
+ dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
+ const char *caller)
{
switch (alloc_type) {
case DMA_ALLOC_COHERENT:
@@ -130,39 +145,44 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
break;
}
+
+ trace_habanalabs_dma_free(hdev->dev, (u64) (uintptr_t) cpu_addr, dma_handle, size, caller);
}
-void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
+void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, const char *caller)
{
- return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT);
+ return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
}
-void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, const char *caller)
{
- hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT);
+ hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
}
-void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
+void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, const char *caller)
{
- return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+ return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
}
-void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
+void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
+ const char *caller)
{
- hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+ hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
}
-void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
- dma_addr_t *dma_handle)
+void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+ dma_addr_t *dma_handle, const char *caller)
{
- return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL);
+ return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
}
-void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr)
+void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
+ const char *caller)
{
- hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL);
+ hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
}
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
@@ -267,6 +287,30 @@ int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
return 0;
}
+void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
+{
+ va_list args;
+ int str_size;
+
+ va_start(args, fmt);
+ /* Calculate formatted string length. Assuming each string is null terminated, hence
+ * increment result by 1
+ */
+ str_size = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+
+ if ((e->actual_size + str_size) < e->allocated_buf_size) {
+ va_start(args, fmt);
+ vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
+ va_end(args);
+ }
+
+ /* Need to update the size even when not updating destination buffer to get the exact size
+ * of all input strings
+ */
+ e->actual_size += str_size;
+}
+
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
@@ -322,6 +366,8 @@ static void hpriv_release(struct kref *ref)
hdev = hpriv->hdev;
+ hdev->asic_funcs->send_device_activity(hdev, false);
+
put_pid(hpriv->taskpid);
hl_debugfs_remove_file(hpriv);
@@ -673,7 +719,7 @@ static int device_early_init(struct hl_device *hdev)
if (hdev->asic_prop.completion_queues_count) {
hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
- sizeof(*hdev->cq_wq),
+ sizeof(struct workqueue_struct *),
GFP_KERNEL);
if (!hdev->cq_wq) {
rc = -ENOMEM;
@@ -1091,7 +1137,9 @@ int hl_device_resume(struct hl_device *hdev)
/* 'in_reset' was set to true during suspend, now we must clear it in order
* for hard reset to be performed
*/
+ spin_lock(&hdev->reset_info.lock);
hdev->reset_info.in_reset = 0;
+ spin_unlock(&hdev->reset_info.lock);
rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
if (rc) {
@@ -1518,6 +1566,13 @@ kill_processes:
*/
hdev->disabled = false;
+ /* F/W security enabled indication might be updated after hard-reset */
+ if (hard_reset) {
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc)
+ goto out_err;
+ }
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
@@ -1556,7 +1611,7 @@ kill_processes:
if (!hdev->asic_prop.fw_security_enabled)
hl_fw_set_max_power(hdev);
} else {
- rc = hdev->asic_funcs->non_hard_reset_late_init(hdev);
+ rc = hdev->asic_funcs->compute_reset_late_init(hdev);
if (rc) {
if (reset_upon_device_release)
dev_err(hdev->dev,
@@ -1704,7 +1759,9 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
char *name;
bool add_cdev_sysfs_on_err = false;
- name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
+ hdev->cdev_idx = hdev->id / 2;
+
+ name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto out_disabled;
@@ -1719,7 +1776,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto out_disabled;
- name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
+ name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto free_dev;
@@ -1806,7 +1863,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
- sizeof(*hdev->shadow_cs_queue), GFP_KERNEL);
+ sizeof(struct hl_cs *), GFP_KERNEL);
if (!hdev->shadow_cs_queue) {
rc = -ENOMEM;
goto cq_fini;
@@ -1997,10 +2054,10 @@ out_disabled:
if (hdev->pdev)
dev_err(&hdev->pdev->dev,
"Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->id / 2);
+ hdev->cdev_idx);
else
pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->id / 2);
+ hdev->cdev_idx);
return rc;
}
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 608ca67527a5..2de6a9bd564d 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -15,14 +15,6 @@
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
-struct fw_binning_conf {
- u64 tpc_binning;
- u32 dec_binning;
- u32 hbm_binning;
- u32 edma_binning;
- u32 mme_redundancy;
-};
-
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -260,7 +252,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
struct hl_bd *sent_bd;
- u32 tmp, expected_ack_val, pi;
+ u32 tmp, expected_ack_val, pi, opcode;
int rc;
pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
@@ -327,8 +319,35 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
if (rc) {
- dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
- rc, (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT);
+ opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
+
+ if (!prop->supports_advanced_cpucp_rc) {
+ dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
+ goto scrub_descriptor;
+ }
+
+ switch (rc) {
+ case cpucp_packet_invalid:
+ dev_err(hdev->dev,
+ "CPU packet %d is not supported by F/W\n", opcode);
+ break;
+ case cpucp_packet_fault:
+ dev_err(hdev->dev,
+ "F/W failed processing CPU packet %d\n", opcode);
+ break;
+ case cpucp_packet_invalid_pkt:
+ dev_dbg(hdev->dev,
+ "CPU packet %d is not supported by F/W\n", opcode);
+ break;
+ case cpucp_packet_invalid_params:
+ dev_err(hdev->dev,
+ "F/W reports invalid parameters for CPU packet %d\n", opcode);
+ break;
+
+ default:
+ dev_err(hdev->dev,
+ "Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
+ }
/* propagate the return code from the f/w to the callers who want to check it */
if (result)
@@ -340,6 +359,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
*result = le64_to_cpu(pkt->result);
}
+scrub_descriptor:
/* Scrub previous buffer descriptor 'ctl' field which contains the
* previous PI value written during packet submission.
* We must do this or else F/W can read an old value upon queue wraparound.
@@ -462,6 +482,21 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
size);
}
+int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
+{
+ struct cpucp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(open);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+ if (rc)
+ dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
+
+ return rc;
+}
+
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
struct cpucp_packet hb_pkt;
@@ -581,6 +616,15 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
/* All warnings should go here in order not to reach the unknown error validation */
+ if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
+ dev_warn(hdev->dev,
+ "Device boot warning - EEPROM failure detected, default settings applied\n");
+ /* This is a warning so we don't want it to disable the
+ * device
+ */
+ err_val &= ~CPU_BOOT_ERR0_EEPROM_FAIL;
+ }
+
if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
dev_warn(hdev->dev,
"Device boot warning - Skipped DRAM initialization\n");
@@ -1476,6 +1520,8 @@ static void hl_fw_preboot_update_state(struct hl_device *hdev)
*/
prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
+ prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);
+
dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
cpu_boot_dev_sts0);
@@ -1514,7 +1560,7 @@ int hl_fw_read_preboot_status(struct hl_device *hdev)
hdev->asic_funcs->init_firmware_preload_params(hdev);
/*
- * In order to determine boot method (static VS dymanic) we need to
+ * In order to determine boot method (static VS dynamic) we need to
* read the boot caps register
*/
rc = hl_fw_read_preboot_caps(hdev);
@@ -1781,7 +1827,7 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
*
* @return the CRC32 result
*
- * NOTE: kernel's CRC32 differ's from standard CRC32 calculation.
+ * NOTE: kernel's CRC32 differs from standard CRC32 calculation.
* in order to be aligned we need to flip the bits of both the input
* initial CRC and kernel's CRC32 result.
* in addition both sides use initial CRC of 0,
@@ -1798,7 +1844,7 @@ static u32 hl_fw_compat_crc32(u8 *data, size_t size)
*
* @hdev: pointer to the habanalabs device structure
* @addr: device address of memory transfer
- * @size: memory transter size
+ * @size: memory transfer size
* @region: PCI memory region
*
* @return 0 on success, otherwise non-zero error code
@@ -1854,50 +1900,36 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
u64 addr;
int rc;
- if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC) {
- dev_err(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
+ if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
+ dev_warn(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
fw_desc->header.magic);
- return -EIO;
- }
- if (fw_desc->header.version != HL_COMMS_DESC_VER) {
- dev_err(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
+ if (fw_desc->header.version != HL_COMMS_DESC_VER)
+ dev_warn(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
fw_desc->header.version);
- return -EIO;
- }
/*
- * calc CRC32 of data without header.
+ * Calc CRC32 of data without header. Use the size of the descriptor
+ * reported by firmware, without calculating it ourselves, to allow adding
+ * more fields to the lkd_fw_comms_desc structure.
* note that no alignment/stride address issues here as all structures
- * are 64 bit padded
+ * are 64 bit padded.
*/
- data_size = sizeof(struct lkd_fw_comms_desc) -
- sizeof(struct comms_desc_header);
data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
-
- if (le16_to_cpu(fw_desc->header.size) != data_size) {
- dev_err(hdev->dev,
- "Invalid descriptor size 0x%x, expected size 0x%zx\n",
- le16_to_cpu(fw_desc->header.size), data_size);
- return -EIO;
- }
+ data_size = le16_to_cpu(fw_desc->header.size);
data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
-
if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
- dev_err(hdev->dev,
- "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
- data_crc32, fw_desc->header.crc32);
+ dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
+ data_crc32, fw_desc->header.crc32);
return -EIO;
}
/* find memory region to which to copy the image */
addr = le64_to_cpu(fw_desc->img_addr);
region_id = hl_get_pci_memory_region(hdev, addr);
- if ((region_id != PCI_REGION_SRAM) &&
- ((region_id != PCI_REGION_DRAM))) {
- dev_err(hdev->dev,
- "Invalid region to copy FW image address=%llx\n", addr);
+ if ((region_id != PCI_REGION_SRAM) && ((region_id != PCI_REGION_DRAM))) {
+ dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
return -EIO;
}
@@ -1914,8 +1946,7 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
fw_loader->dynamic_loader.fw_image_size,
region);
if (rc) {
- dev_err(hdev->dev,
- "invalid mem transfer request for FW image\n");
+ dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
return rc;
}
@@ -2422,18 +2453,6 @@ static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
msg.reset_cause = *(__u8 *) data;
break;
- case HL_COMMS_BINNING_CONF_TYPE:
- {
- struct fw_binning_conf *binning_conf = (struct fw_binning_conf *) data;
-
- msg.tpc_binning_conf = cpu_to_le64(binning_conf->tpc_binning);
- msg.dec_binning_conf = cpu_to_le32(binning_conf->dec_binning);
- msg.hbm_binning_conf = cpu_to_le32(binning_conf->hbm_binning);
- msg.edma_binning_conf = cpu_to_le32(binning_conf->edma_binning);
- msg.mme_redundancy_conf = cpu_to_le32(binning_conf->mme_redundancy);
- break;
- }
-
default:
dev_err(hdev->dev,
"Send COMMS message - invalid message type %u\n",
@@ -2503,13 +2522,6 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
*/
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
- /* if no preboot loaded indication- wait for preboot */
- if (!(hdev->fw_loader.fw_comp_loaded & FW_TYPE_PREBOOT_CPU)) {
- rc = hl_fw_wait_preboot_ready(hdev);
- if (rc)
- return -EIO;
- }
-
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
0, true,
fw_loader->cpu_timeout);
@@ -2547,7 +2559,7 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
/*
* when testing FW load (without Linux) on PLDM we don't want to
* wait until boot fit is active as it may take several hours.
- * instead, we load the bootfit and let it do all initializations in
+ * instead, we load the bootfit and let it do all initialization in
* the background.
*/
if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
@@ -2961,3 +2973,49 @@ void hl_fw_set_max_power(struct hl_device *hdev)
if (rc)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
}
+
+static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
+ u32 nonce, u32 timeout)
+{
+ struct cpucp_packet pkt = {};
+ dma_addr_t req_dma_addr;
+ void *req_cpu_addr;
+ int rc;
+
+ req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
+ if (!req_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
+ return -ENOMEM;
+ }
+
+ memset(data, 0, size);
+
+ pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(req_dma_addr);
+ pkt.data_max_size = cpu_to_le32(size);
+ pkt.nonce = cpu_to_le32(nonce);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ timeout, NULL);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
+ goto out;
+ }
+
+ memcpy(data, req_cpu_addr, size);
+
+out:
+ hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
+
+ return rc;
+}
+
+int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
+ u32 nonce)
+{
+ return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
+ sizeof(struct cpucp_sec_attest_info), nonce,
+ HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
+}
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index d59bba9e55c9..58c95b13be69 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -66,6 +66,7 @@ struct hl_fpriv;
#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_MON_DUMP_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC 10000000 /* 10s */
#define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */
#define HL_FW_COMMS_STATUS_PLDM_POLL_INTERVAL_USEC 1000000 /* 1s */
@@ -94,7 +95,7 @@ struct hl_fpriv;
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
/**
- * enum hl_mmu_page_table_locaion - mmu page table location
+ * enum hl_mmu_page_table_location - mmu page table location
* @MMU_DR_PGT: page-table is located on device DRAM.
* @MMU_HR_PGT: page-table is located on host memory.
* @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported.
@@ -143,6 +144,25 @@ enum hl_mmu_enablement {
#define HL_MAX_DCORES 8
+/* DMA alloc/free wrappers */
+#define hl_asic_dma_alloc_coherent(hdev, size, dma_handle, flags) \
+ hl_asic_dma_alloc_coherent_caller(hdev, size, dma_handle, flags, __func__)
+
+#define hl_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle) \
+ hl_cpu_accessible_dma_pool_alloc_caller(hdev, size, dma_handle, __func__)
+
+#define hl_asic_dma_pool_zalloc(hdev, size, mem_flags, dma_handle) \
+ hl_asic_dma_pool_zalloc_caller(hdev, size, mem_flags, dma_handle, __func__)
+
+#define hl_asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle) \
+ hl_asic_dma_free_coherent_caller(hdev, size, cpu_addr, dma_handle, __func__)
+
+#define hl_cpu_accessible_dma_pool_free(hdev, size, vaddr) \
+ hl_cpu_accessible_dma_pool_free_caller(hdev, size, vaddr, __func__)
+
+#define hl_asic_dma_pool_free(hdev, vaddr, dma_addr) \
+ hl_asic_dma_pool_free_caller(hdev, vaddr, dma_addr, __func__)
+
/*
* Reset Flags
*
@@ -208,6 +228,7 @@ enum hl_protection_levels {
* struct iterate_module_ctx - HW module iterator
* @fn: function to apply to each HW module instance
* @data: optional internal data to the function iterator
+ * @rc: return code for optional use of iterator/iterator-caller
*/
struct iterate_module_ctx {
/*
@@ -217,10 +238,12 @@ struct iterate_module_ctx {
* @inst: HW module instance within the block
* @offset: current HW module instance offset from the 1-st HW module instance
* in the 1-st block
- * @data: function specific data
+ * @ctx: the iterator context.
*/
- void (*fn)(struct hl_device *hdev, int block, int inst, u32 offset, void *data);
+ void (*fn)(struct hl_device *hdev, int block, int inst, u32 offset,
+ struct iterate_module_ctx *ctx);
void *data;
+ int rc;
};
struct hl_block_glbl_sec {
@@ -342,7 +365,8 @@ enum hl_cs_type {
CS_TYPE_WAIT,
CS_TYPE_COLLECTIVE_WAIT,
CS_RESERVE_SIGNALS,
- CS_UNRESERVE_SIGNALS
+ CS_UNRESERVE_SIGNALS,
+ CS_TYPE_ENGINE_CORE
};
/*
@@ -544,10 +568,6 @@ struct hl_hints_range {
* @tpc_binning_mask: which TPCs are binned. 0 means usable and 1 means binned.
* @dram_enabled_mask: which DRAMs are enabled.
* @dram_binning_mask: which DRAMs are binned. 0 means usable, 1 means binned.
- * @cb_va_start_addr: virtual start address of command buffers which are mapped
- * to the device's MMU.
- * @cb_va_end_addr: virtual end address of command buffers which are mapped to
- * the device's MMU.
* @dram_hints_align_mask: dram va hint addresses alignment mask which is used
* for hints validity check.
* @cfg_base_address: config space base address.
@@ -614,6 +634,7 @@ struct hl_hints_range {
* which the property supports_user_set_page_size is true
* (i.e. the DRAM supports multiple page sizes), otherwise
* it will shall be equal to dram_page_size.
+ * @num_engine_cores: number of engine cpu cores
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -658,6 +679,7 @@ struct hl_hints_range {
* @set_max_power_on_device_init: true if need to set max power in F/W on device init.
* @supports_user_set_page_size: true if user can set the allocation page size.
* @dma_mask: the dma mask to be set for this device
+ * @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -689,8 +711,6 @@ struct asic_fixed_properties {
u64 tpc_binning_mask;
u64 dram_enabled_mask;
u64 dram_binning_mask;
- u64 cb_va_start_addr;
- u64 cb_va_end_addr;
u64 dram_hints_align_mask;
u64 cfg_base_address;
u64 mmu_cache_mng_addr;
@@ -734,6 +754,7 @@ struct asic_fixed_properties {
u32 faulty_dram_cluster_map;
u32 xbar_edge_enabled_mask;
u32 device_mem_alloc_default_page_size;
+ u32 num_engine_cores;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -766,6 +787,7 @@ struct asic_fixed_properties {
u8 set_max_power_on_device_init;
u8 supports_user_set_page_size;
u8 dma_mask;
+ u8 supports_advanced_cpucp_rc;
};
/**
@@ -797,7 +819,7 @@ struct hl_fence {
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
* @hw_sob: the H/W SOB used in this signal/wait CS.
- * @encaps_sig_hdl: encaps signals hanlder.
+ * @encaps_sig_hdl: encaps signals handler.
* @cs_seq: command submission sequence number.
* @type: type of the CS - signal/wait.
* @sob_val: the SOB value that is used in this signal/wait CS.
@@ -898,14 +920,14 @@ struct hl_mmap_mem_buf {
* @buf: back pointer to the parent mappable memory buffer
* @debugfs_list: node in debugfs list of command buffers.
* @pool_list: node in pool list of command buffers.
- * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
- * the device's MMU.
* @kernel_address: Holds the CB's kernel virtual address.
+ * @virtual_addr: Holds the CB's virtual address.
* @bus_address: Holds the CB's DMA address.
* @size: holds the CB's size.
+ * @roundup_size: holds the CB size after rounding it up to the page size.
* @cs_cnt: holds number of CS that this CB participates in.
* @is_pool: true if CB was acquired from the pool, false otherwise.
- * @is_internal: internaly allocated
+ * @is_internal: internally allocated
* @is_mmu_mapped: true if the CB is mapped to the device's MMU.
*/
struct hl_cb {
@@ -914,10 +936,11 @@ struct hl_cb {
struct hl_mmap_mem_buf *buf;
struct list_head debugfs_list;
struct list_head pool_list;
- struct list_head va_block_list;
void *kernel_address;
+ u64 virtual_addr;
dma_addr_t bus_address;
u32 size;
+ u32 roundup_size;
atomic_t cs_cnt;
u8 is_pool;
u8 is_internal;
@@ -1113,7 +1136,7 @@ struct timestamp_reg_info {
* @fence: hl fence object for interrupt completion
* @cq_target_value: CQ target value
* @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt
- * handler for taget value comparison
+ * handler for target value comparison
*/
struct hl_user_pending_interrupt {
struct timestamp_reg_info ts_reg_info;
@@ -1372,6 +1395,18 @@ struct fw_load_mgr {
struct hl_cs;
/**
+ * struct engines_data - asic engines data
+ * @buf: buffer for engines data in ascii
+ * @actual_size: actual size of data that was written by the driver to the allocated buffer
+ * @allocated_buf_size: total size of allocated buffer
+ */
+struct engines_data {
+ char *buf;
+ int actual_size;
+ u32 allocated_buf_size;
+};
+
+/**
* struct hl_asic_funcs - ASIC specific functions that can be called from
* common code.
* @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
@@ -1434,11 +1469,9 @@ struct hl_cs;
* @send_heartbeat: send is-alive packet to CPU-CP and verify response.
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
- * @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset
+ * @compute_reset_late_init: perform certain actions needed after a compute reset
* @hw_queues_lock: acquire H/W queues lock.
* @hw_queues_unlock: release H/W queues lock.
- * @kdma_lock: acquire H/W queues lock. Relevant from GRECO ASIC
- * @kdma_unlock: release H/W queues lock. Relevant from GRECO ASIC
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
* @get_monitor_dump: retrieve monitor registers dump from F/W.
@@ -1498,6 +1531,8 @@ struct hl_cs;
* @check_if_razwi_happened: check if there was a razwi due to RR violation.
* @access_dev_mem: access device memory
* @set_dram_bar_base: set the base of the DRAM BAR
+ * @set_engine_cores: set a config command to engine cores
+ * @send_device_activity: indication to FW about device availability
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1570,13 +1605,11 @@ struct hl_asic_funcs {
int (*mmu_prefetch_cache_range)(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
- bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
- u8 mask_len, struct seq_file *s);
- int (*non_hard_reset_late_init)(struct hl_device *hdev);
+ bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e);
+ int (*compute_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
- void (*kdma_lock)(struct hl_device *hdev, int dcore_id);
- void (*kdma_unlock)(struct hl_device *hdev, int dcore_id);
u32 (*get_pci_id)(struct hl_device *hdev);
int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size);
int (*get_monitor_dump)(struct hl_device *hdev, void *data);
@@ -1634,6 +1667,9 @@ struct hl_asic_funcs {
int (*access_dev_mem)(struct hl_device *hdev, enum pci_region region_type,
u64 addr, u64 *val, enum debugfs_access_type acc_type);
u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
+ int (*set_engine_cores)(struct hl_device *hdev, u32 *core_ids,
+ u32 num_cores, u32 core_command);
+ int (*send_device_activity)(struct hl_device *hdev, bool open);
};
@@ -1727,10 +1763,10 @@ struct hl_cs_outcome {
/**
* struct hl_cs_outcome_store - represents a limited store of completed CS outcomes
- * @outcome_map: index of completed CS searcheable by sequence number
+ * @outcome_map: index of completed CS searchable by sequence number
* @used_list: list of outcome objects currently in use
* @free_list: list of outcome objects currently not in use
- * @nodes_pool: a static pool of preallocated outcome objects
+ * @nodes_pool: a static pool of pre-allocated outcome objects
* @db_lock: any operation on the store must take this lock
*/
struct hl_cs_outcome_store {
@@ -1754,12 +1790,10 @@ struct hl_cs_outcome_store {
* @refcount: reference counter for the context. Context is released only when
* this hits 0l. It is incremented on CS and CS_WAIT.
* @cs_pending: array of hl fence objects representing pending CS.
- * @outcome_store: storage data structure used to remember ouitcomes of completed
+ * @outcome_store: storage data structure used to remember outcomes of completed
* command submissions for a long time after CS id wraparound.
* @va_range: holds available virtual addresses for host and dram mappings.
* @mem_hash_lock: protects the mem_hash.
- * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
- * MMU hash or walking the PGT requires talking this lock.
* @hw_block_list_lock: protects the HW block memory list.
* @debugfs_list: node in debugfs list of contexts.
* @hw_block_mem_list: list of HW block virtual mapped addresses.
@@ -1767,6 +1801,7 @@ struct hl_cs_outcome_store {
* @cb_va_pool: device VA pool for command buffers which are mapped to the
* device's MMU.
* @sig_mgr: encaps signals handle manager.
+ * @cb_va_pool_base: the base address for the device VA pool
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
* index to cs_pending array.
@@ -1795,13 +1830,13 @@ struct hl_ctx {
struct hl_cs_outcome_store outcome_store;
struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX];
struct mutex mem_hash_lock;
- struct mutex mmu_lock;
struct mutex hw_block_list_lock;
struct list_head debugfs_list;
struct list_head hw_block_mem_list;
struct hl_cs_counters_atomic cs_counters;
struct gen_pool *cb_va_pool;
struct hl_encaps_signals_mgr sig_mgr;
+ u64 cb_va_pool_base;
u64 cs_sequence;
u64 *dram_default_hops;
spinlock_t cs_lock;
@@ -1823,7 +1858,6 @@ struct hl_ctx_mgr {
};
-
/*
* COMMAND SUBMISSIONS
*/
@@ -1889,7 +1923,7 @@ struct hl_userptr {
* @tdr_active: true if TDR was activated for this CS (to prevent
* double TDR activation).
* @aborted: true if CS was aborted due to some device error.
- * @timestamp: true if a timestmap must be captured upon completion.
+ * @timestamp: true if a timestamp must be captured upon completion.
* @staged_last: true if this is the last staged CS and needs completion.
* @staged_first: true if this is the first staged CS and we need to receive
* timeout for this CS.
@@ -2047,14 +2081,16 @@ struct hl_vm_hash_node {
* @node: node to hang on the list in context object.
* @ctx: the context this node belongs to.
* @vaddr: virtual address of the HW block.
- * @size: size of the block.
+ * @block_size: size of the block.
+ * @mapped_size: size of the block which is mapped. May change if partial un-mappings are done.
* @id: HW block id (handle).
*/
struct hl_vm_hw_block_list_node {
struct list_head node;
struct hl_ctx *ctx;
unsigned long vaddr;
- u32 size;
+ u32 block_size;
+ u32 mapped_size;
u32 id;
};
@@ -2214,7 +2250,7 @@ struct hl_info_list {
/**
* struct hl_debugfs_entry - debugfs dentry wrapper.
- * @info_ent: dentry realted ops.
+ * @info_ent: dentry related ops.
* @dev_entry: ASIC specific debugfs manager.
*/
struct hl_debugfs_entry {
@@ -2492,7 +2528,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
break; \
(val) = __elbi_read; \
} else {\
- (val) = RREG32((u32)addr); \
+ (val) = RREG32((u32)(addr)); \
} \
if (cond) \
break; \
@@ -2503,7 +2539,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
break; \
(val) = __elbi_read; \
} else {\
- (val) = RREG32((u32)addr); \
+ (val) = RREG32((u32)(addr)); \
} \
break; \
} \
@@ -2919,7 +2955,7 @@ struct razwi_info {
* struct undefined_opcode_info - info about last undefined opcode error
* @timestamp: timestamp of the undefined opcode error
* @cb_addr_streams: CB addresses (per stream) that are currently exists in the PQ
- * entiers. In case all streams array entries are
+ * entries. In case all streams array entries are
* filled with values, it means the execution was in Lower-CP.
* @cq_addr: the address of the current handled command buffer
* @cq_size: the size of the current handled command buffer
@@ -2946,12 +2982,12 @@ struct undefined_opcode_info {
};
/**
- * struct last_error_session_info - info about last session errors occurred.
- * @cs_timeout: CS timeout error last information.
- * @razwi: razwi last information.
+ * struct hl_error_info - holds information collected during an error.
+ * @cs_timeout: CS timeout error information.
+ * @razwi: razwi information.
* @undef_opcode: undefined opcode information
*/
-struct last_error_session_info {
+struct hl_error_info {
struct cs_timeout_info cs_timeout;
struct razwi_info razwi;
struct undefined_opcode_info undef_opcode;
@@ -2960,7 +2996,7 @@ struct last_error_session_info {
/**
* struct hl_reset_info - holds current device reset information.
* @lock: lock to protect critical reset flows.
- * @compute_reset_cnt: number of compte resets since the driver was loaded.
+ * @compute_reset_cnt: number of compute resets since the driver was loaded.
* @hard_reset_cnt: number of hard resets since the driver was loaded.
* @hard_reset_schedule_flags: hard reset is scheduled to after current compute reset,
* here we hold the hard reset flags.
@@ -2971,7 +3007,7 @@ struct last_error_session_info {
* @hard_reset_pending: is there a hard reset work pending.
* @curr_reset_cause: saves an enumerated reset cause when a hard reset is
* triggered, and cleared after it is shared with preboot.
- * @prev_reset_trigger: saves the previous trigger which caused a reset, overidden
+ * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
* with a new value on next reset
* @reset_trigger_repeated: set if device reset is triggered more than once with
* same cause.
@@ -3041,6 +3077,12 @@ struct hl_reset_info {
* @asid_mutex: protects asid_bitmap.
* @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
* @debug_lock: protects critical section of setting debug mode for device
+ * @mmu_lock: protects the MMU page tables and invalidation h/w. Although the
+ * page tables are per context, the invalidation h/w is per MMU.
+ * Therefore, we can't allow multiple contexts (we only have two,
+ * user and kernel) to access the invalidation h/w at the same time.
+ * In addition, any change to the PGT, modifying the MMU hash or
+ * walking the PGT requires taking this lock.
* @asic_prop: ASIC specific immutable properties.
* @asic_funcs: ASIC specific functions.
* @asic_specific: ASIC specific information to use only from ASIC files.
@@ -3049,7 +3091,7 @@ struct hl_reset_info {
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
- * @cb_pool: list of preallocated CBs.
+ * @cb_pool: list of pre-allocated CBs.
* @cb_pool_lock: protects the CB pool.
* @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
* @internal_cb_pool_dma_addr: internal command buffer pool dma address.
@@ -3070,7 +3112,7 @@ struct hl_reset_info {
* @state_dump_specs: constants and dictionaries needed to dump system state.
* @multi_cs_completion: array of multi-CS completion.
* @clk_throttling: holds information about current/previous clock throttling events
- * @last_error: holds information about last session in which CS timeout or razwi error occurred.
+ * @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_major_version: major version of current loaded preboot.
@@ -3111,7 +3153,8 @@ struct hl_reset_info {
* @edma_binning: contains mask of edma engines that is received from the f/w which
* indicates which edma engines are binned-out
* @id: device minor.
- * @id_control: minor of the control device
+ * @id_control: minor of the control device.
+ * @cdev_idx: char device index. Used for setting its name.
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
* addresses.
* @is_in_dram_scrub: true if dram scrub operation is on going.
@@ -3165,6 +3208,7 @@ struct hl_reset_info {
* Used only for testing.
* @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies
* that the f/w is always alive. Used only for testing.
+ * @supports_ctx_switch: true if a ctx switch is required upon first submission.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -3204,6 +3248,7 @@ struct hl_device {
struct mutex asid_mutex;
struct mutex send_cpu_message_lock;
struct mutex debug_lock;
+ struct mutex mmu_lock;
struct asic_fixed_properties asic_prop;
const struct hl_asic_funcs *asic_funcs;
void *asic_specific;
@@ -3242,7 +3287,7 @@ struct hl_device {
struct multi_cs_completion multi_cs_completion[
MULTI_CS_MAX_USER_CTX];
struct hl_clk_throttle clk_throttling;
- struct last_error_session_info last_error;
+ struct hl_error_info captured_err_info;
struct hl_reset_info reset_info;
@@ -3271,6 +3316,7 @@ struct hl_device {
u32 edma_binning;
u16 id;
u16 id_control;
+ u16 cdev_idx;
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
u8 disabled;
@@ -3300,6 +3346,7 @@ struct hl_device {
u8 compute_ctx_in_release;
u8 supports_mmu_prefetch;
u8 reset_upon_device_release;
+ u8 supports_ctx_switch;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -3426,15 +3473,18 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
-void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag);
-void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle);
-void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle);
-void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
-void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
- dma_addr_t *dma_handle);
-void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr);
+void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, const char *caller);
+void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, const char *caller);
+void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, const char *caller);
+void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
+ const char *caller);
+void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+ dma_addr_t *dma_handle, const char *caller);
+void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
+ const char *caller);
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
@@ -3513,6 +3563,7 @@ void hl_sysfs_fini(struct hl_device *hdev);
int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);
+void hl_hwmon_release_resources(struct hl_device *hdev);
int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
@@ -3557,7 +3608,7 @@ void hl_hw_block_mem_init(struct hl_ctx *ctx);
void hl_hw_block_mem_fini(struct hl_ctx *ctx);
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
- enum hl_va_range_type type, u32 size, u32 alignment);
+ enum hl_va_range_type type, u64 size, u32 alignment);
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
u64 start_addr, u64 size);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
@@ -3674,6 +3725,7 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
struct cpucp_hbm_row_info *info);
int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num);
int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid);
+int hl_fw_send_device_activity(struct hl_device *hdev, bool open);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
@@ -3697,6 +3749,8 @@ int hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long *va
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long value);
long hl_fw_get_max_power(struct hl_device *hdev);
void hl_fw_set_max_power(struct hl_device *hdev);
+int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
+ u32 nonce);
int hl_set_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev, int sensor_index, u32 attr, long value);
int hl_set_power(struct hl_device *hdev, int sensor_index, u32 attr, long value);
@@ -3743,6 +3797,7 @@ struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
void *args);
+__printf(2, 3) void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
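
The seq_file parameter of is_device_idle() is replaced by a caller-provided struct engines_data, filled through hl_engine_data_sprintf(). The helper's body is not part of these hunks; a minimal sketch of the expected behavior, assuming it appends with vsnprintf() into the pre-allocated buffer and keeps growing actual_size even on overflow so callers can detect truncation, could look like:

	void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
	{
		va_list args;
		int str_size;

		va_start(args, fmt);
		/* how many bytes this print needs, not counting the NUL terminator */
		str_size = vsnprintf(NULL, 0, fmt, args);
		va_end(args);

		/* append only while the formatted text still fits in the buffer */
		if (e->actual_size + str_size < e->allocated_buf_size) {
			va_start(args, fmt);
			vsnprintf(e->buf + e->actual_size, str_size + 1, fmt, args);
			va_end(args);
		}

		/* advance unconditionally so actual_size > allocated_buf_size flags overflow */
		e->actual_size += str_size;
	}

This matches how engine_status_info() further below treats actual_size larger than allocated_buf_size as an overflow condition.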
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index f733ead605e7..112632afe7d5 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -14,6 +14,9 @@
#include <linux/aer.h>
#include <linux/module.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/habanalabs.h>
+
#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team"
#define HL_DRIVER_DESC "Driver for HabanaLabs's AI Accelerators"
@@ -27,7 +30,10 @@ static struct class *hl_class;
static DEFINE_IDR(hl_devs_idr);
static DEFINE_MUTEX(hl_devs_idr_lock);
-static int timeout_locked = 30;
+#define HL_DEFAULT_TIMEOUT_LOCKED 30 /* 30 seconds */
+#define GAUDI_DEFAULT_TIMEOUT_LOCKED 600 /* 10 minutes */
+
+static int timeout_locked = HL_DEFAULT_TIMEOUT_LOCKED;
static int reset_on_lockup = 1;
static int memory_scrub;
static ulong boot_error_status_mask = ULONG_MAX;
@@ -55,14 +61,12 @@ MODULE_PARM_DESC(boot_error_status_mask,
#define PCI_IDS_GAUDI_SEC 0x1010
#define PCI_IDS_GAUDI2 0x1020
-#define PCI_IDS_GAUDI2_SEC 0x1030
static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI_SEC), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2), },
- { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2_SEC), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
@@ -92,9 +96,6 @@ static enum hl_asic_type get_asic_type(u16 device)
case PCI_IDS_GAUDI2:
asic_type = ASIC_GAUDI2;
break;
- case PCI_IDS_GAUDI2_SEC:
- asic_type = ASIC_GAUDI2_SEC;
- break;
default:
asic_type = ASIC_INVALID;
break;
@@ -107,7 +108,6 @@ static bool is_asic_secured(enum hl_asic_type asic_type)
{
switch (asic_type) {
case ASIC_GAUDI_SEC:
- case ASIC_GAUDI2_SEC:
return true;
default:
return false;
@@ -161,7 +161,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
mutex_lock(&hdev->fpriv_list_lock);
if (!hl_device_operational(hdev, &status)) {
- dev_err_ratelimited(hdev->dev,
+ dev_dbg_ratelimited(hdev->dev,
"Can't open %s because it is %s\n",
dev_name(hdev->dev), hdev->status[status]);
@@ -207,11 +207,13 @@ int hl_device_open(struct inode *inode, struct file *filp)
list_add(&hpriv->dev_node, &hdev->fpriv_list);
mutex_unlock(&hdev->fpriv_list_lock);
+ hdev->asic_funcs->send_device_activity(hdev, true);
+
hl_debugfs_add_file(hpriv);
- atomic_set(&hdev->last_error.cs_timeout.write_enable, 1);
- atomic_set(&hdev->last_error.razwi.write_enable, 1);
- hdev->last_error.undef_opcode.write_enable = true;
+ atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
+ atomic_set(&hdev->captured_err_info.razwi.write_enable, 1);
+ hdev->captured_err_info.undef_opcode.write_enable = true;
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@@ -269,7 +271,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
mutex_lock(&hdev->fpriv_ctrl_list_lock);
if (!hl_device_operational(hdev, NULL)) {
- dev_err_ratelimited(hdev->dev_ctrl,
+ dev_dbg_ratelimited(hdev->dev_ctrl,
"Can't open %s because it is disabled or in reset\n",
dev_name(hdev->dev_ctrl));
rc = -EPERM;
@@ -314,12 +316,22 @@ static void copy_kernel_module_params_to_device(struct hl_device *hdev)
hdev->boot_error_status_mask = boot_error_status_mask;
}
-static void fixup_device_params_per_asic(struct hl_device *hdev)
+static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
{
switch (hdev->asic_type) {
- case ASIC_GOYA:
case ASIC_GAUDI:
case ASIC_GAUDI_SEC:
+ /* If the user didn't request a timeout other than the default one, we have
+ * a different default timeout for Gaudi
+ */
+ if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
+ hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
+ MSEC_PER_SEC);
+
+ hdev->reset_upon_device_release = 0;
+ break;
+
+ case ASIC_GOYA:
hdev->reset_upon_device_release = 0;
break;
@@ -339,7 +351,7 @@ static int fixup_device_params(struct hl_device *hdev)
hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
if (tmp_timeout)
- hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * 1000);
+ hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * MSEC_PER_SEC);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
@@ -360,7 +372,7 @@ static int fixup_device_params(struct hl_device *hdev)
if (!hdev->cpu_queues_enable)
hdev->heartbeat = 0;
- fixup_device_params_per_asic(hdev);
+ fixup_device_params_per_asic(hdev, tmp_timeout);
return 0;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 6a30bd98ab5e..43afe40966e5 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -14,6 +14,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
@@ -103,6 +104,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
+ hw_ip.security_enabled = prop->fw_security_enabled;
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
@@ -591,8 +593,8 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- info.seq = hdev->last_error.cs_timeout.seq;
- info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout.timestamp);
+ info.seq = hdev->captured_err_info.cs_timeout.seq;
+ info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -607,12 +609,12 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->last_error.razwi.timestamp);
- info.addr = hdev->last_error.razwi.addr;
- info.engine_id_1 = hdev->last_error.razwi.engine_id_1;
- info.engine_id_2 = hdev->last_error.razwi.engine_id_2;
- info.no_engine_id = hdev->last_error.razwi.non_engine_initiator;
- info.error_type = hdev->last_error.razwi.type;
+ info.timestamp = ktime_to_ns(hdev->captured_err_info.razwi.timestamp);
+ info.addr = hdev->captured_err_info.razwi.addr;
+ info.engine_id_1 = hdev->captured_err_info.razwi.engine_id_1;
+ info.engine_id_2 = hdev->captured_err_info.razwi.engine_id_2;
+ info.no_engine_id = hdev->captured_err_info.razwi.non_engine_initiator;
+ info.error_type = hdev->captured_err_info.razwi.type;
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -627,13 +629,13 @@ static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *ar
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->last_error.undef_opcode.timestamp);
- info.engine_id = hdev->last_error.undef_opcode.engine_id;
- info.cq_addr = hdev->last_error.undef_opcode.cq_addr;
- info.cq_size = hdev->last_error.undef_opcode.cq_size;
- info.stream_id = hdev->last_error.undef_opcode.stream_id;
- info.cb_addr_streams_len = hdev->last_error.undef_opcode.cb_addr_streams_len;
- memcpy(info.cb_addr_streams, hdev->last_error.undef_opcode.cb_addr_streams,
+ info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
+ info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
+ info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
+ info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
+ info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
+ info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
+ memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
sizeof(info.cb_addr_streams));
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
@@ -660,6 +662,55 @@ static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
+static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct cpucp_sec_attest_info *sec_attest_info;
+ struct hl_info_sec_attest *info;
+ u32 max_size = args->return_size;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
+ if (!sec_attest_info)
+ return -ENOMEM;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rc = -ENOMEM;
+ goto free_sec_attest_info;
+ }
+
+ rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
+ if (rc)
+ goto free_info;
+
+ info->nonce = le32_to_cpu(sec_attest_info->nonce);
+ info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
+ info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
+ info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
+ info->pcr_num_reg = sec_attest_info->pcr_num_reg;
+ info->pcr_reg_len = sec_attest_info->pcr_reg_len;
+ info->quote_sig_len = sec_attest_info->quote_sig_len;
+ memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
+ memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
+ memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
+ memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
+ memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));
+
+ rc = copy_to_user(out, info,
+ min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;
+
+free_info:
+ kfree(info);
+free_sec_attest_info:
+ kfree(sec_attest_info);
+
+ return rc;
+}
+
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
int rc;
@@ -697,6 +748,42 @@ static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
return 0;
}
+static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ u32 status_buf_size = args->return_size;
+ struct hl_device *hdev = hpriv->hdev;
+ struct engines_data eng_data;
+ int rc;
+
+ if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
+ return -EINVAL;
+
+ eng_data.actual_size = 0;
+ eng_data.allocated_buf_size = status_buf_size;
+ eng_data.buf = vmalloc(status_buf_size);
+ if (!eng_data.buf)
+ return -ENOMEM;
+
+ hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);
+
+ if (eng_data.actual_size > eng_data.allocated_buf_size) {
+ dev_err(hdev->dev,
+ "Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
+ eng_data.actual_size, status_buf_size);
+ vfree(eng_data.buf);
+ return -ENOMEM;
+ }
+
+ args->user_buffer_actual_size = eng_data.actual_size;
+ rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
+ -EFAULT : 0;
+
+ vfree(eng_data.buf);
+
+ return rc;
+}
+
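From user space, the new HL_INFO_ENGINE_STATUS op returns the textual engines dump that was previously available only through debugfs. A hedged usage sketch, assuming the driver's uAPI header exports HL_IOCTL_INFO and struct hl_info_args with the return_pointer, return_size and user_buffer_actual_size fields used above (any name not visible in this diff is an assumption):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <misc/habanalabs.h>	/* assumed location of the driver uAPI header */

	static int dump_engines_status(int fd)
	{
		struct hl_info_args args;
		char *buf;
		int rc;

		buf = malloc(16384);	/* must be between SZ_1K and HL_ENGINES_DATA_MAX_SIZE */
		if (!buf)
			return -1;

		memset(&args, 0, sizeof(args));
		args.op = HL_INFO_ENGINE_STATUS;
		args.return_pointer = (uintptr_t)buf;
		args.return_size = 16384;

		rc = ioctl(fd, HL_IOCTL_INFO, &args);
		if (!rc)
			/* the driver reports how much text it actually produced */
			fwrite(buf, 1, args.user_buffer_actual_size, stdout);

		free(buf);
		return rc;
	}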
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -806,12 +893,18 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_DRAM_PENDING_ROWS:
return dram_pending_rows_info(hpriv, args);
+ case HL_INFO_SECURED_ATTESTATION:
+ return sec_attest_info(hpriv, args);
+
case HL_INFO_REGISTER_EVENTFD:
return eventfd_register(hpriv, args);
case HL_INFO_UNREGISTER_EVENTFD:
return eventfd_unregister(hpriv, args);
+ case HL_INFO_ENGINE_STATUS:
+ return engine_status_info(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 3f15ab9d827f..d0087c0ec48c 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -826,9 +826,7 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
q->kernel_address = p;
- q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
- sizeof(*q->shadow_queue),
- GFP_KERNEL);
+ q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, sizeof(struct hl_cs_job *), GFP_KERNEL);
if (!q->shadow_queue) {
dev_err(hdev->dev,
"Failed to allocate shadow queue for H/W queue %d\n",
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index 57f5d2c48330..55eb0203817f 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -194,7 +194,8 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sen
curr_arr[sensors_by_type_next_index[type]++] = flags;
}
- channels_info = kcalloc(num_active_sensor_types + 1, sizeof(*channels_info), GFP_KERNEL);
+ channels_info = kcalloc(num_active_sensor_types + 1, sizeof(struct hwmon_channel_info *),
+ GFP_KERNEL);
if (!channels_info) {
rc = -ENOMEM;
goto channels_info_array_err;
@@ -910,3 +911,24 @@ void hl_hwmon_fini(struct hl_device *hdev)
hwmon_device_unregister(hdev->hwmon_dev);
}
+
+void hl_hwmon_release_resources(struct hl_device *hdev)
+{
+ const struct hwmon_channel_info **channel_info_arr;
+ int i = 0;
+
+ if (!hdev->hl_chip_info->info)
+ return;
+
+ channel_info_arr = hdev->hl_chip_info->info;
+
+ while (channel_info_arr[i]) {
+ kfree(channel_info_arr[i]->config);
+ kfree(channel_info_arr[i]);
+ i++;
+ }
+
+ kfree(channel_info_arr);
+
+ hdev->hl_chip_info->info = NULL;
+}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 61bc1bfe984a..ef28f3b37b93 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -457,7 +457,7 @@ static void merge_va_blocks_locked(struct hl_device *hdev,
prev = list_prev_entry(va_block, node);
if (&prev->node != va_list && prev->end + 1 == va_block->start) {
prev->end = va_block->end;
- prev->size = prev->end - prev->start;
+ prev->size = prev->end - prev->start + 1;
list_del(&va_block->node);
kfree(va_block);
va_block = prev;
@@ -466,7 +466,7 @@ static void merge_va_blocks_locked(struct hl_device *hdev,
next = list_next_entry(va_block, node);
if (&next->node != va_list && va_block->end + 1 == next->start) {
next->start = va_block->start;
- next->size = next->end - next->start;
+ next->size = next->end - next->start + 1;
list_del(&va_block->node);
kfree(va_block);
}
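
These two hunks fix an off-by-one in the size of merged virtual-address blocks: start and end are inclusive bounds, so a block's size is end - start + 1. For example, merging a block at 0x1000-0x1fff with a neighbour at 0x2000-0x2fff yields start = 0x1000 and end = 0x2fff, i.e. 0x2000 bytes; the old end - start computation would report only 0x1fff.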
@@ -755,7 +755,7 @@ out:
* - Return the start address of the virtual block.
*/
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
- enum hl_va_range_type type, u32 size, u32 alignment)
+ enum hl_va_range_type type, u64 size, u32 alignment)
{
return get_va_block(hdev, ctx->va_range[type], size, 0,
max(alignment, ctx->va_range[type]->page_size),
@@ -1210,18 +1210,18 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
goto va_block_err;
}
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
goto map_err;
}
rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, ret_vaddr, phys_pg_pack->total_size);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc)
goto map_err;
@@ -1362,7 +1362,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
else
vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
@@ -1375,7 +1375,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
phys_pg_pack->total_size);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
/*
* If the context is closing we don't need to check for the MMU cache
@@ -1418,18 +1418,23 @@ vm_type_err:
return rc;
}
-static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
- u32 *size)
+static int map_block(struct hl_device *hdev, u64 address, u64 *handle, u32 *size)
{
- u32 block_id = 0;
+ u32 block_id;
int rc;
+ *handle = 0;
+ if (size)
+ *size = 0;
+
rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
+ if (rc)
+ return rc;
*handle = block_id | HL_MMAP_TYPE_BLOCK;
*handle <<= PAGE_SHIFT;
- return rc;
+ return 0;
}
static void hw_block_vm_close(struct vm_area_struct *vma)
@@ -1437,6 +1442,13 @@ static void hw_block_vm_close(struct vm_area_struct *vma)
struct hl_vm_hw_block_list_node *lnode =
(struct hl_vm_hw_block_list_node *) vma->vm_private_data;
struct hl_ctx *ctx = lnode->ctx;
+ long new_mmap_size;
+
+ new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
+ if (new_mmap_size > 0) {
+ lnode->mapped_size = new_mmap_size;
+ return;
+ }
mutex_lock(&ctx->hw_block_list_lock);
list_del(&lnode->node);
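
The partial-unmap accounting added above works per VMA fragment: hw_block_vm_close() runs once for each piece of a split mapping, and the list node (plus its context reference, released by the unchanged tail of the function) goes away only when the whole block has been unmapped. A worked example, assuming a 0x3000-byte block:

	/* mmap() of the block: mapped_size = block_size = 0x3000
	 * munmap() of a 0x1000 fragment -> vm_close(): new_mmap_size = 0x2000 > 0,
	 *	only mapped_size is updated and the node stays on the list
	 * munmap() of the remaining 0x2000 -> vm_close(): new_mmap_size = 0,
	 *	the node is unlinked and, per the rest of hw_block_vm_close(),
	 *	freed with hl_ctx_put() dropping the context reference
	 */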
@@ -1487,23 +1499,23 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
if (!lnode)
return -ENOMEM;
- vma->vm_ops = &hw_block_vm_ops;
- vma->vm_private_data = lnode;
-
- hl_ctx_get(ctx);
-
rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
if (rc) {
- hl_ctx_put(ctx);
kfree(lnode);
return rc;
}
+ hl_ctx_get(ctx);
+
lnode->ctx = ctx;
lnode->vaddr = vma->vm_start;
- lnode->size = block_size;
+ lnode->block_size = block_size;
+ lnode->mapped_size = lnode->block_size;
lnode->id = block_id;
+ vma->vm_private_data = lnode;
+ vma->vm_ops = &hw_block_vm_ops;
+
mutex_lock(&ctx->hw_block_list_lock);
list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
mutex_unlock(&ctx->hw_block_list_lock);
@@ -2296,8 +2308,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EFAULT;
}
- userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
- GFP_KERNEL);
+ userptr->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!userptr->pages)
return -ENOMEM;
@@ -2759,13 +2770,13 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
unmap_device_va(ctx, &args, true);
}
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
/* invalidate the cache once after the unmapping loop */
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
INIT_LIST_HEAD(&free_list);
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/misc/habanalabs/common/memory_mgr.c
index 56df962d2f3c..1936d653699e 100644
--- a/drivers/misc/habanalabs/common/memory_mgr.c
+++ b/drivers/misc/habanalabs/common/memory_mgr.c
@@ -11,7 +11,7 @@
* hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
* the buffer descriptor.
*
- * @mmg: parent unifed memory manager
+ * @mmg: parent unified memory manager
* @handle: requested buffer handle
*
* Find the buffer in the store and return a pointer to its descriptor.
@@ -104,7 +104,7 @@ int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
* hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
* given handle.
*
- * @mmg: parent unifed memory manager
+ * @mmg: parent unified memory manager
* @handle: requested buffer handle
*
* Decrease the reference to the buffer, and release it if it was the last one.
@@ -137,7 +137,7 @@ int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
/**
* hl_mmap_mem_buf_alloc - allocate a new mappable buffer
*
- * @mmg: parent unifed memory manager
+ * @mmg: parent unified memory manager
* @behavior: behavior object describing this buffer polymorphic behavior
* @gfp: gfp flags to use for the memory allocations
* @args: additional args passed to behavior->alloc
@@ -222,7 +222,7 @@ static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
/**
* hl_mem_mgr_mmap - map the given buffer to the user
*
- * @mmg: unifed memory manager
+ * @mmg: unified memory manager
* @vma: the vma object for which mmap was closed.
* @args: additional args passed to behavior->mmap
*
@@ -322,7 +322,7 @@ void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
/**
* hl_mem_mgr_fini - release unified memory manager
*
- * @mmg: parent unifed memory manager
+ * @mmg: parent unified memory manager
*
* Release the unified memory manager. Shall be called from an interrupt context.
*/
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 60740de47b34..cf8946266615 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -9,6 +9,8 @@
#include "../habanalabs.h"
+#include <trace/events/habanalabs.h>
+
/**
* hl_mmu_get_funcs() - get MMU functions structure
* @hdev: habanalabs device structure.
@@ -45,6 +47,8 @@ int hl_mmu_init(struct hl_device *hdev)
if (!hdev->mmu_enable)
return 0;
+ mutex_init(&hdev->mmu_lock);
+
if (hdev->mmu_func[MMU_DR_PGT].init != NULL) {
rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
if (rc)
@@ -86,6 +90,8 @@ void hl_mmu_fini(struct hl_device *hdev)
if (hdev->mmu_func[MMU_HR_PGT].fini != NULL)
hdev->mmu_func[MMU_HR_PGT].fini(hdev);
+
+ mutex_destroy(&hdev->mmu_lock);
}
/**
@@ -104,8 +110,6 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
if (!hdev->mmu_enable)
return 0;
- mutex_init(&ctx->mmu_lock);
-
if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx);
if (rc)
@@ -149,8 +153,6 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL)
hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx);
-
- mutex_destroy(&ctx->mmu_lock);
}
/*
@@ -259,6 +261,9 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flu
if (flush_pte)
mmu_funcs->flush(ctx);
+ if (trace_habanalabs_mmu_unmap_enabled() && !rc)
+ trace_habanalabs_mmu_unmap(hdev->dev, virt_addr, 0, page_size, flush_pte);
+
return rc;
}
@@ -344,6 +349,8 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_s
if (flush_pte)
mmu_funcs->flush(ctx);
+ trace_habanalabs_mmu_map(hdev->dev, virt_addr, phys_addr, page_size, flush_pte);
+
return 0;
err:
@@ -403,6 +410,8 @@ int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
dev_err(hdev->dev,
"Map failed for va 0x%llx to pa 0x%llx\n",
curr_va, curr_pa);
+ /* last mapping failed so don't try to unmap it - reduce off by page_size */
+ off -= page_size;
goto unmap;
}
}
@@ -600,9 +609,9 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc)
return rc;
@@ -692,16 +701,16 @@ static void hl_mmu_prefetch_work_function(struct work_struct *work)
{
struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
struct hl_ctx *ctx = pfw->ctx;
+ struct hl_device *hdev = ctx->hdev;
- if (!hl_device_operational(ctx->hdev, NULL))
+ if (!hl_device_operational(hdev, NULL))
goto put_ctx;
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
- ctx->hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid,
- pfw->va, pfw->size);
+ hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid, pfw->va, pfw->size);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
put_ctx:
/*
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 6c5271f01160..36e9814139d1 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -375,6 +375,14 @@ out:
return max_size;
}
+static ssize_t security_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", hdev->asic_prop.fw_security_enabled);
+}
+
static DEVICE_ATTR_RO(armcp_kernel_ver);
static DEVICE_ATTR_RO(armcp_ver);
static DEVICE_ATTR_RO(cpld_ver);
@@ -393,6 +401,7 @@ static DEVICE_ATTR_RO(status);
static DEVICE_ATTR_RO(thermal_ver);
static DEVICE_ATTR_RO(uboot_ver);
static DEVICE_ATTR_RO(fw_os_ver);
+static DEVICE_ATTR_RO(security_enabled);
static struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
@@ -417,6 +426,7 @@ static struct attribute *hl_dev_attrs[] = {
&dev_attr_thermal_ver.attr,
&dev_attr_uboot_ver.attr,
&dev_attr_fw_os_ver.attr,
+ &dev_attr_security_enabled.attr,
NULL,
};
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index cb2988e2c7a8..92560414e843 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -899,12 +899,13 @@ static int gaudi_early_fini(struct hl_device *hdev)
*/
static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
int rc;
- if (hdev->asic_prop.fw_security_enabled) {
+ if ((hdev->fw_components & FW_TYPE_LINUX) &&
+ (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
@@ -939,9 +940,7 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
else
freq = pll_clk / (div_fctr + 1);
} else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d",
- div_sel);
+ dev_warn(hdev->dev, "Received invalid div select value: %#x", div_sel);
freq = 0;
}
}
@@ -985,9 +984,10 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev,
init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);
init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr);
- dst_addr = (prop->sram_user_base_address &
- GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
- GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
+
+ /* TPC_CMD is configured with I$ prefetch enabled, so address should be aligned to 8KB */
+ dst_addr = FIELD_PREP(GAUDI_PKT_LIN_DMA_DST_ADDR_MASK,
+ round_up(prop->sram_user_base_address, SZ_8K));
init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr);
job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
@@ -1683,23 +1683,7 @@ disable_pci_access:
static void gaudi_late_fini(struct hl_device *hdev)
{
- const struct hwmon_channel_info **channel_info_arr;
- int i = 0;
-
- if (!hdev->hl_chip_info->info)
- return;
-
- channel_info_arr = hdev->hl_chip_info->info;
-
- while (channel_info_arr[i]) {
- kfree(channel_info_arr[i]->config);
- kfree(channel_info_arr[i]);
- i++;
- }
-
- kfree(channel_info_arr);
-
- hdev->hl_chip_info->info = NULL;
+ hl_hwmon_release_resources(hdev);
}
static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
@@ -4723,7 +4707,7 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev)
addr = prop->sram_user_base_address;
size = hdev->pldm ? 0x10000 : prop->sram_size - SRAM_USER_BASE_OFFSET;
- dev_dbg(hdev->dev, "Scrubing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n",
+ dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n",
addr, addr + size, val);
rc = gaudi_memset_device_memory(hdev, addr, size, val);
if (rc) {
@@ -6911,9 +6895,9 @@ static void gaudi_handle_sw_config_stream_data(struct hl_device *hdev, u32 strea
stream, cq_ptr, size);
if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
- hdev->last_error.undef_opcode.cq_addr = cq_ptr;
- hdev->last_error.undef_opcode.cq_size = size;
- hdev->last_error.undef_opcode.stream_id = stream;
+ hdev->captured_err_info.undef_opcode.cq_addr = cq_ptr;
+ hdev->captured_err_info.undef_opcode.cq_size = size;
+ hdev->captured_err_info.undef_opcode.stream_id = stream;
}
}
@@ -6979,7 +6963,7 @@ static void gaudi_handle_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
}
if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
- struct undefined_opcode_info *undef_opcode = &hdev->last_error.undef_opcode;
+ struct undefined_opcode_info *undef_opcode = &hdev->captured_err_info.undef_opcode;
u32 arr_idx = undef_opcode->cb_addr_streams_len;
if (arr_idx == 0) {
@@ -7063,11 +7047,11 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
}
/* check for undefined opcode */
if (glbl_sts_val & TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK &&
- hdev->last_error.undef_opcode.write_enable) {
- memset(&hdev->last_error.undef_opcode, 0,
- sizeof(hdev->last_error.undef_opcode));
+ hdev->captured_err_info.undef_opcode.write_enable) {
+ memset(&hdev->captured_err_info.undef_opcode, 0,
+ sizeof(hdev->captured_err_info.undef_opcode));
- hdev->last_error.undef_opcode.write_enable = false;
+ hdev->captured_err_info.undef_opcode.write_enable = false;
*event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
}
@@ -7233,12 +7217,6 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e
switch (event_type) {
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
- /* In TPC QM event, notify on TPC assertion. While there isn't
- * a specific event for assertion yet, the FW generates QM event.
- * The SW upper layer will inspect an internal mapped area to indicate
- * if the event is a tpc assertion or tpc QM.
- */
- *event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT;
index = event_type - GAUDI_EVENT_TPC0_QM;
qid_base = GAUDI_QUEUE_ID_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmTPC0_QM_BASE + index * TPC_QMAN_OFFSET;
@@ -7349,18 +7327,19 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
/* In case it's the first razwi, save its parameters*/
- rc = atomic_cmpxchg(&hdev->last_error.razwi.write_enable, 1, 0);
+ rc = atomic_cmpxchg(&hdev->captured_err_info.razwi.write_enable, 1, 0);
if (rc) {
- hdev->last_error.razwi.timestamp = ktime_get();
- hdev->last_error.razwi.addr = razwi_addr;
- hdev->last_error.razwi.engine_id_1 = engine_id_1;
- hdev->last_error.razwi.engine_id_2 = engine_id_2;
+ hdev->captured_err_info.razwi.timestamp = ktime_get();
+ hdev->captured_err_info.razwi.addr = razwi_addr;
+ hdev->captured_err_info.razwi.engine_id_1 = engine_id_1;
+ hdev->captured_err_info.razwi.engine_id_2 = engine_id_2;
/*
* If first engine id holds non valid value the razwi initiator
* does not have engine id
*/
- hdev->last_error.razwi.non_engine_initiator = (engine_id_1 == U16_MAX);
- hdev->last_error.razwi.type = razwi_type;
+ hdev->captured_err_info.razwi.non_engine_initiator =
+ (engine_id_1 == U16_MAX);
+ hdev->captured_err_info.razwi.type = razwi_type;
}
}
@@ -7427,7 +7406,7 @@ static void gaudi_print_nic_axi_irq_info(struct hl_device *hdev, u16 event_type,
event_type, desc);
}
-static int gaudi_non_hard_reset_late_init(struct hl_device *hdev)
+static int gaudi_compute_reset_late_init(struct hl_device *hdev)
{
/* GAUDI doesn't support any reset except hard-reset */
return -EPERM;
@@ -7702,6 +7681,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
@@ -7711,6 +7691,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false);
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_0:
@@ -7722,6 +7703,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_1:
@@ -7733,6 +7715,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI_EVENT_TPC0_DEC:
@@ -7743,10 +7726,17 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_TPC5_DEC:
case GAUDI_EVENT_TPC6_DEC:
case GAUDI_EVENT_TPC7_DEC:
+ /* In TPC DEC event, notify on TPC assertion. While there isn't
+ * a specific event for assertion yet, the FW generates TPC DEC event.
+ * The SW upper layer will inspect an internal mapped area to indicate
+ * if the event is a TPC Assertion or a "real" TPC DEC.
+ */
+ event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT;
gaudi_print_irq_info(hdev, event_type, true);
reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_dec_event_to_tpc_id(event_type),
"AXI_SLV_DEC_Error");
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (reset_required) {
dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
@@ -7755,6 +7745,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
}
break;
@@ -7770,6 +7761,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_krn_event_to_tpc_id(event_type),
"KRN_ERR");
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (reset_required) {
dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
@@ -7778,6 +7770,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
}
break;
@@ -7806,9 +7799,25 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI_EVENT_PCIE_DEC:
+ case GAUDI_EVENT_CPU_AXI_SPLITTER:
+ case GAUDI_EVENT_PSOC_AXI_DEC:
+ case GAUDI_EVENT_PSOC_PRSTN_FALL:
+ gaudi_print_irq_info(hdev, event_type, true);
+ hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ break;
+
+ case GAUDI_EVENT_MMU_PAGE_FAULT:
+ case GAUDI_EVENT_MMU_WR_PERM:
+ gaudi_print_irq_info(hdev, event_type, true);
+ hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ break;
+
case GAUDI_EVENT_MME0_WBC_RSP:
case GAUDI_EVENT_MME0_SBAB0_RSP:
case GAUDI_EVENT_MME1_WBC_RSP:
@@ -7817,11 +7826,6 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_MME2_SBAB0_RSP:
case GAUDI_EVENT_MME3_WBC_RSP:
case GAUDI_EVENT_MME3_SBAB0_RSP:
- case GAUDI_EVENT_CPU_AXI_SPLITTER:
- case GAUDI_EVENT_PSOC_AXI_DEC:
- case GAUDI_EVENT_PSOC_PRSTN_FALL:
- case GAUDI_EVENT_MMU_PAGE_FAULT:
- case GAUDI_EVENT_MMU_WR_PERM:
case GAUDI_EVENT_RAZWI_OR_ADC:
case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
@@ -7841,10 +7845,12 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_qman_err(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= (HL_NOTIFIER_EVENT_USER_ENGINE_ERR | HL_NOTIFIER_EVENT_DEVICE_RESET);
break;
case GAUDI_EVENT_RAZWI_OR_ADC_SW:
gaudi_print_irq_info(hdev, event_type, true);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
goto reset_device;
case GAUDI_EVENT_TPC0_BMON_SPMU:
@@ -7858,11 +7864,13 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
gaudi_print_irq_info(hdev, event_type, false);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_NIC_SEI_0 ... GAUDI_EVENT_NIC_SEI_4:
gaudi_print_nic_axi_irq_info(hdev, event_type, &data);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
@@ -7870,6 +7878,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_print_sm_sei_info(hdev, event_type,
&eq_entry->sm_sei_data);
rc = hl_state_dump(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (rc)
dev_err(hdev->dev,
"Error during system state dump %d\n", rc);
@@ -7880,6 +7889,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
break;
case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
gaudi_print_clk_change_info(hdev, event_type);
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -7889,20 +7899,24 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
dev_err(hdev->dev,
"Received high temp H/W interrupt %d (cause %d)\n",
event_type, cause);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_DEV_RESET_REQ:
gaudi_print_irq_info(hdev, event_type, false);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_FW_ALIVE_S:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
default:
@@ -8066,8 +8080,8 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
return 0;
}
-static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
- u8 mask_len, struct seq_file *s)
+static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
{
struct gaudi_device *gaudi = hdev->asic_specific;
const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
@@ -8079,8 +8093,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
u64 offset;
int i, dma_id, port;
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nDMA is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
"--- ------- ------------ ---------- -------------\n");
@@ -8097,14 +8111,14 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_DMA_0 + dma_id, mask);
- if (s)
- seq_printf(s, fmt, dma_id,
+ if (e)
+ hl_engine_data_sprintf(e, fmt, dma_id,
is_eng_idle ? "Y" : "N", qm_glbl_sts0,
qm_cgm_sts, dma_core_sts0);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nTPC is_idle QM_GLBL_STS0 QM_CGM_STS CFG_STATUS\n"
"--- ------- ------------ ---------- ----------\n");
@@ -8119,14 +8133,14 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_TPC_0 + i, mask);
- if (s)
- seq_printf(s, fmt, i,
+ if (e)
+ hl_engine_data_sprintf(e, fmt, i,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nMME is_idle QM_GLBL_STS0 QM_CGM_STS ARCH_STATUS\n"
"--- ------- ------------ ---------- -----------\n");
@@ -8147,20 +8161,21 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_MME_0 + i, mask);
- if (s) {
+ if (e) {
if (!is_slave)
- seq_printf(s, fmt, i,
+ hl_engine_data_sprintf(e, fmt, i,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, mme_arch_sts);
else
- seq_printf(s, mme_slave_fmt, i,
+ hl_engine_data_sprintf(e, mme_slave_fmt, i,
is_eng_idle ? "Y" : "N", "-",
"-", mme_arch_sts);
}
}
- if (s)
- seq_puts(s, "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
+ if (e)
+ hl_engine_data_sprintf(e,
+ "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
"--- ------- ------------ ----------\n");
for (i = 0 ; i < (NIC_NUMBER_OF_ENGINES / 2) ; i++) {
@@ -8174,8 +8189,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
- if (s)
- seq_printf(s, nic_fmt, port,
+ if (e)
+ hl_engine_data_sprintf(e, nic_fmt, port,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts);
}
@@ -8189,15 +8204,15 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
- if (s)
- seq_printf(s, nic_fmt, port,
+ if (e)
+ hl_engine_data_sprintf(e, nic_fmt, port,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts);
}
}
- if (s)
- seq_puts(s, "\n");
+ if (e)
+ hl_engine_data_sprintf(e, "\n");
return is_idle;
}
@@ -8392,13 +8407,13 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
goto destroy_internal_cb_pool;
}
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc)
goto unreserve_internal_cb_pool;
@@ -8425,13 +8440,13 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return;
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
gen_pool_destroy(hdev->internal_cb_pool);
@@ -9148,6 +9163,11 @@ static void gaudi_add_device_attr(struct hl_device *hdev, struct attribute_group
dev_vrm_attr_grp->attrs = gaudi_vrm_dev_attrs;
}
+static int gaudi_send_device_activity(struct hl_device *hdev, bool open)
+{
+ return 0;
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -9192,11 +9212,9 @@ static const struct hl_asic_funcs gaudi_funcs = {
.send_heartbeat = gaudi_send_heartbeat,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
- .non_hard_reset_late_init = gaudi_non_hard_reset_late_init,
+ .compute_reset_late_init = gaudi_compute_reset_late_init,
.hw_queues_lock = gaudi_hw_queues_lock,
.hw_queues_unlock = gaudi_hw_queues_unlock,
- .kdma_lock = NULL,
- .kdma_unlock = NULL,
.get_pci_id = gaudi_get_pci_id,
.get_eeprom_data = gaudi_get_eeprom_data,
.get_monitor_dump = gaudi_get_monitor_dump,
@@ -9242,6 +9260,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.mmu_get_real_page_size = hl_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi_set_hbm_bar_base,
+ .send_device_activity = gaudi_send_device_activity,
};
/**
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2.c b/drivers/misc/habanalabs/gaudi2/gaudi2.c
index 98336a1a84b0..65e6cae6100a 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2.c
@@ -21,7 +21,7 @@
#define GAUDI2_DMA_POOL_BLK_SIZE SZ_256 /* 256 bytes */
-#define GAUDI2_RESET_TIMEOUT_MSEC 500 /* 500ms */
+#define GAUDI2_RESET_TIMEOUT_MSEC 2000 /* 2000ms */
#define GAUDI2_RESET_POLL_TIMEOUT_USEC 50000 /* 50ms */
#define GAUDI2_PLDM_HRESET_TIMEOUT_MSEC 25000 /* 25s */
#define GAUDI2_PLDM_SRESET_TIMEOUT_MSEC 25000 /* 25s */
@@ -117,6 +117,12 @@
#define MMU_RANGE_INV_ASID_EN_SHIFT 1
#define MMU_RANGE_INV_ASID_SHIFT 2
+/* The last SPI_SEI cause bit, "burst_fifo_full", is expected to trigger in the PMMU because it has
+ * only a 2-entry FIFO, and hence it is not enabled for the PMMU.
+ */
+#define GAUDI2_PMMU_SPI_SEI_ENABLE_MASK GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 2, 0)
+#define GAUDI2_HMMU_SPI_SEI_ENABLE_MASK GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 1, 0)
+
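A quick worked expansion of the two masks, assuming purely for illustration that GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE is 17 (the real count comes from the gaudi2 headers): GENMASK(h, l) sets bits l..h, so the HMMU mask enables every cause bit while the PMMU mask drops the top one.

#include <linux/bits.h>

/* Illustration only: assume 17 SPI_SEI causes, i.e. cause bits 0..16 */
#define EXAMPLE_NUM_OF_MMU_SPI_SEI_CAUSE	17

/* All causes enabled for HMMU: GENMASK(16, 0) == 0x1ffff */
#define EXAMPLE_HMMU_MASK	GENMASK(EXAMPLE_NUM_OF_MMU_SPI_SEI_CAUSE - 1, 0)
/* Top cause ("burst_fifo_full", bit 16) left masked for PMMU: GENMASK(15, 0) == 0x0ffff */
#define EXAMPLE_PMMU_MASK	GENMASK(EXAMPLE_NUM_OF_MMU_SPI_SEI_CAUSE - 2, 0)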
#define GAUDI2_MAX_STRING_LEN 64
#define GAUDI2_VDEC_MSIX_ENTRIES (GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM - \
@@ -610,7 +616,7 @@ static const char * const guadi2_mme_error_cause[GAUDI2_NUM_OF_MME_ERR_CAUSE] =
"qman_axi_err",
"wap sei (wbc axi err)",
"arc sei",
- "mme_cfg_unalign_addr",
+ "cfg access error",
"qm_sw_err",
"sbte_dbg_intr_0",
"sbte_dbg_intr_1",
@@ -1525,17 +1531,57 @@ static const u32 rtr_coordinates_to_rtr_id[NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES]
RTR_ID_X_Y(17, 11)
};
+enum rtr_id {
+ DCORE0_RTR0,
+ DCORE0_RTR1,
+ DCORE0_RTR2,
+ DCORE0_RTR3,
+ DCORE0_RTR4,
+ DCORE0_RTR5,
+ DCORE0_RTR6,
+ DCORE0_RTR7,
+ DCORE1_RTR0,
+ DCORE1_RTR1,
+ DCORE1_RTR2,
+ DCORE1_RTR3,
+ DCORE1_RTR4,
+ DCORE1_RTR5,
+ DCORE1_RTR6,
+ DCORE1_RTR7,
+ DCORE2_RTR0,
+ DCORE2_RTR1,
+ DCORE2_RTR2,
+ DCORE2_RTR3,
+ DCORE2_RTR4,
+ DCORE2_RTR5,
+ DCORE2_RTR6,
+ DCORE2_RTR7,
+ DCORE3_RTR0,
+ DCORE3_RTR1,
+ DCORE3_RTR2,
+ DCORE3_RTR3,
+ DCORE3_RTR4,
+ DCORE3_RTR5,
+ DCORE3_RTR6,
+ DCORE3_RTR7,
+};
+
static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
- 1, 1, 2, 2, 3, 3, 14, 14, 13, 13, 12, 12, 19, 19, 18, 18, 17,
- 17, 28, 28, 29, 29, 30, 30, 0
+ DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, DCORE0_RTR3, DCORE0_RTR3,
+ DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, DCORE1_RTR4, DCORE1_RTR4,
+ DCORE2_RTR3, DCORE2_RTR3, DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1,
+ DCORE3_RTR4, DCORE3_RTR4, DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6,
+ DCORE0_RTR0
};
static const u32 gaudi2_dec_initiator_rtr_id[NUMBER_OF_DEC] = {
- 0, 0, 15, 15, 16, 16, 31, 31, 0, 0
+ DCORE0_RTR0, DCORE0_RTR0, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, DCORE2_RTR0,
+ DCORE3_RTR7, DCORE3_RTR7, DCORE0_RTR0, DCORE0_RTR0
};
static const u32 gaudi2_nic_initiator_rtr_id[NIC_NUMBER_OF_MACROS] = {
- 15, 15, 15, 15, 15, 16, 16, 16, 16, 31, 31, 31
+ DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0,
+ DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
};
struct sft_info {
@@ -1548,11 +1594,11 @@ static const struct sft_info gaudi2_edma_initiator_sft_id[NUM_OF_EDMA_PER_DCORE
};
static const u32 gaudi2_pdma_initiator_rtr_id[NUM_OF_PDMA] = {
- 0, 0
+ DCORE0_RTR0, DCORE0_RTR0
};
static const u32 gaudi2_rot_initiator_rtr_id[NUM_OF_ROT] = {
- 16, 31
+ DCORE2_RTR0, DCORE3_RTR7
};
struct mme_initiators_rtr_id {
@@ -1663,7 +1709,7 @@ struct gaudi2_cache_invld_params {
};
struct gaudi2_tpc_idle_data {
- struct seq_file *s;
+ struct engines_data *e;
unsigned long *mask;
bool *is_idle;
const char *tpc_fmt;
@@ -1706,6 +1752,9 @@ void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx)
int dcore, inst, tpc_seq;
u32 offset;
+ /* init the return code */
+ ctx->rc = 0;
+
for (dcore = 0; dcore < NUM_OF_DCORES; dcore++) {
for (inst = 0; inst < NUM_OF_TPC_PER_DCORE; inst++) {
tpc_seq = dcore * NUM_OF_TPC_PER_DCORE + inst;
@@ -1715,7 +1764,12 @@ void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx)
offset = (DCORE_OFFSET * dcore) + (DCORE_TPC_OFFSET * inst);
- ctx->fn(hdev, dcore, inst, offset, ctx->data);
+ ctx->fn(hdev, dcore, inst, offset, ctx);
+ if (ctx->rc) {
+ dev_err(hdev->dev, "TPC iterator failed for DCORE%d TPC%d\n",
+ dcore, inst);
+ return;
+ }
}
}
@@ -1724,7 +1778,9 @@ void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx)
/* special check for PCI TPC (DCORE0_TPC6) */
offset = DCORE_TPC_OFFSET * (NUM_DCORE0_TPC - 1);
- ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx->data);
+ ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx);
+ if (ctx->rc)
+ dev_err(hdev->dev, "TPC iterator failed for DCORE0 TPC6\n");
}
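For context, a minimal sketch of how a per-TPC callback cooperates with the reworked iterator: the callback now receives the iterate_module_ctx itself, pulls its private payload from ctx->data, and reports failure through ctx->rc so gaudi2_iterate_tpcs() can log and stop early. The payload struct and the error condition below are illustrative, not taken from the driver.

/* Hypothetical payload for the sketch below */
struct example_tpc_payload {
	u32 value;
};

/* Hypothetical callback following the iterate_module_ctx contract shown above */
static void example_tpc_cb(struct hl_device *hdev, int dcore, int inst,
				u32 offset, struct iterate_module_ctx *ctx)
{
	struct example_tpc_payload *payload = ctx->data;

	if (!payload) {
		/* Propagate the error; the iterator logs and returns */
		ctx->rc = -EINVAL;
		return;
	}

	/* Per-instance work would use the register block 'offset' here */
}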
static bool gaudi2_host_phys_addr_valid(u64 addr)
@@ -1973,6 +2029,7 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
}
+ prop->num_engine_cores = CPU_ID_MAX;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI2_EVENT_SIZE;
@@ -2005,9 +2062,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
- prop->cb_va_start_addr = VA_HOST_SPACE_USER_MAPPED_CB_START;
- prop->cb_va_end_addr = VA_HOST_SPACE_USER_MAPPED_CB_END;
-
prop->max_dec = NUMBER_OF_DEC;
prop->clk_pll_index = HL_GAUDI2_MME_PLL;
@@ -2477,7 +2531,6 @@ static int gaudi2_early_init(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
resource_size_t pci_bar_size;
- u32 fw_boot_status;
int rc;
rc = gaudi2_set_fixed_properties(hdev);
@@ -2505,22 +2558,14 @@ static int gaudi2_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DRAM_BAR_ID);
hdev->dram_pci_bar_start = pci_resource_start(pdev, DRAM_BAR_ID);
- /* If FW security is enabled at this point it means no access to ELBI */
- if (hdev->asic_prop.fw_security_enabled) {
- hdev->asic_prop.iatu_done_by_fw = true;
- goto pci_init;
- }
-
- rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0, &fw_boot_status);
- if (rc)
- goto free_queue_props;
-
- /* Check whether FW is configuring iATU */
- if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
- (fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
+	/*
+	 * Only on pldm does the driver configure the iATU itself
+	 */
+ if (hdev->pldm)
+ hdev->asic_prop.iatu_done_by_fw = false;
+ else
hdev->asic_prop.iatu_done_by_fw = true;
-pci_init:
rc = hl_pci_init(hdev);
if (rc)
goto free_queue_props;
@@ -2676,6 +2721,8 @@ static int gaudi2_late_init(struct hl_device *hdev)
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc;
+ hdev->asic_prop.supports_advanced_cpucp_rc = true;
+
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS,
gaudi2->virt_msix_db_dma_addr);
if (rc) {
@@ -2703,23 +2750,7 @@ disable_pci_access:
static void gaudi2_late_fini(struct hl_device *hdev)
{
- const struct hwmon_channel_info **channel_info_arr;
- int i = 0;
-
- if (!hdev->hl_chip_info->info)
- return;
-
- channel_info_arr = hdev->hl_chip_info->info;
-
- while (channel_info_arr[i]) {
- kfree(channel_info_arr[i]->config);
- kfree(channel_info_arr[i]);
- i++;
- }
-
- kfree(channel_info_arr);
-
- hdev->hl_chip_info->info = NULL;
+ hl_hwmon_release_resources(hdev);
}
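The open-coded hwmon channel-info teardown is now centralized in a common helper. A sketch, reconstructed from the loop removed above, of what hl_hwmon_release_resources() is replacing (the authoritative implementation lives in the common hwmon code):

/* Sketch based on the removed loop; not the verbatim common helper */
void example_hwmon_release_resources(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	/* Free each channel's config array and the channel itself */
	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);
	hdev->hl_chip_info->info = NULL;
}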
static void gaudi2_user_mapped_dec_init(struct gaudi2_device *gaudi2, u32 start_idx)
@@ -2917,7 +2948,7 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
static inline int gaudi2_get_non_zero_random_int(void)
{
- int rand = get_random_int();
+ int rand = get_random_u32();
return rand ? rand : 1;
}
@@ -2994,7 +3025,6 @@ static int gaudi2_sw_init(struct hl_device *hdev)
}
spin_lock_init(&gaudi2->hw_queues_lock);
- spin_lock_init(&gaudi2->kdma_lock);
gaudi2->scratchpad_kernel_address = hl_asic_dma_alloc_coherent(hdev, PAGE_SIZE,
&gaudi2->scratchpad_bus_address,
@@ -3551,7 +3581,7 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
rc = gaudi2_dec_enable_msix(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to enable decoder IRQ");
- goto free_completion_irq;
+ goto free_event_irq;
}
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0;
@@ -3582,6 +3612,10 @@ free_user_irq:
gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
+free_event_irq:
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
+ free_irq(irq, cq);
+
free_completion_irq:
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
free_irq(irq, cq);
@@ -3745,14 +3779,16 @@ static void gaudi2_stop_dec(struct hl_device *hdev)
gaudi2_stop_pcie_dec(hdev);
}
-static void gaudi2_halt_arc(struct hl_device *hdev, u32 cpu_id)
+static void gaudi2_set_arc_running_mode(struct hl_device *hdev, u32 cpu_id, u32 run_mode)
{
u32 reg_base, reg_val;
reg_base = gaudi2_arc_blocks_bases[cpu_id];
+ if (run_mode == HL_ENGINE_CORE_RUN)
+ reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 1);
+ else
+ reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK, 1);
- /* Halt ARC */
- reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK, 1);
WREG32(reg_base + ARC_HALT_REQ_OFFSET, reg_val);
}
@@ -3762,8 +3798,35 @@ static void gaudi2_halt_arcs(struct hl_device *hdev)
for (arc_id = CPU_ID_SCHED_ARC0; arc_id < CPU_ID_MAX; arc_id++) {
if (gaudi2_is_arc_enabled(hdev, arc_id))
- gaudi2_halt_arc(hdev, arc_id);
+ gaudi2_set_arc_running_mode(hdev, arc_id, HL_ENGINE_CORE_HALT);
+ }
+}
+
+static int gaudi2_verify_arc_running_mode(struct hl_device *hdev, u32 cpu_id, u32 run_mode)
+{
+ int rc;
+ u32 reg_base, val, ack_mask, timeout_usec = 100000;
+
+ if (hdev->pldm)
+ timeout_usec *= 100;
+
+ reg_base = gaudi2_arc_blocks_bases[cpu_id];
+ if (run_mode == HL_ENGINE_CORE_RUN)
+ ack_mask = ARC_FARM_ARC0_AUX_RUN_HALT_ACK_RUN_ACK_MASK;
+ else
+ ack_mask = ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_MASK;
+
+ rc = hl_poll_timeout(hdev, reg_base + ARC_HALT_ACK_OFFSET,
+ val, ((val & ack_mask) == ack_mask),
+ 1000, timeout_usec);
+
+ if (!rc) {
+		/* Clear the run/halt request now that it has been acknowledged */
+ val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 0);
+ WREG32(reg_base + ARC_HALT_REQ_OFFSET, val);
}
+
+ return rc;
}
static void gaudi2_reset_arcs(struct hl_device *hdev)
@@ -3790,8 +3853,39 @@ static void gaudi2_nic_qmans_manual_flush(struct hl_device *hdev)
queue_id = GAUDI2_QUEUE_ID_NIC_0_0;
- for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN)
+ for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
+ if (!(hdev->nic_ports_mask & BIT(i)))
+ continue;
+
gaudi2_qman_manual_flush_common(hdev, queue_id);
+ }
+}
+
+static int gaudi2_set_engine_cores(struct hl_device *hdev, u32 *core_ids,
+ u32 num_cores, u32 core_command)
+{
+ int i, rc;
+
+ for (i = 0 ; i < num_cores ; i++) {
+ if (gaudi2_is_arc_enabled(hdev, core_ids[i]))
+ gaudi2_set_arc_running_mode(hdev, core_ids[i], core_command);
+ }
+
+ for (i = 0 ; i < num_cores ; i++) {
+ if (gaudi2_is_arc_enabled(hdev, core_ids[i])) {
+ rc = gaudi2_verify_arc_running_mode(hdev, core_ids[i], core_command);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to %s arc: %d\n",
+ (core_command == HL_ENGINE_CORE_HALT) ?
+ "HALT" : "RUN", core_ids[i]);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
}
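A hedged sketch of how the new ASIC op might be driven from common code: collect the ARC core IDs of interest, issue one HALT (or RUN) command for all of them, and fail the whole call if any core does not acknowledge. The caller name and the single-core list are illustrative.

/* Illustrative caller only; real call sites live in the common driver code */
static int example_halt_sched_arc0(struct hl_device *hdev)
{
	u32 core_ids[] = { CPU_ID_SCHED_ARC0 };

	return hdev->asic_funcs->set_engine_cores(hdev, core_ids,
					ARRAY_SIZE(core_ids),
					HL_ENGINE_CORE_HALT);
}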
static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
@@ -4124,11 +4218,15 @@ static void gaudi2_init_qman_common(struct hl_device *hdev, u32 reg_base,
WREG32(reg_base + QM_GLBL_CFG2_OFFSET, 0);
/* Enable the QMAN channel.
- * PDMA1 QMAN configuration is different, as we do not allow user to
- * access CP2/3, it is reserved for the ARC usage.
+ * PDMA QMAN configuration is different, as we do not allow user to
+ * access some of the CPs.
+ * PDMA0: CP2/3 are reserved for the ARC usage.
+ * PDMA1: CP1/2/3 are reserved for the ARC usage.
*/
if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_1_0])
WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA1_QMAN_ENABLE);
+ else if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_0_0])
+ WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA0_QMAN_ENABLE);
else
WREG32(reg_base + QM_GLBL_CFG0_OFFSET, QMAN_ENABLE);
}
@@ -4501,10 +4599,10 @@ struct gaudi2_tpc_init_cfg_data {
};
static void gaudi2_init_tpc_config(struct hl_device *hdev, int dcore, int inst,
- u32 offset, void *data)
+ u32 offset, struct iterate_module_ctx *ctx)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
- struct gaudi2_tpc_init_cfg_data *cfg_data = data;
+ struct gaudi2_tpc_init_cfg_data *cfg_data = ctx->data;
u32 queue_id_base;
u8 seq;
@@ -4956,8 +5054,7 @@ static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
return 0;
}
-static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base,
- u32 stlb_base)
+static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb_base)
{
u32 status, timeout_usec;
int rc;
@@ -4985,7 +5082,6 @@ static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base,
return rc;
WREG32(mmu_base + MMU_BYPASS_OFFSET, 0);
- WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, 0xF);
rc = hl_poll_timeout(
hdev,
@@ -5042,6 +5138,8 @@ static int gaudi2_pci_mmu_init(struct hl_device *hdev)
DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK);
}
+ WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_PMMU_SPI_SEI_ENABLE_MASK);
+
rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
if (rc)
return rc;
@@ -5092,6 +5190,8 @@ static int gaudi2_dcore_hmmu_init(struct hl_device *hdev, int dcore_id,
RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, 1,
STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK);
+ WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_HMMU_SPI_SEI_ENABLE_MASK);
+
rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
if (rc)
return rc;
@@ -5339,7 +5439,10 @@ static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms
if (!driver_performs_reset) {
/* set SP to indicate reset request sent to FW */
- WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
+ if (dyn_regs->cpu_rst_status)
+ WREG32(le32_to_cpu(dyn_regs->cpu_rst_status), CPU_RST_STATUS_NA);
+ else
+ WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq),
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
@@ -5527,10 +5630,11 @@ static bool gaudi2_is_queue_enabled(struct hl_device *hdev, u32 hw_queue_id)
u64 hw_test_cap_bit = 0;
switch (hw_queue_id) {
- case GAUDI2_QUEUE_ID_PDMA_0_0 ... GAUDI2_QUEUE_ID_PDMA_1_1:
+ case GAUDI2_QUEUE_ID_PDMA_0_0:
+ case GAUDI2_QUEUE_ID_PDMA_0_1:
+ case GAUDI2_QUEUE_ID_PDMA_1_0:
hw_cap_mask = HW_CAP_PDMA_MASK;
break;
-
case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3:
hw_test_cap_bit = HW_CAP_EDMA_SHIFT +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0) >> 2);
@@ -6129,7 +6233,7 @@ done:
return ret_val;
}
-static int gaudi2_non_hard_reset_late_init(struct hl_device *hdev)
+static int gaudi2_compute_reset_late_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
size_t irq_arr_size;
@@ -6147,9 +6251,9 @@ static int gaudi2_non_hard_reset_late_init(struct hl_device *hdev)
}
static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_tpc_idle_data *idle_data = (struct gaudi2_tpc_idle_data *)data;
+ struct gaudi2_tpc_idle_data *idle_data = ctx->data;
u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
bool is_eng_idle;
int engine_idx;
@@ -6172,14 +6276,15 @@ static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int ins
if (idle_data->mask && !is_eng_idle)
set_bit(engine_idx, idle_data->mask);
- if (idle_data->s)
- seq_printf(idle_data->s, idle_data->tpc_fmt, dcore, inst,
+ if (idle_data->e)
+ hl_engine_data_sprintf(idle_data->e,
+ idle_data->tpc_fmt, dcore, inst,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
}
-static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
- u8 mask_len, struct seq_file *s)
+static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
{
u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_idle_ind_mask,
mme_arch_sts, dec_swreg15, dec_enabled_bit;
@@ -6197,7 +6302,7 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
struct gaudi2_tpc_idle_data tpc_idle_data = {
.tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
- .s = s,
+ .e = e,
.mask = mask,
.is_idle = &is_idle,
};
@@ -6209,8 +6314,8 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
int engine_idx, i, j;
/* EDMA, Two engines per Dcore */
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
"---- ---- ------- ------------ ----------------------\n");
@@ -6239,19 +6344,19 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, edma_fmt, i, j,
- is_eng_idle ? "Y" : "N",
- qm_glbl_sts0,
- dma_core_idle_ind_mask);
+ if (e)
+ hl_engine_data_sprintf(e, edma_fmt, i, j,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0,
+ dma_core_idle_ind_mask);
}
}
/* PDMA, Two engines in Full chip */
- if (s)
- seq_puts(s,
- "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
- "---- ------- ------------ ----------------------\n");
+ if (e)
+ hl_engine_data_sprintf(e,
+ "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
+ "---- ------- ------------ ----------------------\n");
for (i = 0 ; i < NUM_OF_PDMA ; i++) {
engine_idx = GAUDI2_ENGINE_ID_PDMA_0 + i;
@@ -6269,16 +6374,16 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, pdma_fmt, i, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
- dma_core_idle_ind_mask);
+ if (e)
+ hl_engine_data_sprintf(e, pdma_fmt, i, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, dma_core_idle_ind_mask);
}
/* NIC, twelve macros in Full chip */
- if (s && hdev->nic_ports_mask)
- seq_puts(s,
- "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
- "--- ------- ------------ ----------\n");
+ if (e && hdev->nic_ports_mask)
+ hl_engine_data_sprintf(e,
+ "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
+ "--- ------- ------------ ----------\n");
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
if (!(i & 1))
@@ -6302,15 +6407,15 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, nic_fmt, i, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
- qm_cgm_sts);
+ if (e)
+ hl_engine_data_sprintf(e, nic_fmt, i, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts);
}
- if (s)
- seq_puts(s,
- "\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
- "--- ---- ------- ------------ ---------------\n");
+ if (e)
+ hl_engine_data_sprintf(e,
+ "\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
+ "--- ---- ------- ------------ ---------------\n");
/* MME, one per Dcore */
for (i = 0 ; i < NUM_OF_DCORES ; i++) {
engine_idx = GAUDI2_DCORE0_ENGINE_ID_MME + i * GAUDI2_ENGINE_ID_DCORE_OFFSET;
@@ -6327,8 +6432,8 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
is_eng_idle &= IS_MME_IDLE(mme_arch_sts);
is_idle &= is_eng_idle;
- if (s)
- seq_printf(s, mme_fmt, i, "N",
+ if (e)
+ hl_engine_data_sprintf(e, mme_fmt, i, "N",
is_eng_idle ? "Y" : "N",
qm_glbl_sts0,
mme_arch_sts);
@@ -6340,16 +6445,16 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
/*
* TPC
*/
- if (s && prop->tpc_enabled_mask)
- seq_puts(s,
+ if (e && prop->tpc_enabled_mask)
+ hl_engine_data_sprintf(e,
"\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_IDLE_IND_MASK\n"
"---- --- -------- ------------ ---------- ----------------------\n");
gaudi2_iterate_tpcs(hdev, &tpc_iter);
/* Decoders, two each Dcore and two shared PCIe decoders */
- if (s && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
- seq_puts(s,
+ if (e && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
+ hl_engine_data_sprintf(e,
"\nCORE DEC is_idle VSI_CMD_SWREG15\n"
"---- --- ------- ---------------\n");
@@ -6370,13 +6475,14 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, dec_fmt, i, j, is_eng_idle ? "Y" : "N", dec_swreg15);
+ if (e)
+ hl_engine_data_sprintf(e, dec_fmt, i, j,
+ is_eng_idle ? "Y" : "N", dec_swreg15);
}
}
- if (s && (prop->decoder_enabled_mask & PCIE_DEC_EN_MASK))
- seq_puts(s,
+ if (e && (prop->decoder_enabled_mask & PCIE_DEC_EN_MASK))
+ hl_engine_data_sprintf(e,
"\nPCIe DEC is_idle VSI_CMD_SWREG15\n"
"-------- ------- ---------------\n");
@@ -6395,12 +6501,13 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, pcie_dec_fmt, i, is_eng_idle ? "Y" : "N", dec_swreg15);
+ if (e)
+ hl_engine_data_sprintf(e, pcie_dec_fmt, i,
+ is_eng_idle ? "Y" : "N", dec_swreg15);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nCORE ROT is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
"---- ---- ------- ------------ ---------- -------------\n");
@@ -6419,8 +6526,8 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
+ if (e)
+ hl_engine_data_sprintf(e, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, "-");
}
@@ -6443,22 +6550,6 @@ static void gaudi2_hw_queues_unlock(struct hl_device *hdev)
spin_unlock(&gaudi2->hw_queues_lock);
}
-static void gaudi2_kdma_lock(struct hl_device *hdev, int dcore_id)
- __acquires(&gaudi2->kdma_lock)
-{
- struct gaudi2_device *gaudi2 = hdev->asic_specific;
-
- spin_lock(&gaudi2->kdma_lock);
-}
-
-static void gaudi2_kdma_unlock(struct hl_device *hdev, int dcore_id)
- __releases(&gaudi2->kdma_lock)
-{
- struct gaudi2_device *gaudi2 = hdev->asic_specific;
-
- spin_unlock(&gaudi2->kdma_lock);
-}
-
static u32 gaudi2_get_pci_id(struct hl_device *hdev)
{
return hdev->pdev->device;
@@ -6725,9 +6816,9 @@ static int gaudi2_mmu_shared_prepare(struct hl_device *hdev, u32 asid)
}
static void gaudi2_tpc_mmu_prepare(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_tpc_mmu_data *mmu_data = (struct gaudi2_tpc_mmu_data *)data;
+ struct gaudi2_tpc_mmu_data *mmu_data = ctx->data;
WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_MMU_BP + offset, 0);
WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_ASID + offset, mmu_data->rw_asid);
@@ -7020,10 +7111,6 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
razwi_lo = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_lo_reg);
razwi_xy = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_id_reg);
}
-
- dev_err_ratelimited(hdev->dev,
- "%s-RAZWI SHARED RR HBW WR error, captured address HI 0x%x LO 0x%x, Initiator coordinates 0x%x\n",
- name, razwi_hi, razwi_lo, razwi_xy);
} else {
if (read_razwi_regs) {
razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI);
@@ -7034,11 +7121,11 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
razwi_lo = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_lo_reg);
razwi_xy = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_id_reg);
}
-
- dev_err_ratelimited(hdev->dev,
- "%s-RAZWI SHARED RR HBW AR error, captured address HI 0x%x LO 0x%x, Initiator coordinates 0x%x\n",
- name, razwi_hi, razwi_lo, razwi_xy);
}
+
+ dev_err_ratelimited(hdev->dev,
+ "%s-RAZWI SHARED RR HBW %s error, address %#llx, Initiator coordinates 0x%x\n",
+ name, is_write ? "WR" : "RD", (u64)razwi_hi << 32 | razwi_lo, razwi_xy);
}
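The unified print recombines the two 32-bit capture registers into a single 64-bit address rather than reporting HI and LO separately. A minimal worked example of the expression used above:

#include <linux/types.h>

/* If HI == 0x00001234 and LO == 0xdeadbeef, the printed address is
 * 0x00001234deadbeef.
 */
static u64 example_razwi_addr(u32 razwi_hi, u32 razwi_lo)
{
	return (u64)razwi_hi << 32 | razwi_lo;
}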
static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev,
@@ -7296,7 +7383,79 @@ static void gaudi2_check_if_razwi_happened(struct hl_device *hdev)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL);
}
-static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev,
+static const char *gaudi2_get_initiators_name(u32 rtr_id)
+{
+ switch (rtr_id) {
+ case DCORE0_RTR0:
+ return "DEC0/1/8/9, TPC24, PDMA0/1, PMMU, PCIE_IF, EDMA0/2, HMMU0/2/4/6, CPU";
+ case DCORE0_RTR1:
+ return "TPC0/1";
+ case DCORE0_RTR2:
+ return "TPC2/3";
+ case DCORE0_RTR3:
+ return "TPC4/5";
+ case DCORE0_RTR4:
+ return "MME0_SBTE0/1";
+ case DCORE0_RTR5:
+ return "MME0_WAP0/SBTE2";
+ case DCORE0_RTR6:
+ return "MME0_CTRL_WR/SBTE3";
+ case DCORE0_RTR7:
+ return "MME0_WAP1/CTRL_RD/SBTE4";
+ case DCORE1_RTR0:
+ return "MME1_WAP1/CTRL_RD/SBTE4";
+ case DCORE1_RTR1:
+ return "MME1_CTRL_WR/SBTE3";
+ case DCORE1_RTR2:
+ return "MME1_WAP0/SBTE2";
+ case DCORE1_RTR3:
+ return "MME1_SBTE0/1";
+ case DCORE1_RTR4:
+ return "TPC10/11";
+ case DCORE1_RTR5:
+ return "TPC8/9";
+ case DCORE1_RTR6:
+ return "TPC6/7";
+ case DCORE1_RTR7:
+ return "DEC2/3, NIC0/1/2/3/4, ARC_FARM, KDMA, EDMA1/3, HMMU1/3/5/7";
+ case DCORE2_RTR0:
+ return "DEC4/5, NIC5/6/7/8, EDMA4/6, HMMU8/10/12/14, ROT0";
+ case DCORE2_RTR1:
+ return "TPC16/17";
+ case DCORE2_RTR2:
+ return "TPC14/15";
+ case DCORE2_RTR3:
+ return "TPC12/13";
+ case DCORE2_RTR4:
+ return "MME2_SBTE0/1";
+ case DCORE2_RTR5:
+ return "MME2_WAP0/SBTE2";
+ case DCORE2_RTR6:
+ return "MME2_CTRL_WR/SBTE3";
+ case DCORE2_RTR7:
+ return "MME2_WAP1/CTRL_RD/SBTE4";
+ case DCORE3_RTR0:
+ return "MME3_WAP1/CTRL_RD/SBTE4";
+ case DCORE3_RTR1:
+ return "MME3_CTRL_WR/SBTE3";
+ case DCORE3_RTR2:
+ return "MME3_WAP0/SBTE2";
+ case DCORE3_RTR3:
+ return "MME3_SBTE0/1";
+ case DCORE3_RTR4:
+ return "TPC18/19";
+ case DCORE3_RTR5:
+ return "TPC20/21";
+ case DCORE3_RTR6:
+ return "TPC22/23";
+ case DCORE3_RTR7:
+ return "DEC6/7, NIC9/10/11, EDMA5/7, HMMU9/11/13/15, ROT1, PSOC";
+ default:
+ return "N/A";
+ }
+}
+
+static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev, u32 rtr_id,
u64 rtr_ctrl_base_addr, bool is_write)
{
u32 razwi_hi, razwi_lo;
@@ -7305,50 +7464,47 @@ static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev,
razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_HI);
razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_LO);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped HBW WR error, ctr_base 0x%llx, captured address HI 0x%x, LO 0x%x\n",
- rtr_ctrl_base_addr, razwi_hi, razwi_lo);
-
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET, 0x1);
} else {
razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_HI);
-
razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_LO);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped HBW AR error, ctr_base 0x%llx, captured address HI 0x%x, LO 0x%x\n",
- rtr_ctrl_base_addr, razwi_hi, razwi_lo);
-
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET, 0x1);
}
+
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI PSOC unmapped HBW %s error, rtr id %u, address %#llx\n",
+ is_write ? "WR" : "RD", rtr_id, (u64)razwi_hi << 32 | razwi_lo);
+
+ dev_err_ratelimited(hdev->dev,
+ "Initiators: %s\n", gaudi2_get_initiators_name(rtr_id));
}
-static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev,
- u64 rtr_ctrl_base_addr, bool is_write)
+static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u32 rtr_id,
+ u64 rtr_ctrl_base_addr, bool is_write)
{
u32 razwi_addr;
if (is_write) {
razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped LBW WR error, ctr_base 0x%llx, captured address 0x%x\n",
- rtr_ctrl_base_addr, razwi_addr);
-
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1);
} else {
razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped LBW AR error, ctr_base 0x%llx, captured address 0x%x\n",
- rtr_ctrl_base_addr, razwi_addr);
-
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET, 0x1);
}
+
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI PSOC unmapped LBW %s error, rtr id %u, address %#x\n",
+ is_write ? "WR" : "RD", rtr_id, razwi_addr);
+
+ dev_err_ratelimited(hdev->dev,
+ "Initiators: %s\n", gaudi2_get_initiators_name(rtr_id));
}
/* PSOC RAZWI interrupt occurs only when trying to access a bad address */
@@ -7366,21 +7522,16 @@ static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev)
}
razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO);
-
- xy = (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_SHIFT;
+ xy = FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK, razwi_mask_info);
dev_err_ratelimited(hdev->dev,
- "PSOC RAZWI interrupt: Mask %d, WAS_AR %d, WAS_AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n",
- (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_SHIFT,
- (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_SHIFT,
- (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_SHIFT, xy,
- (razwi_mask_info &
- PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_SHIFT);
+ "PSOC RAZWI interrupt: Mask %d, AR %d, AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n",
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK, razwi_mask_info),
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK, razwi_mask_info),
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK, razwi_mask_info),
+ xy,
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK, razwi_mask_info));
+
if (xy == 0) {
dev_err_ratelimited(hdev->dev,
"PSOC RAZWI interrupt: received event from 0 rtr coordinates\n");
@@ -7410,16 +7561,20 @@ static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev)
lbw_ar_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET);
if (hbw_aw_set)
- gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_ctrl_base_addr, true);
+ gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id,
+ rtr_ctrl_base_addr, true);
if (hbw_ar_set)
- gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_ctrl_base_addr, false);
+ gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id,
+ rtr_ctrl_base_addr, false);
if (lbw_aw_set)
- gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_ctrl_base_addr, true);
+ gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
+ rtr_ctrl_base_addr, true);
if (lbw_ar_set)
- gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_ctrl_base_addr, false);
+ gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
+ rtr_ctrl_base_addr, false);
clear:
/* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */
@@ -7811,14 +7966,58 @@ static void gaudi2_handle_dma_core_event(struct hl_device *hdev, u64 intr_cause_
gaudi2_dma_core_interrupts_cause[i]);
}
+static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev)
+{
+ u32 mstr_if_base_addr = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE, razwi_happened_addr;
+
+ razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED;
+ if (RREG32(razwi_happened_addr)) {
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true,
+ NULL);
+ WREG32(razwi_happened_addr, 0x1);
+ }
+
+ razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED;
+ if (RREG32(razwi_happened_addr)) {
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true,
+ NULL);
+ WREG32(razwi_happened_addr, 0x1);
+ }
+
+ razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED;
+ if (RREG32(razwi_happened_addr)) {
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true,
+ NULL);
+ WREG32(razwi_happened_addr, 0x1);
+ }
+
+ razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED;
+ if (RREG32(razwi_happened_addr)) {
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true,
+ NULL);
+ WREG32(razwi_happened_addr, 0x1);
+ }
+}
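The four blocks above repeat the same read/report/write-1-to-clear sequence on the PCIE master RR "RAZWI happened" latches. As a hedged sketch, the repetition could be folded into a small local helper such as the one below; the helper name and parameters are illustrative, the driver keeps the four checks open-coded.

/* Illustrative helper; mirrors the open-coded checks above */
static void example_check_razwi_happened(struct hl_device *hdev, u32 mstr_if_base_addr,
					u32 happened_offset, bool is_hbw, bool is_write)
{
	u32 razwi_happened_addr = mstr_if_base_addr + happened_offset;

	if (!RREG32(razwi_happened_addr))
		return;

	if (is_hbw)
		gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr,
						is_write, "PCIE", true, NULL);
	else
		gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr,
						is_write, "PCIE", true, NULL);

	/* The latch is write-1-to-clear */
	WREG32(razwi_happened_addr, 0x1);
}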
+
static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cause_data)
{
int i;
- for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE; i++)
- if (intr_cause_data & BIT_ULL(i))
- dev_err_ratelimited(hdev->dev, "PCIE ADDR DEC Error: %s\n",
- gaudi2_pcie_addr_dec_error_cause[i]);
+ for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) {
+ if (!(intr_cause_data & BIT_ULL(i)))
+ continue;
+
+ dev_err_ratelimited(hdev->dev, "PCIE ADDR DEC Error: %s\n",
+ gaudi2_pcie_addr_dec_error_cause[i]);
+
+ switch (intr_cause_data & BIT_ULL(i)) {
+ case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK:
+ break;
+ case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK:
+ gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev);
+ break;
+ }
+ }
}
static void gaudi2_handle_pif_fatal(struct hl_device *hdev, u64 intr_cause_data)
@@ -8158,10 +8357,17 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
return true;
}
- dev_err_ratelimited(hdev->dev,
- "System Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Critical(%u). Error cause: %s\n",
- hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
- sei_data->hdr.is_critical, hbm_mc_sei_cause[cause_idx]);
+ if (sei_data->hdr.is_critical)
+ dev_err(hdev->dev,
+ "System Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
+ hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
+ hbm_mc_sei_cause[cause_idx]);
+
+ else
+ dev_err_ratelimited(hdev->dev,
+ "System Non-Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
+ hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
+ hbm_mc_sei_cause[cause_idx]);
/* Print error-specific info */
switch (cause_idx) {
@@ -8371,6 +8577,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
struct gaudi2_device *gaudi2 = hdev->asic_specific;
bool reset_required = false, skip_reset = false;
int index, sbte_index;
+ u64 event_mask = 0;
u16 event_type;
ctl = le32_to_cpu(eq_entry->hdr.ctl);
@@ -8392,6 +8599,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
fallthrough;
case GAUDI2_EVENT_ROTATOR0_SERR ... GAUDI2_EVENT_ROTATOR1_DERR:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
break;
@@ -8401,21 +8609,25 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
fallthrough;
case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1:
gaudi2_handle_qman_err(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
gaudi2_handle_arc_farm_sei_err(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ERR_RSP:
gaudi2_handle_cpu_sei_err(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
gaudi2_handle_qm_sei_err(hdev, event_type, &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE:
@@ -8423,6 +8635,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE;
gaudi2_handle_rot_err(hdev, index, &eq_entry->razwi_with_intr_cause);
gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP:
@@ -8430,11 +8643,13 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2_tpc_ack_interrupts(hdev, index, "AXI_ERR_RSP",
&eq_entry->razwi_with_intr_cause);
gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE:
index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE;
gaudi2_handle_dec_err(hdev, index, "AXI_ERR_RESPONSE", &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_TPC0_KERNEL_ERR:
@@ -8465,6 +8680,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) /
(GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR);
gaudi2_tpc_ack_interrupts(hdev, index, "KRN_ERR", &eq_entry->razwi_with_intr_cause);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_DEC0_SPI:
@@ -8480,6 +8696,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_DEC0_SPI) /
(GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI);
gaudi2_handle_dec_err(hdev, index, "SPI", &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE:
@@ -8492,6 +8709,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2_handle_mme_err(hdev, index,
"CTRL_AXI_ERROR_RESPONSE", &eq_entry->razwi_info);
gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_QMAN_SW_ERROR:
@@ -8502,6 +8720,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
(GAUDI2_EVENT_MME1_QMAN_SW_ERROR -
GAUDI2_EVENT_MME0_QMAN_SW_ERROR);
gaudi2_handle_mme_err(hdev, index, "QMAN_SW_ERROR", &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID:
@@ -8512,22 +8731,27 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
(GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID -
GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID);
gaudi2_handle_mme_wap_err(hdev, index, &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_KDMA0_CORE:
gaudi2_handle_kdma_core_event(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_PDMA1_CORE:
gaudi2_handle_dma_core_event(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR:
gaudi2_print_pcie_addr_dec_info(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
@@ -8536,25 +8760,30 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
gaudi2_handle_mmu_spi_sei_err(hdev, event_type);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_HIF0_FATAL ... GAUDI2_EVENT_HIF12_FATAL:
gaudi2_handle_hif_fatal(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PMMU_FATAL_0:
gaudi2_handle_pif_fatal(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT:
gaudi2_ack_psoc_razwi_event_handler(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE ... GAUDI2_EVENT_HBM5_MC1_SEI_NON_SEVERE:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) {
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
reset_required = true;
@@ -8563,25 +8792,31 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_HBM_CATTRIP_0 ... GAUDI2_EVENT_HBM_CATTRIP_5:
gaudi2_handle_hbm_cattrip(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HBM0_MC0_SPI ... GAUDI2_EVENT_HBM5_MC1_SPI:
gaudi2_handle_hbm_mc_spi(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_DRAIN_COMPLETE:
gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN:
gaudi2_handle_psoc_drain(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ECC:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_L2_RAM_ECC:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME0_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP:
@@ -8595,17 +8830,24 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP);
gaudi2_handle_mme_sbte_err(hdev, index, sbte_index,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_VM0_ALARM_A ... GAUDI2_EVENT_VM3_ALARM_B:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC_AXI_ERR_RSP:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ break;
case GAUDI2_EVENT_PSOC_PRSTN_FALL:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_APB_TIMEOUT:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FATAL_ERR:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_TPC0_BMON_SPMU:
case GAUDI2_EVENT_TPC1_BMON_SPMU:
@@ -8657,6 +8899,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_DEC8_BMON_SPMU:
case GAUDI2_EVENT_DEC9_BMON_SPMU:
case GAUDI2_EVENT_ROTATOR0_BMON_SPMU ... GAUDI2_EVENT_SM3_BMON_SPMU:
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S:
@@ -8664,43 +8907,53 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
gaudi2_print_clk_change_info(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC:
gaudi2_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FLR_REQUESTED:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
/* Do nothing- FW will handle it */
break;
case GAUDI2_EVENT_PCIE_P2P_MSIX:
gaudi2_handle_pcie_p2p_msix(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE;
skip_reset = !gaudi2_handle_sm_err(hdev, index);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR ... GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT:
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
gaudi2_print_cpu_pkt_failure_info(hdev, &eq_entry->pkt_sync_err);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_ARC_DCCM_FULL:
hl_arc_event_handle(hdev, &eq_entry->arc_data);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
default:
@@ -8716,15 +8969,22 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
if (!gaudi2_irq_map_table[event_type].msg)
hl_fw_unmask_irq(hdev, event_type);
+ if (event_mask)
+ hl_notifier_event_send_all(hdev, event_mask);
+
return;
reset_device:
if (hdev->hard_reset_on_fw_events) {
hl_device_reset(hdev, reset_flags);
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
} else {
if (!gaudi2_irq_map_table[event_type].msg)
hl_fw_unmask_irq(hdev, event_type);
}
+
+ if (event_mask)
+ hl_notifier_event_send_all(hdev, event_mask);
}
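Each handled event now contributes notifier bits to event_mask, and the accumulated mask is delivered to user processes once, at the end of the handler, via the existing common helper. A minimal sketch of that pattern in isolation (the event bits are the uapi values used above; the wrapper function is illustrative):

/* Pattern only: accumulate notifier bits, then notify user processes once */
static void example_notify(struct hl_device *hdev, bool hw_err, bool engine_err)
{
	u64 event_mask = 0;

	if (hw_err)
		event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
	if (engine_err)
		event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;

	if (event_mask)
		hl_notifier_event_send_all(hdev, event_mask);
}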
static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val)
@@ -9090,19 +9350,17 @@ static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, v
}
/* Create mapping on asic side */
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, reserved_va_base, host_mem_dma_addr, SZ_2M);
hl_mmu_invalidate_cache_range(hdev, false,
MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, reserved_va_base, SZ_2M);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc) {
dev_err(hdev->dev, "Failed to create mapping on asic mmu\n");
goto unreserve_va;
}
- hdev->asic_funcs->kdma_lock(hdev, 0);
-
/* Enable MMU on KDMA */
gaudi2_kdma_set_mmbp_asid(hdev, false, ctx->asid);
@@ -9130,13 +9388,11 @@ static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, v
gaudi2_kdma_set_mmbp_asid(hdev, true, HL_KERNEL_ASID_ID);
- hdev->asic_funcs->kdma_unlock(hdev, 0);
-
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
ctx->asid, reserved_va_base, SZ_2M);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
unreserve_va:
hl_unreserve_va_block(hdev, ctx, reserved_va_base, SZ_2M);
free_data_buffer:
@@ -9189,11 +9445,11 @@ static int gaudi2_internal_cb_pool_init(struct hl_device *hdev, struct hl_ctx *c
goto destroy_internal_cb_pool;
}
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc)
goto unreserve_internal_cb_pool;
@@ -9218,11 +9474,11 @@ static void gaudi2_internal_cb_pool_fini(struct hl_device *hdev, struct hl_ctx *
if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU))
return;
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
gen_pool_destroy(hdev->internal_cb_pool);
@@ -9336,7 +9592,7 @@ static u32 gaudi2_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
static u32 gaudi2_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, u32 size, bool eb)
{
- struct hl_cb *cb = (struct hl_cb *) data;
+ struct hl_cb *cb = data;
struct packet_msg_short *pkt;
u32 value, ctl, pkt_size = sizeof(*pkt);
@@ -9429,7 +9685,7 @@ static u32 gaudi2_add_fence_pkt(struct packet_fence *pkt)
static u32 gaudi2_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_properties *prop)
{
- struct hl_cb *cb = (struct hl_cb *) prop->data;
+ struct hl_cb *cb = prop->data;
void *buf = (void *) (uintptr_t) (cb->kernel_address);
u64 monitor_base, fence_addr = 0;
@@ -9481,7 +9737,7 @@ static u32 gaudi2_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_propert
static void gaudi2_reset_sob(struct hl_device *hdev, void *data)
{
- struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
+ struct hl_hw_sob *hw_sob = data;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, hw_sob->sob_id);
@@ -9724,7 +9980,7 @@ static int gaudi2_get_mmu_base(struct hl_device *hdev, u64 mmu_id, u32 *mmu_base
static void gaudi2_ack_mmu_error(struct hl_device *hdev, u64 mmu_id)
{
- bool is_pmmu = (mmu_id == HW_CAP_PMMU ? true : false);
+ bool is_pmmu = (mmu_id == HW_CAP_PMMU);
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 mmu_base;
@@ -9881,6 +10137,17 @@ static int gaudi2_get_monitor_dump(struct hl_device *hdev, void *data)
return -EOPNOTSUPP;
}
+int gaudi2_send_device_activity(struct hl_device *hdev, bool open)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q) || hdev->fw_major_version < 37)
+ return 0;
+
+ /* TODO: add check for FW version using minor ver once it's known */
+ return hl_fw_send_device_activity(hdev, open);
+}
+
static const struct hl_asic_funcs gaudi2_funcs = {
.early_init = gaudi2_early_init,
.early_fini = gaudi2_early_fini,
@@ -9927,11 +10194,9 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.send_heartbeat = gaudi2_send_heartbeat,
.debug_coresight = gaudi2_debug_coresight,
.is_device_idle = gaudi2_is_device_idle,
- .non_hard_reset_late_init = gaudi2_non_hard_reset_late_init,
+ .compute_reset_late_init = gaudi2_compute_reset_late_init,
.hw_queues_lock = gaudi2_hw_queues_lock,
.hw_queues_unlock = gaudi2_hw_queues_unlock,
- .kdma_lock = gaudi2_kdma_lock,
- .kdma_unlock = gaudi2_kdma_unlock,
.get_pci_id = gaudi2_get_pci_id,
.get_eeprom_data = gaudi2_get_eeprom_data,
.get_monitor_dump = gaudi2_get_monitor_dump,
@@ -9978,6 +10243,8 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.mmu_get_real_page_size = gaudi2_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi2_set_hbm_bar_base,
+ .set_engine_cores = gaudi2_set_engine_cores,
+ .send_device_activity = gaudi2_send_device_activity,
};
void gaudi2_set_asic_funcs(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2P.h b/drivers/misc/habanalabs/gaudi2/gaudi2P.h
index e4bc4009f05b..a99c348bbf39 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2P.h
@@ -15,7 +15,6 @@
#include "../include/gaudi2/gaudi2_packets.h"
#include "../include/gaudi2/gaudi2_fw_if.h"
#include "../include/gaudi2/gaudi2_async_events.h"
-#include "../include/gaudi2/gaudi2_async_virt_events.h"
#define GAUDI2_LINUX_FW_FILE "habanalabs/gaudi2/gaudi2-fit.itb"
#define GAUDI2_BOOT_FIT_FILE "habanalabs/gaudi2/gaudi2-boot-fit.itb"
@@ -140,9 +139,6 @@
#define VA_HOST_SPACE_HPAGE_START 0xFFF0800000000000ull
#define VA_HOST_SPACE_HPAGE_END 0xFFF1000000000000ull /* 140TB */
-#define VA_HOST_SPACE_USER_MAPPED_CB_START 0xFFF1000000000000ull
-#define VA_HOST_SPACE_USER_MAPPED_CB_END 0xFFF1000100000000ull /* 4GB */
-
/* 140TB */
#define VA_HOST_SPACE_PAGE_SIZE (VA_HOST_SPACE_PAGE_END - VA_HOST_SPACE_PAGE_START)
@@ -458,7 +454,6 @@ struct dup_block_ctx {
* the user can map.
* @lfsr_rand_seeds: array of MME ACC random seeds to set.
* @hw_queues_lock: protects the H/W queues from concurrent access.
- * @kdma_lock: protects the KDMA engine from concurrent access.
* @scratchpad_kernel_address: general purpose PAGE_SIZE contiguous memory,
* this memory region should be write-only.
* currently used for HBW QMAN writes which is
@@ -510,9 +505,6 @@ struct dup_block_ctx {
* @flush_db_fifo: flag to force flush DB FIFO after a write.
* @hbm_cfg: HBM subsystem settings
* @hw_queues_lock_mutex: used by simulator instead of hw_queues_lock.
- * @kdma_lock_mutex: used by simulator instead of kdma_lock.
- * @use_deprecated_event_mappings: use old event mappings which are about to be
- * deprecated
*/
struct gaudi2_device {
int (*cpucp_info_get)(struct hl_device *hdev);
@@ -521,7 +513,6 @@ struct gaudi2_device {
int lfsr_rand_seeds[MME_NUM_OF_LFSR_SEEDS];
spinlock_t hw_queues_lock;
- spinlock_t kdma_lock;
void *scratchpad_kernel_address;
dma_addr_t scratchpad_bus_address;
@@ -562,5 +553,6 @@ void gaudi2_pb_print_security_errors(struct hl_device *hdev, u32 block_addr, u32
u32 offended_addr);
int gaudi2_init_security(struct hl_device *hdev);
void gaudi2_ack_protection_bits_errors(struct hl_device *hdev);
+int gaudi2_send_device_activity(struct hl_device *hdev, bool open);
#endif /* GAUDI2P_H_ */
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h b/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
index eed16d642a5a..e9ac87828221 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
@@ -51,12 +51,18 @@
(0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
-#define PDMA1_QMAN_ENABLE \
+#define PDMA0_QMAN_ENABLE \
((0x3 << PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
+#define PDMA1_QMAN_ENABLE \
+ ((0x1 << PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
+
/* QM_IDLE_MASK is valid for all engines QM idle check */
#define QM_IDLE_MASK (DCORE0_EDMA0_QM_GLBL_STS0_PQF_IDLE_MASK | \
DCORE0_EDMA0_QM_GLBL_STS0_CQF_IDLE_MASK | \
@@ -138,4 +144,17 @@
#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_SHIFT 15
#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_MASK 0x8000
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_ERR_INTR_SHIFT 0
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_ERR_INTR_MASK 0x1
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_SHIFT 1
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK 0x2
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_SHIFT 2
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK 0x4
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_ERR_INTR_MASK_SHIFT 3
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_ERR_INTR_MASK_MASK 0x8
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK_SHIFT 4
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK_MASK 0x10
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK_SHIFT 5
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK_MASK 0x20
+
#endif /* GAUDI2_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c b/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
index 89a06ff5ba34..c6906fb14229 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
@@ -2559,6 +2559,10 @@ static const u32 gaudi2_pb_pcie[] = {
mmPCIE_WRAP_BASE,
};
+static const u32 gaudi2_pb_pcie_unsecured_regs[] = {
+ mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0,
+};
+
static const u32 gaudi2_pb_thermal_sensor0[] = {
mmDCORE0_XFT_BASE,
mmDCORE0_TSTDVS_BASE,
@@ -2583,9 +2587,9 @@ struct gaudi2_tpc_pb_data {
};
static void gaudi2_config_tpcs_glbl_sec(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_tpc_pb_data *pb_data = (struct gaudi2_tpc_pb_data *)data;
+ struct gaudi2_tpc_pb_data *pb_data = ctx->data;
hl_config_glbl_sec(hdev, gaudi2_pb_dcr0_tpc0, pb_data->glbl_sec,
offset, pb_data->block_array_size);
@@ -2660,15 +2664,14 @@ static int gaudi2_init_pb_tpc(struct hl_device *hdev)
struct gaudi2_tpc_arc_pb_data {
u32 unsecured_regs_arr_size;
u32 arc_regs_arr_size;
- int rc;
};
static void gaudi2_config_tpcs_pb_ranges(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_tpc_arc_pb_data *pb_data = (struct gaudi2_tpc_arc_pb_data *)data;
+ struct gaudi2_tpc_arc_pb_data *pb_data = ctx->data;
- pb_data->rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, 1,
+ ctx->rc = hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, 1,
offset, gaudi2_pb_dcr0_tpc0_arc,
pb_data->arc_regs_arr_size,
gaudi2_pb_dcr0_tpc0_arc_unsecured_regs,
@@ -2683,12 +2686,12 @@ static int gaudi2_init_pb_tpc_arc(struct hl_device *hdev)
tpc_arc_pb_data.arc_regs_arr_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc);
tpc_arc_pb_data.unsecured_regs_arr_size =
ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc_unsecured_regs);
- tpc_arc_pb_data.rc = 0;
+
tpc_iter.fn = &gaudi2_config_tpcs_pb_ranges;
tpc_iter.data = &tpc_arc_pb_data;
gaudi2_iterate_tpcs(hdev, &tpc_iter);
- return tpc_arc_pb_data.rc;
+ return tpc_iter.rc;
}
static int gaudi2_init_pb_sm_objs(struct hl_device *hdev)
@@ -3419,7 +3422,8 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev)
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_pcie, ARRAY_SIZE(gaudi2_pb_pcie),
- NULL, HL_PB_NA);
+ gaudi2_pb_pcie_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_pcie_unsecured_regs));
/* Thermal Sensor.
* Skip when security is enabled in F/W, because the blocks are protected by privileged RR.
@@ -3547,9 +3551,9 @@ struct gaudi2_ack_pb_tpc_data {
};
static void gaudi2_ack_pb_tpc_config(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_ack_pb_tpc_data *pb_data = (struct gaudi2_ack_pb_tpc_data *)data;
+ struct gaudi2_ack_pb_tpc_data *pb_data = ctx->data;
hl_ack_pb_single_dcore(hdev, offset, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_tpc0, pb_data->tpc_regs_array_size);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index db4487c33582..5ef9e3ca97a6 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -916,26 +916,11 @@ int goya_late_init(struct hl_device *hdev)
*/
void goya_late_fini(struct hl_device *hdev)
{
- const struct hwmon_channel_info **channel_info_arr;
struct goya_device *goya = hdev->asic_specific;
- int i = 0;
cancel_delayed_work_sync(&goya->goya_work->work_freq);
- if (!hdev->hl_chip_info->info)
- return;
-
- channel_info_arr = hdev->hl_chip_info->info;
-
- while (channel_info_arr[i]) {
- kfree(channel_info_arr[i]->config);
- kfree(channel_info_arr[i]);
- i++;
- }
-
- kfree(channel_info_arr);
-
- hdev->hl_chip_info->info = NULL;
+ hl_hwmon_release_resources(hdev);
}
static void goya_set_pci_memory_regions(struct hl_device *hdev)
@@ -1040,6 +1025,7 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->asic_prop.supports_compute_reset = true;
hdev->asic_prop.allow_inference_soft_reset = true;
hdev->supports_wait_for_multi_cs = false;
+ hdev->supports_ctx_switch = true;
hdev->asic_funcs->set_pci_memory_regions(hdev);
@@ -4559,7 +4545,7 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
return rc;
}
-static int goya_non_hard_reset_late_init(struct hl_device *hdev)
+static int goya_compute_reset_late_init(struct hl_device *hdev)
{
/*
* Unmask all IRQs since some could have been received
@@ -5137,8 +5123,8 @@ int goya_cpucp_info_get(struct hl_device *hdev)
return 0;
}
-static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
- u8 mask_len, struct seq_file *s)
+static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
{
const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
@@ -5149,9 +5135,9 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
u64 offset;
int i;
- if (s)
- seq_puts(s, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
- "--- ------- ------------ -------------\n");
+ if (e)
+ hl_engine_data_sprintf(e, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
+ "--- ------- ------------ -------------\n");
offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
@@ -5164,13 +5150,13 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask);
- if (s)
- seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
+ if (e)
+ hl_engine_data_sprintf(e, dma_fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, dma_core_sts0);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n"
"--- ------- ------------ -------------- ----------\n");
@@ -5187,13 +5173,13 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask);
- if (s)
- seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
+ if (e)
+ hl_engine_data_sprintf(e, fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n"
"--- ------- ------------ -------------- -----------\n");
@@ -5207,10 +5193,10 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_MME_0, mask);
- if (s) {
- seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
+ if (e) {
+ hl_engine_data_sprintf(e, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
cmdq_glbl_sts0, mme_arch_sts);
- seq_puts(s, "\n");
+ hl_engine_data_sprintf(e, "\n");
}
return is_idle;
@@ -5434,6 +5420,11 @@ static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
return -EOPNOTSUPP;
}
+static int goya_send_device_activity(struct hl_device *hdev, bool open)
+{
+ return 0;
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5478,11 +5469,9 @@ static const struct hl_asic_funcs goya_funcs = {
.send_heartbeat = goya_send_heartbeat,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
- .non_hard_reset_late_init = goya_non_hard_reset_late_init,
+ .compute_reset_late_init = goya_compute_reset_late_init,
.hw_queues_lock = goya_hw_queues_lock,
.hw_queues_unlock = goya_hw_queues_unlock,
- .kdma_lock = NULL,
- .kdma_unlock = NULL,
.get_pci_id = goya_get_pci_id,
.get_eeprom_data = goya_get_eeprom_data,
.get_monitor_dump = goya_get_monitor_dump,
@@ -5528,6 +5517,7 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_get_real_page_size = hl_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = goya_set_ddr_bar_base,
+ .send_device_activity = goya_send_device_activity,
};
/*
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index abf40e1c4965..baa5aa43b6f4 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -629,6 +629,12 @@ enum pq_init_status {
* CPUCP_PACKET_ENGINE_CORE_ASID_SET -
* Packet to perform engine core ASID configuration
*
+ * CPUCP_PACKET_SEC_ATTEST_GET -
+ * Get the attestation data that is collected during various stages of the
+ * boot sequence. The attestation data is also hashed with a unique
+ * number (nonce) provided by the host to prevent replay attacks.
+ * A public key and certificate are also provided as part of the FW response.
+ *
* CPUCP_PACKET_MONITOR_DUMP_GET -
* Get monitors registers dump from the CpuCP kernel.
* The CPU will put the registers dump in the a buffer allocated by the driver
@@ -636,6 +642,10 @@ enum pq_init_status {
* passes the max size it allows the CpuCP to write to the structure, to prevent
* data corruption in case of mismatched driver/FW versions.
* Relevant only to Gaudi.
+ *
+ * CPUCP_PACKET_ACTIVE_STATUS_SET -
+ * LKD sends the FW an indication of whether the device is free or in use; this
+ * indication is also reported to the BMC.
*/
enum cpucp_packet_id {
@@ -687,10 +697,17 @@ enum cpucp_packet_id {
CPUCP_PACKET_RESERVED, /* not used */
CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
CPUCP_PACKET_RESERVED3, /* not used */
CPUCP_PACKET_RESERVED4, /* not used */
- CPUCP_PACKET_RESERVED5, /* not used */
CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_RESERVED6, /* not used */
+ CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_RESERVED8, /* not used */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_ID_MAX /* must be last */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -783,6 +800,9 @@ struct cpucp_packet {
* result cannot be used to hold general purpose data.
*/
__le32 status_mask;
+
+ /* random number, used once (nonce), for security packets */
+ __le32 nonce;
};
/* For NIC requests */
@@ -813,10 +833,25 @@ enum cpucp_led_index {
CPUCP_LED2_INDEX
};
+/*
+ * enum cpucp_packet_rc - Error return code
+ * @cpucp_packet_success -> returned on success.
+ * @cpucp_packet_invalid -> kept to support the Goya and Gaudi platforms.
+ * @cpucp_packet_fault -> returned on a processing error, e.g. failing to
+ * get a device binding or semaphore.
+ * @cpucp_packet_invalid_pkt -> returned when the cpucp packet is unsupported.
+ * Supported from Greco onwards.
+ * @cpucp_packet_invalid_params -> returned when a parameter check fails, e.g. on
+ * buffer length or attribute value. Supported from Greco onwards.
+ * @cpucp_packet_rc_max -> indicates the size of the enum and must be last.
+ */
enum cpucp_packet_rc {
cpucp_packet_success,
cpucp_packet_invalid,
- cpucp_packet_fault
+ cpucp_packet_fault,
+ cpucp_packet_invalid_pkt,
+ cpucp_packet_invalid_params,
+ cpucp_packet_rc_max
};
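For illustration, a minimal sketch (not part of this patch) of how a host-side consumer might fold these return codes into standard errno values; the helper name and the chosen errno mapping are assumptions.

/* Illustrative only: hypothetical mapping of cpucp_packet_rc to errno. */
static int cpucp_rc_to_errno(u32 rc)
{
	switch (rc) {
	case cpucp_packet_success:
		return 0;
	case cpucp_packet_invalid:		/* legacy Goya/Gaudi reply */
	case cpucp_packet_invalid_pkt:		/* unsupported packet, Greco onwards */
		return -EOPNOTSUPP;
	case cpucp_packet_invalid_params:	/* bad length/attribute, Greco onwards */
		return -EINVAL;
	case cpucp_packet_fault:
	default:
		return -EIO;
	}
}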
/*
@@ -1193,6 +1228,70 @@ enum cpu_reset_status {
CPU_RST_STATUS_SOFT_RST_DONE = 1,
};
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct cpucp_sec_attest_info - attestation report of the boot
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @nonce: number used only once; a random number provided by the host. It is also
+ * passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pcr_quote: attestation report data structure
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @quote_sig: signature structure of the attestation report
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the attestation signing key
+ */
+struct cpucp_sec_attest_info {
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __le16 pad0;
+ __le32 nonce;
+ __le16 pcr_quote_len;
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig_len;
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
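As a sanity check on the layout above, a minimal sketch that bounds-checks the self-describing length fields of the attestation report against the SEC_*_BUF_SZ buffer sizes; the function name and the decision to reject oversized lengths are assumptions about how a consumer might handle a malformed FW response.

/* Illustrative only: validate reported lengths before using the buffers. */
static int attest_info_lengths_ok(const struct cpucp_sec_attest_info *info)
{
	if (le16_to_cpu(info->pcr_quote_len) > SEC_PCR_QUOTE_BUF_SZ)
		return -EINVAL;
	if (le16_to_cpu(info->pub_data_len) > SEC_PUB_DATA_BUF_SZ)
		return -EINVAL;
	if (le16_to_cpu(info->certificate_len) > SEC_CERTIFICATE_BUF_SZ)
		return -EINVAL;
	return 0;
}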
+
+/*
+ * struct cpucp_dev_info_signed - device information signed by a secured device
+ * @info: device information structure as defined above
+ * @nonce: number used only once; a random number provided by the host. This
+ * number is hashed and signed along with the device information.
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @info_sig: signature of the info + nonce data.
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed info data
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the signing key
+ */
+struct cpucp_dev_info_signed {
+ struct cpucp_info info; /* assumed to be 64bit aligned */
+ __le32 nonce;
+ __le32 pad0;
+ __u8 info_sig_len;
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
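A minimal sketch of the replay-protection pattern the @nonce field enables: the host keeps the nonce it sent and compares it against the one echoed in the signed response. The helper name is hypothetical; only the struct fields come from the definition above.

/* Illustrative only: reject a signed info blob whose nonce does not match. */
static int check_dev_info_nonce(const struct cpucp_dev_info_signed *resp,
				u32 sent_nonce)
{
	if (le32_to_cpu(resp->nonce) != sent_nonce)
		return -EBADMSG;
	return 0;
}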
+
/*
* struct dcore_monitor_regs_data - DCORE monitor regs data.
* the structure follows sync manager block layout. relevant only to Gaudi.
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index a3594119bc51..e0ea51cc7475 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -34,6 +34,7 @@ enum cpu_boot_err {
CPU_BOOT_ERR_BINNING_FAIL = 19,
CPU_BOOT_ERR_TPM_FAIL = 20,
CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21,
+ CPU_BOOT_ERR_EEPROM_FAIL = 22,
CPU_BOOT_ERR_ENABLED = 31,
CPU_BOOT_ERR_SCND_EN = 63,
CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
@@ -115,6 +116,9 @@ enum cpu_boot_err {
 * CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL Failed to set threshold for temperature
* sensor.
*
+ * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults
+ * are used.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -139,6 +143,7 @@ enum cpu_boot_err {
#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL)
+#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL)
#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
@@ -426,7 +431,9 @@ struct cpu_dyn_regs {
__le32 gic_host_ints_irq;
__le32 gic_host_soft_rst_irq;
__le32 gic_rot_qm_irq_ctrl;
- __le32 reserved1[22]; /* reserve for future use */
+ __le32 cpu_rst_status;
+ __le32 eng_arc_irq_ctrl;
+ __le32 reserved1[20]; /* reserve for future use */
};
/* TODO: remove the desc magic after the code is updated to use message */
@@ -465,6 +472,26 @@ enum comms_msg_type {
HL_COMMS_BINNING_CONF_TYPE = 3,
};
+/*
+ * Binning information shared between LKD and FW
+ * @tpc_mask - TPC binning information
+ * @dec_mask - Decoder binning information
+ * @hbm_mask - HBM binning information
+ * @edma_mask - EDMA binning information
+ * @mme_mask_l - MME binning information, lower 32 bits
+ * @mme_mask_h - MME binning information, upper 32 bits
+ * @reserved - reserved field for 64 bit alignment
+ */
+struct lkd_fw_binning_info {
+ __le64 tpc_mask;
+ __le32 dec_mask;
+ __le32 hbm_mask;
+ __le32 edma_mask;
+ __le32 mme_mask_l;
+ __le32 mme_mask_h;
+ __le32 reserved;
+};
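A minimal sketch of how the masks could be packed into this little-endian message, splitting the 64-bit MME mask across mme_mask_l/mme_mask_h; the helper name and its parameters are assumptions.

/* Illustrative only: populate the binning message from CPU-native masks. */
static void fill_binning_info(struct lkd_fw_binning_info *info, u64 tpc,
			      u32 dec, u32 hbm, u32 edma, u64 mme)
{
	info->tpc_mask = cpu_to_le64(tpc);
	info->dec_mask = cpu_to_le32(dec);
	info->hbm_mask = cpu_to_le32(hbm);
	info->edma_mask = cpu_to_le32(edma);
	info->mme_mask_l = cpu_to_le32(lower_32_bits(mme));
	info->mme_mask_h = cpu_to_le32(upper_32_bits(mme));
	info->reserved = 0;
}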
+
/* TODO: remove this struct after the code is updated to use message */
/* this is the comms descriptor header - meta data */
struct comms_desc_header {
@@ -525,13 +552,7 @@ struct lkd_fw_comms_msg {
struct {
__u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
};
- struct {
- __le64 tpc_binning_conf;
- __le32 dec_binning_conf;
- __le32 hbm_binning_conf;
- __le32 edma_binning_conf;
- __le32 mme_redundancy_conf; /* use MME_REDUNDANT_COLUMN */
- };
+ struct lkd_fw_binning_info binning_info;
};
};
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
index d0e2c68a639f..6aa1b1412462 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
@@ -132,6 +132,7 @@
#include "dcore0_mme_ctrl_lo_arch_tensor_a_regs.h"
#include "dcore0_mme_ctrl_lo_arch_tensor_b_regs.h"
#include "dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h"
+#include "pcie_wrap_special_regs.h"
#include "pdma0_qm_masks.h"
#include "pdma0_core_masks.h"
@@ -239,6 +240,7 @@
#define SFT_IF_RTR_OFFSET (mmSFT0_HBW_RTR_IF1_RTR_H3_BASE - mmSFT0_HBW_RTR_IF0_RTR_H3_BASE)
#define ARC_HALT_REQ_OFFSET (mmARC_FARM_ARC0_AUX_RUN_HALT_REQ - mmARC_FARM_ARC0_AUX_BASE)
+#define ARC_HALT_ACK_OFFSET (mmARC_FARM_ARC0_AUX_RUN_HALT_ACK - mmARC_FARM_ARC0_AUX_BASE)
#define ARC_REGION_CFG_OFFSET(region) \
(mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_0 + (region * 4) - mmARC_FARM_ARC0_AUX_BASE)
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
new file mode 100644
index 000000000000..46558e7a7f63
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_WRAP_SPECIAL_REGS_H_
+#define ASIC_REG_PCIE_WRAP_SPECIAL_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_WRAP_SPECIAL
+ * (Prototype: SPECIAL_REGS)
+ *****************************************
+ */
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_0 0x4C01E80
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_1 0x4C01E84
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_2 0x4C01E88
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_3 0x4C01E8C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_4 0x4C01E90
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_5 0x4C01E94
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_6 0x4C01E98
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_7 0x4C01E9C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_8 0x4C01EA0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_9 0x4C01EA4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_10 0x4C01EA8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_11 0x4C01EAC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_12 0x4C01EB0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_13 0x4C01EB4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_14 0x4C01EB8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_15 0x4C01EBC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_16 0x4C01EC0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_17 0x4C01EC4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_18 0x4C01EC8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_19 0x4C01ECC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_20 0x4C01ED0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_21 0x4C01ED4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_22 0x4C01ED8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_23 0x4C01EDC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_24 0x4C01EE0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_25 0x4C01EE4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_26 0x4C01EE8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_27 0x4C01EEC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_28 0x4C01EF0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_29 0x4C01EF4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_30 0x4C01EF8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_31 0x4C01EFC
+
+#define mmPCIE_WRAP_SPECIAL_MEM_GW_DATA 0x4C01F00
+
+#define mmPCIE_WRAP_SPECIAL_MEM_GW_REQ 0x4C01F04
+
+#define mmPCIE_WRAP_SPECIAL_MEM_NUMOF 0x4C01F0C
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_SEL 0x4C01F10
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_CTL 0x4C01F14
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_ERR_MASK 0x4C01F18
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_GLBL_ERR_MASK 0x4C01F1C
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_ERR_STS 0x4C01F20
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_ERR_ADDR 0x4C01F24
+
+#define mmPCIE_WRAP_SPECIAL_MEM_RM 0x4C01F28
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_ERR_MASK 0x4C01F40
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_ERR_ADDR 0x4C01F44
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_ERR_CAUSE 0x4C01F48
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0 0x4C01F60
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SPARE_1 0x4C01F64
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SPARE_2 0x4C01F68
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SPARE_3 0x4C01F6C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_0 0x4C01F80
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_1 0x4C01F84
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_2 0x4C01F88
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_3 0x4C01F8C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_4 0x4C01F90
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_5 0x4C01F94
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_6 0x4C01F98
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_7 0x4C01F9C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_8 0x4C01FA0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_9 0x4C01FA4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_10 0x4C01FA8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_11 0x4C01FAC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_12 0x4C01FB0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_13 0x4C01FB4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_14 0x4C01FB8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_15 0x4C01FBC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_16 0x4C01FC0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_17 0x4C01FC4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_18 0x4C01FC8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_19 0x4C01FCC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_20 0x4C01FD0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_21 0x4C01FD4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_22 0x4C01FD8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_23 0x4C01FDC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_24 0x4C01FE0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_25 0x4C01FE4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_26 0x4C01FE8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_27 0x4C01FEC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_28 0x4C01FF0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_29 0x4C01FF4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_30 0x4C01FF8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_31 0x4C01FFC
+
+#endif /* ASIC_REG_PCIE_WRAP_SPECIAL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h
deleted file mode 100644
index 6d6ed7838a64..000000000000
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2022 HabanaLabs, Ltd.
- * All Rights Reserved.
- *
- */
-
-#ifndef __GAUDI2_ASYNC_VIRT_EVENTS_H_
-#define __GAUDI2_ASYNC_VIRT_EVENTS_H_
-
-enum gaudi2_async_virt_event_id {
- GAUDI2_EVENT_NIC3_QM1_OLD = 1206,
- GAUDI2_EVENT_NIC4_QM0_OLD = 1207,
- GAUDI2_EVENT_NIC4_QM1_OLD = 1208,
- GAUDI2_EVENT_NIC5_QM0_OLD = 1209,
- GAUDI2_EVENT_NIC5_QM1_OLD = 1210,
- GAUDI2_EVENT_NIC6_QM0_OLD = 1211,
- GAUDI2_EVENT_NIC6_QM1_OLD = 1212,
- GAUDI2_EVENT_NIC7_QM0_OLD = 1213,
- GAUDI2_EVENT_NIC7_QM1_OLD = 1214,
- GAUDI2_EVENT_NIC8_QM0_OLD = 1215,
- GAUDI2_EVENT_NIC8_QM1_OLD = 1216,
- GAUDI2_EVENT_NIC9_QM0_OLD = 1217,
- GAUDI2_EVENT_NIC9_QM1_OLD = 1218,
- GAUDI2_EVENT_NIC10_QM0_OLD = 1219,
- GAUDI2_EVENT_NIC10_QM1_OLD = 1220,
- GAUDI2_EVENT_NIC11_QM0_OLD = 1221,
- GAUDI2_EVENT_NIC11_QM1_OLD = 1222,
- GAUDI2_EVENT_CPU_PKT_SANITY_FAILED_OLD = 1223,
- GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0_OLD = 1224,
- GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG1_OLD = 1225,
- GAUDI2_EVENT_CPU1_STATUS_NIC1_ENG0_OLD = 1226,
- GAUDI2_EVENT_CPU1_STATUS_NIC1_ENG1_OLD = 1227,
- GAUDI2_EVENT_CPU2_STATUS_NIC2_ENG0_OLD = 1228,
- GAUDI2_EVENT_CPU2_STATUS_NIC2_ENG1_OLD = 1229,
- GAUDI2_EVENT_CPU3_STATUS_NIC3_ENG0_OLD = 1230,
- GAUDI2_EVENT_CPU3_STATUS_NIC3_ENG1_OLD = 1231,
- GAUDI2_EVENT_CPU4_STATUS_NIC4_ENG0_OLD = 1232,
- GAUDI2_EVENT_CPU4_STATUS_NIC4_ENG1_OLD = 1233,
- GAUDI2_EVENT_CPU5_STATUS_NIC5_ENG0_OLD = 1234,
- GAUDI2_EVENT_CPU5_STATUS_NIC5_ENG1_OLD = 1235,
- GAUDI2_EVENT_CPU6_STATUS_NIC6_ENG0_OLD = 1236,
- GAUDI2_EVENT_CPU6_STATUS_NIC6_ENG1_OLD = 1237,
- GAUDI2_EVENT_CPU7_STATUS_NIC7_ENG0_OLD = 1238,
- GAUDI2_EVENT_CPU7_STATUS_NIC7_ENG1_OLD = 1239,
- GAUDI2_EVENT_CPU8_STATUS_NIC8_ENG0_OLD = 1240,
- GAUDI2_EVENT_CPU8_STATUS_NIC8_ENG1_OLD = 1241,
- GAUDI2_EVENT_CPU9_STATUS_NIC9_ENG0_OLD = 1242,
- GAUDI2_EVENT_CPU9_STATUS_NIC9_ENG1_OLD = 1243,
- GAUDI2_EVENT_CPU10_STATUS_NIC10_ENG0_OLD = 1244,
- GAUDI2_EVENT_CPU10_STATUS_NIC10_ENG1_OLD = 1245,
- GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG0_OLD = 1246,
- GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1_OLD = 1247,
- GAUDI2_EVENT_ARC_DCCM_FULL_OLD = 1248,
-};
-
-#endif /* __GAUDI2_ASYNC_VIRT_EVENTS_H_ */
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 2c4bb6d6e1a0..1cb71df966a4 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -424,7 +424,7 @@ static int ics932s401_detect(struct i2c_client *client,
if (revision != ICS932S401_REV)
dev_info(&adapter->dev, "Unknown revision %d\n", revision);
- strlcpy(info->type, "ics932s401", I2C_NAME_SIZE);
+ strscpy(info->type, "ics932s401", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/misc/mchp_pci1xxxx/Kconfig b/drivers/misc/mchp_pci1xxxx/Kconfig
new file mode 100644
index 000000000000..4abb47de7219
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/Kconfig
@@ -0,0 +1,13 @@
+config GP_PCI1XXXX
+ tristate "Microchip PCI1XXXX PCIe to GPIO Expander + OTP/EEPROM manager"
+ depends on PCI
+ depends on GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select AUXILIARY_BUS
+ help
+	  PCI1XXXX is a PCIe GEN 3 switch. One of its endpoints exposes
+	  multiple functions, one of which is a GPIO controller that also
+	  has registers to interface with the OTP and EEPROM.
+	  Select yes, no or module here to include or exclude the driver
+	  for the GPIO function.
+
diff --git a/drivers/misc/mchp_pci1xxxx/Makefile b/drivers/misc/mchp_pci1xxxx/Makefile
new file mode 100644
index 000000000000..fc4615cfe28b
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_GP_PCI1XXXX) := mchp_pci1xxxx_gp.o mchp_pci1xxxx_gpio.o
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
new file mode 100644
index 000000000000..32af2b14ff34
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022 Microchip Technology Inc.
+
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include "mchp_pci1xxxx_gp.h"
+
+struct aux_bus_device {
+ struct auxiliary_device_wrapper *aux_device_wrapper[2];
+};
+
+static DEFINE_IDA(gp_client_ida);
+static const char aux_dev_otp_e2p_name[15] = "gp_otp_e2p";
+static const char aux_dev_gpio_name[15] = "gp_gpio";
+
+static void gp_auxiliary_device_release(struct device *dev)
+{
+ struct auxiliary_device_wrapper *aux_device_wrapper =
+ (struct auxiliary_device_wrapper *)container_of(dev,
+ struct auxiliary_device_wrapper, aux_dev.dev);
+
+ ida_free(&gp_client_ida, aux_device_wrapper->aux_dev.id);
+ kfree(aux_device_wrapper);
+}
+
+static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct aux_bus_device *aux_bus;
+ int retval;
+
+ retval = pcim_enable_device(pdev);
+ if (retval)
+ return retval;
+
+ aux_bus = devm_kzalloc(&pdev->dev, sizeof(*aux_bus), GFP_KERNEL);
+ if (!aux_bus)
+ return -ENOMEM;
+
+ aux_bus->aux_device_wrapper[0] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[0]),
+ GFP_KERNEL);
+ if (!aux_bus->aux_device_wrapper[0])
+ return -ENOMEM;
+
+ retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
+ if (retval < 0)
+ goto err_ida_alloc_0;
+
+ aux_bus->aux_device_wrapper[0]->aux_dev.name = aux_dev_otp_e2p_name;
+ aux_bus->aux_device_wrapper[0]->aux_dev.dev.parent = &pdev->dev;
+ aux_bus->aux_device_wrapper[0]->aux_dev.dev.release = gp_auxiliary_device_release;
+ aux_bus->aux_device_wrapper[0]->aux_dev.id = retval;
+
+ aux_bus->aux_device_wrapper[0]->gp_aux_data.region_start = pci_resource_start(pdev, 0);
+ aux_bus->aux_device_wrapper[0]->gp_aux_data.region_length = pci_resource_end(pdev, 0);
+
+ retval = auxiliary_device_init(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ if (retval < 0)
+ goto err_aux_dev_init_0;
+
+ retval = auxiliary_device_add(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ if (retval)
+ goto err_aux_dev_add_0;
+
+ aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
+ GFP_KERNEL);
+ if (!aux_bus->aux_device_wrapper[1])
+ return -ENOMEM;
+
+ retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
+ if (retval < 0)
+ goto err_ida_alloc_1;
+
+ aux_bus->aux_device_wrapper[1]->aux_dev.name = aux_dev_gpio_name;
+ aux_bus->aux_device_wrapper[1]->aux_dev.dev.parent = &pdev->dev;
+ aux_bus->aux_device_wrapper[1]->aux_dev.dev.release = gp_auxiliary_device_release;
+ aux_bus->aux_device_wrapper[1]->aux_dev.id = retval;
+
+ aux_bus->aux_device_wrapper[1]->gp_aux_data.region_start = pci_resource_start(pdev, 0);
+ aux_bus->aux_device_wrapper[1]->gp_aux_data.region_length = pci_resource_end(pdev, 0);
+
+ retval = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+
+ if (retval < 0)
+ goto err_aux_dev_init_1;
+
+ retval = pci_irq_vector(pdev, 0);
+ if (retval < 0)
+ goto err_aux_dev_init_1;
+
+ pdev->irq = retval;
+ aux_bus->aux_device_wrapper[1]->gp_aux_data.irq_num = pdev->irq;
+
+ retval = auxiliary_device_init(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ if (retval < 0)
+ goto err_aux_dev_init_1;
+
+ retval = auxiliary_device_add(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ if (retval)
+ goto err_aux_dev_add_1;
+
+ pci_set_drvdata(pdev, aux_bus);
+ pci_set_master(pdev);
+
+ return 0;
+
+err_aux_dev_add_1:
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
+
+err_aux_dev_init_1:
+ ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
+
+err_ida_alloc_1:
+ kfree(aux_bus->aux_device_wrapper[1]);
+
+err_aux_dev_add_0:
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
+
+err_aux_dev_init_0:
+ ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
+
+err_ida_alloc_0:
+ kfree(aux_bus->aux_device_wrapper[0]);
+
+ return retval;
+}
+
+static void gp_aux_bus_remove(struct pci_dev *pdev)
+{
+ struct aux_bus_device *aux_bus = pci_get_drvdata(pdev);
+
+ auxiliary_device_delete(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ auxiliary_device_delete(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
+}
+
+static const struct pci_device_id pci1xxxx_tbl[] = {
+ { PCI_DEVICE(0x1055, 0xA005) },
+ { PCI_DEVICE(0x1055, 0xA015) },
+ { PCI_DEVICE(0x1055, 0xA025) },
+ { PCI_DEVICE(0x1055, 0xA035) },
+ { PCI_DEVICE(0x1055, 0xA045) },
+ { PCI_DEVICE(0x1055, 0xA055) },
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, pci1xxxx_tbl);
+
+static struct pci_driver pci1xxxx_gp_driver = {
+ .name = "PCI1xxxxGP",
+ .id_table = pci1xxxx_tbl,
+ .probe = gp_aux_bus_probe,
+ .remove = gp_aux_bus_remove,
+};
+
+module_pci_driver(pci1xxxx_gp_driver);
+
+MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx GP expander");
+MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h
new file mode 100644
index 000000000000..37eec73b20d7
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022 Microchip Technology Inc. */
+
+#ifndef _GPIO_PCI1XXXX_H
+#define _GPIO_PCI1XXXX_H
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/auxiliary_bus.h>
+
+/* Perform operations such as variable-length write, read, and write with
+ * read-back for the OTP / EEPROM, and bit-mode write in the OTP.
+ */
+
+struct gp_aux_data_type {
+ int irq_num;
+ resource_size_t region_start;
+ resource_size_t region_length;
+};
+
+struct auxiliary_device_wrapper {
+ struct auxiliary_device aux_dev;
+ struct gp_aux_data_type gp_aux_data;
+};
+
+#endif
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
new file mode 100644
index 000000000000..3389803cb281
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022 Microchip Technology Inc.
+// pci1xxxx gpio driver
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/gpio/driver.h>
+#include <linux/bio.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include "mchp_pci1xxxx_gp.h"
+
+#define PCI1XXXX_NR_PINS 93
+#define PERI_GEN_RESET 0
+#define OUT_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400)
+#define INP_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x10)
+#define OUT_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x20)
+#define INP_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x30)
+#define PULLUP_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x40)
+#define PULLDOWN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x50)
+#define OPENDRAIN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x60)
+#define WAKEMASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x70)
+#define MODE_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x80)
+#define INTR_LO_TO_HI_EDGE_CONFIG(x) ((((x) / 32) * 4) + 0x400 + 0x90)
+#define INTR_HI_TO_LO_EDGE_CONFIG(x) ((((x) / 32) * 4) + 0x400 + 0xA0)
+#define INTR_LEVEL_CONFIG_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xB0)
+#define INTR_LEVEL_MASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xC0)
+#define INTR_STAT_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xD0)
+#define DEBOUNCE_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xE0)
+#define PIO_GLOBAL_CONFIG_OFFSET (0x400 + 0xF0)
+#define PIO_PCI_CTRL_REG_OFFSET (0x400 + 0xF4)
+#define INTR_MASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x100)
+#define INTR_STATUS_OFFSET(x) (((x) * 4) + 0x400 + 0xD0)
+
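To make the banked register layout above concrete, a small standalone sketch (illustrative, not driver code) working through pin 40: it falls in bank 1 (40 / 32) at bit 8 (40 % 32), so its output register sits at 0x424.

/* Illustrative only: standalone check of the bank/offset arithmetic. */
#include <assert.h>

#define EXAMPLE_OUT_OFFSET(x)	((((x) / 32) * 4) + 0x400 + 0x20)

int main(void)
{
	assert(EXAMPLE_OUT_OFFSET(40) == 0x424);	/* bank 1 output register */
	assert(40 % 32 == 8);				/* bit position within the bank */
	return 0;
}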
+struct pci1xxxx_gpio {
+ struct auxiliary_device *aux_dev;
+ void __iomem *reg_base;
+ struct gpio_chip gpio;
+ spinlock_t lock;
+ int irq_base;
+};
+
+static int pci1xxxx_gpio_get_direction(struct gpio_chip *gpio, unsigned int nr)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ u32 data;
+ int ret = -EINVAL;
+
+ data = readl(priv->reg_base + INP_EN_OFFSET(nr));
+ if (data & BIT(nr % 32)) {
+ ret = 1;
+ } else {
+ data = readl(priv->reg_base + OUT_EN_OFFSET(nr));
+ if (data & BIT(nr % 32))
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static inline void pci1xxx_assign_bit(void __iomem *base_addr, unsigned int reg_offset,
+ unsigned int bitpos, bool set)
+{
+ u32 data;
+
+ data = readl(base_addr + reg_offset);
+ if (set)
+ data |= BIT(bitpos);
+ else
+ data &= ~BIT(bitpos);
+ writel(data, base_addr + reg_offset);
+}
+
+static int pci1xxxx_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, INP_EN_OFFSET(nr), (nr % 32), true);
+ pci1xxx_assign_bit(priv->reg_base, OUT_EN_OFFSET(nr), (nr % 32), false);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int pci1xxxx_gpio_get(struct gpio_chip *gpio, unsigned int nr)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+
+ return (readl(priv->reg_base + INP_OFFSET(nr)) >> (nr % 32)) & 1;
+}
+
+static int pci1xxxx_gpio_direction_output(struct gpio_chip *gpio,
+ unsigned int nr, int val)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ unsigned long flags;
+ u32 data;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, INP_EN_OFFSET(nr), (nr % 32), false);
+ pci1xxx_assign_bit(priv->reg_base, OUT_EN_OFFSET(nr), (nr % 32), true);
+ data = readl(priv->reg_base + OUT_OFFSET(nr));
+ if (val)
+ data |= (1 << (nr % 32));
+ else
+ data &= ~(1 << (nr % 32));
+ writel(data, priv->reg_base + OUT_OFFSET(nr));
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void pci1xxxx_gpio_set(struct gpio_chip *gpio,
+ unsigned int nr, int val)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, OUT_OFFSET(nr), (nr % 32), val);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
+ unsigned long config)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pci1xxx_assign_bit(priv->reg_base, PULLUP_OFFSET(offset), (offset % 32), true);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pci1xxx_assign_bit(priv->reg_base, PULLDOWN_OFFSET(offset), (offset % 32), true);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ pci1xxx_assign_bit(priv->reg_base, PULLUP_OFFSET(offset), (offset % 32), false);
+ pci1xxx_assign_bit(priv->reg_base, PULLDOWN_OFFSET(offset), (offset % 32), false);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static void pci1xxxx_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void pci1xxxx_gpio_irq_set_mask(struct irq_data *data, bool set)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, INTR_MASK_OFFSET(gpio), (gpio % 32), set);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void pci1xxxx_gpio_irq_mask(struct irq_data *data)
+{
+ pci1xxxx_gpio_irq_set_mask(data, true);
+}
+
+static void pci1xxxx_gpio_irq_unmask(struct irq_data *data)
+{
+ pci1xxxx_gpio_irq_set_mask(data, false);
+}
+
+static int pci1xxxx_gpio_set_type(struct irq_data *data, unsigned int trigger_type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned int bitpos = gpio % 32;
+
+ if (trigger_type & IRQ_TYPE_EDGE_FALLING) {
+ pci1xxx_assign_bit(priv->reg_base, INTR_HI_TO_LO_EDGE_CONFIG(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio),
+ bitpos, false);
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ pci1xxx_assign_bit(priv->reg_base, INTR_HI_TO_LO_EDGE_CONFIG(gpio),
+ bitpos, true);
+ }
+
+ if (trigger_type & IRQ_TYPE_EDGE_RISING) {
+ pci1xxx_assign_bit(priv->reg_base, INTR_LO_TO_HI_EDGE_CONFIG(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
+ false);
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ pci1xxx_assign_bit(priv->reg_base, INTR_LO_TO_HI_EDGE_CONFIG(gpio),
+ bitpos, true);
+ }
+
+ if (trigger_type & IRQ_TYPE_LEVEL_LOW) {
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_CONFIG_OFFSET(gpio),
+ bitpos, true);
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
+ true);
+ irq_set_handler_locked(data, handle_edge_irq);
+ }
+
+ if (trigger_type & IRQ_TYPE_LEVEL_HIGH) {
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_CONFIG_OFFSET(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
+ true);
+ irq_set_handler_locked(data, handle_edge_irq);
+ }
+
+ if ((!(trigger_type & IRQ_TYPE_LEVEL_LOW)) && (!(trigger_type & IRQ_TYPE_LEVEL_HIGH)))
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio), bitpos, true);
+
+ return true;
+}
+
+static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct pci1xxxx_gpio *priv = dev_id;
+ struct gpio_chip *gc = &priv->gpio;
+ unsigned long int_status = 0;
+ unsigned long flags;
+ u8 pincount;
+ int bit;
+ u8 gpiobank;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 16, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ for (gpiobank = 0; gpiobank < 3; gpiobank++) {
+ spin_lock_irqsave(&priv->lock, flags);
+ int_status = readl(priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (gpiobank == 2)
+ pincount = 29;
+ else
+ pincount = 32;
+ for_each_set_bit(bit, &int_status, pincount) {
+ unsigned int irq;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
+ spin_unlock_irqrestore(&priv->lock, flags);
+ irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
+ generic_handle_irq(irq);
+ }
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 16, false);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip pci1xxxx_gpio_irqchip = {
+ .name = "pci1xxxx_gpio",
+ .irq_ack = pci1xxxx_gpio_irq_ack,
+ .irq_mask = pci1xxxx_gpio_irq_mask,
+ .irq_unmask = pci1xxxx_gpio_irq_unmask,
+ .irq_set_type = pci1xxxx_gpio_set_type,
+};
+
+static int pci1xxxx_gpio_suspend(struct device *dev)
+{
+ struct pci1xxxx_gpio *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
+ 16, true);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
+ 17, false);
+ pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int pci1xxxx_gpio_resume(struct device *dev)
+{
+ struct pci1xxxx_gpio *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
+ 17, true);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
+ 16, false);
+ pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, false);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int pci1xxxx_gpio_setup(struct pci1xxxx_gpio *priv, int irq)
+{
+ struct gpio_chip *gchip = &priv->gpio;
+ struct gpio_irq_chip *girq;
+ int retval;
+
+ gchip->label = dev_name(&priv->aux_dev->dev);
+ gchip->parent = &priv->aux_dev->dev;
+ gchip->owner = THIS_MODULE;
+ gchip->direction_input = pci1xxxx_gpio_direction_input;
+ gchip->direction_output = pci1xxxx_gpio_direction_output;
+ gchip->get_direction = pci1xxxx_gpio_get_direction;
+ gchip->get = pci1xxxx_gpio_get;
+ gchip->set = pci1xxxx_gpio_set;
+ gchip->set_config = pci1xxxx_gpio_set_config;
+ gchip->dbg_show = NULL;
+ gchip->base = -1;
+ gchip->ngpio = PCI1XXXX_NR_PINS;
+ gchip->can_sleep = false;
+
+ retval = devm_request_threaded_irq(&priv->aux_dev->dev, irq,
+ NULL, pci1xxxx_gpio_irq_handler,
+ IRQF_ONESHOT, "PCI1xxxxGPIO", priv);
+
+ if (retval)
+ return retval;
+
+ girq = &priv->gpio.irq;
+ girq->chip = &pci1xxxx_gpio_irqchip;
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ return 0;
+}
+
+static int pci1xxxx_gpio_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+
+{
+ struct auxiliary_device_wrapper *aux_dev_wrapper;
+ struct gp_aux_data_type *pdata;
+ struct pci1xxxx_gpio *priv;
+ int retval;
+
+ aux_dev_wrapper = (struct auxiliary_device_wrapper *)
+ container_of(aux_dev, struct auxiliary_device_wrapper, aux_dev);
+
+ pdata = &aux_dev_wrapper->gp_aux_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&aux_dev->dev, sizeof(struct pci1xxxx_gpio), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ priv->aux_dev = aux_dev;
+
+ if (!devm_request_mem_region(&aux_dev->dev, pdata->region_start, 0x800, aux_dev->name))
+ return -EBUSY;
+
+ priv->reg_base = devm_ioremap(&aux_dev->dev, pdata->region_start, 0x800);
+ if (!priv->reg_base)
+ return -ENOMEM;
+
+ writel(0x0264, (priv->reg_base + 0x400 + 0xF0));
+
+ retval = pci1xxxx_gpio_setup(priv, pdata->irq_num);
+
+ if (retval < 0)
+ return retval;
+
+ dev_set_drvdata(&aux_dev->dev, priv);
+
+ return devm_gpiochip_add_data(&aux_dev->dev, &priv->gpio, priv);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_gpio_pm_ops, pci1xxxx_gpio_suspend, pci1xxxx_gpio_resume);
+
+static const struct auxiliary_device_id pci1xxxx_gpio_auxiliary_id_table[] = {
+ {.name = "mchp_pci1xxxx_gp.gp_gpio"},
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pci1xxxx_gpio_auxiliary_id_table);
+
+static struct auxiliary_driver pci1xxxx_gpio_driver = {
+ .driver = {
+ .name = "PCI1xxxxGPIO",
+ .pm = &pci1xxxx_gpio_pm_ops,
+ },
+ .probe = pci1xxxx_gpio_probe,
+ .id_table = pci1xxxx_gpio_auxiliary_id_table
+};
+module_auxiliary_driver(pci1xxxx_gpio_driver);
+
+MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx GPIO controller");
+MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 79305e4acce2..71fbf0bc8453 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -487,7 +487,7 @@ static void mei_nfc(struct mei_cl_device *cldev)
}
dev_dbg(bus->dev, "nfc radio %s\n", radio_name);
- strlcpy(cldev->name, radio_name, sizeof(cldev->name));
+ strscpy(cldev->name, radio_name, sizeof(cldev->name));
disconnect:
mutex_lock(&bus->device_lock);
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
index 75765e4df4ed..e63cabd0818d 100644
--- a/drivers/misc/mei/gsc-me.c
+++ b/drivers/misc/mei/gsc-me.c
@@ -68,7 +68,6 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
hw = to_me_hw(dev);
hw->mem_addr = devm_ioremap_resource(device, &adev->bar);
if (IS_ERR(hw->mem_addr)) {
- dev_err(device, "mmio not mapped\n");
ret = PTR_ERR(hw->mem_addr);
goto err;
}
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 9862c6cd3e32..5d0f68b95c29 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -176,7 +176,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
* @dev: the device structure
*
* Extract HICR_HOST_ALIVENESS_RESP_ACK bit from
- * from HICR_HOST_ALIVENESS_REQ register value
+ * HICR_HOST_ALIVENESS_REQ register value
*
* Return: SICR_HOST_ALIVENESS_REQ_REQUESTED bit value
*/
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 6777c419a8da..d46dba2df5a1 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -257,6 +257,8 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
if (IS_ERR(ev_ctx))
return PTR_ERR(ev_ctx);
rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx);
+ if (rc)
+ eventfd_ctx_put(ev_ctx);
break;
case OCXL_IOCTL_GET_METADATA:
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 8f786a225dcf..11530b4ec389 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -332,6 +332,22 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
return false;
}
+static int pci_endpoint_test_validate_xfer_params(struct device *dev,
+ struct pci_endpoint_test_xfer_param *param, size_t alignment)
+{
+ if (!param->size) {
+ dev_dbg(dev, "Data size is zero\n");
+ return -EINVAL;
+ }
+
+ if (param->size > SIZE_MAX - alignment) {
+ dev_dbg(dev, "Maximum transfer data size exceeded\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
unsigned long arg)
{
@@ -363,9 +379,11 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
@@ -497,9 +515,11 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
@@ -595,9 +615,11 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 9f9af77f8d2e..f1336f43d3bd 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -334,10 +334,6 @@ extern int (*xp_cpu_to_nasid) (int);
extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
-extern u64 xp_nofault_PIOR_target;
-extern int xp_nofault_PIOR(void *);
-extern int xp_error_PIOR(void);
-
extern struct device *xp;
extern enum xp_retval xp_init_uv(void);
extern void xp_exit_uv(void);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8f2de1893245..e71068f7759b 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -324,7 +324,7 @@ static void *qp_alloc_queue(u64 size, u32 flags)
/*
* Copies from a given buffer or iovector to a VMCI Queue. Uses
- * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * kmap_local_page() to dynamically map required portions of the queue
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
@@ -345,7 +345,7 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
size_t to_copy;
if (kernel_if->host)
- va = kmap(kernel_if->u.h.page[page_index]);
+ va = kmap_local_page(kernel_if->u.h.page[page_index]);
else
va = kernel_if->u.g.vas[page_index + 1];
/* Skip header. */
@@ -359,12 +359,12 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
from)) {
if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
+ kunmap_local(va);
return VMCI_ERROR_INVALID_ARGS;
}
bytes_copied += to_copy;
if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
+ kunmap_local(va);
}
return VMCI_SUCCESS;
@@ -372,7 +372,7 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
/*
* Copies to a given buffer or iovector from a VMCI Queue. Uses
- * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * kmap_local_page() to dynamically map required portions of the queue
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
@@ -393,7 +393,7 @@ static int qp_memcpy_from_queue_iter(struct iov_iter *to,
int err;
if (kernel_if->host)
- va = kmap(kernel_if->u.h.page[page_index]);
+ va = kmap_local_page(kernel_if->u.h.page[page_index]);
else
va = kernel_if->u.g.vas[page_index + 1];
/* Skip header. */
@@ -407,12 +407,12 @@ static int qp_memcpy_from_queue_iter(struct iov_iter *to,
err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
if (err != to_copy) {
if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
+ kunmap_local(va);
return VMCI_ERROR_INVALID_ARGS;
}
bytes_copied += to_copy;
if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
+ kunmap_local(va);
}
return VMCI_SUCCESS;
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index d6e3c650bd11..cb9506f9cbd0 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -636,7 +636,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
}
for (i = 0; i < nr_pages; i++) {
- addr = kmap(pages[i]);
+ addr = kmap_local_page(pages[i]);
do {
xsdfec_regwrite(xsdfec,
base_addr + ((offset + reg) *
@@ -645,6 +645,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
reg++;
} while ((reg < len) &&
((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
+ kunmap_local(addr);
unpin_user_page(pages[i]);
}
return 0;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ce89611a136e..54cd009aee50 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1140,8 +1140,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
+ unsigned int arg = card->erase_arg;
- mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, card->erase_arg);
+ if (mmc_card_broken_sd_discard(card))
+ arg = SD_ERASE_ARG;
+
+ mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 99045e138ba4..cfdd1ff40b86 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -73,6 +73,7 @@ struct mmc_fixup {
#define EXT_CSD_REV_ANY (-1u)
#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_SANDISK_SD 0x3
#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
@@ -258,4 +259,9 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_HPI;
}
+static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+}
+
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ef53a2578824..95fa8fb1d45f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -97,8 +97,8 @@ static void mmc_should_fail_request(struct mmc_host *host,
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
- data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
- data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+ data->error = data_errors[prandom_u32_max(ARRAY_SIZE(data_errors))];
+ data->bytes_xfered = prandom_u32_max(data->bytes_xfered >> 9) << 9;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index be4393988086..29b9497936df 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -100,6 +100,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_TRIM_BROKEN),
+ /*
+	 * Some SD cards report discard support even though they don't
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_DISCARD),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 414aa82abc39..ae7ef2e038be 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -246,7 +246,7 @@ static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
static void sdio_uart_change_speed(struct sdio_uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned char cval, fcr = 0;
unsigned int baud, quot;
@@ -859,7 +859,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
}
static void sdio_uart_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct sdio_uart_port *port = tty->driver_data;
unsigned int cflag = tty->termios.c_cflag;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 581614196a84..c78bbc22e0d1 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1858,7 +1858,7 @@ static void dw_mci_start_fault_timer(struct dw_mci *host)
* Try to inject the error at random points during the data transfer.
*/
hrtimer_start(&host->fault_timer,
- ms_to_ktime(prandom_u32() % 25),
+ ms_to_ktime(prandom_u32_max(25)),
HRTIMER_MODE_REL);
}
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 6edbf5c161ab..b970699743e0 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -128,6 +128,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
struct clk *ref_clk = priv->clk;
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
unsigned int new_clock, clkh_shift = 0;
+ unsigned int new_upper_limit;
int i;
/*
@@ -153,13 +154,20 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
* greater than, new_clock. As we can divide by 1 << i for
* any i in [0, 9] we want the input clock to be as close as
* possible, but no greater than, new_clock << i.
+ *
+	 * Add an upper limit of 1/1024 above the requested clock rate to fix
+	 * the clk rate jumping to a lower rate due to rounding error (e.g. RZ/G2L
+	 * has 3 clk sources: 533.333333 MHz, 400 MHz and 266.666666 MHz. A request
+	 * for 533.333333 MHz would select the slower 400 MHz due to rounding
+	 * error (533333333 Hz / 4 * 4 = 533333332 Hz < 533333333 Hz)).
*/
for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
freq = clk_round_rate(ref_clk, new_clock << i);
- if (freq > (new_clock << i)) {
+ new_upper_limit = (new_clock << i) + ((new_clock << i) >> 10);
+ if (freq > new_upper_limit) {
/* Too fast; look for a slightly slower option */
freq = clk_round_rate(ref_clk, (new_clock << i) / 4 * 3);
- if (freq > (new_clock << i))
+ if (freq > new_upper_limit)
continue;
}
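Worked through with the RZ/G2L numbers from the comment above, a small standalone sketch (illustrative, not driver code) of why the 1/1024 margin keeps the faster source selectable when integer division drops 1 Hz from the request:

/* Illustrative only: the rounding case the 1/1024 upper limit fixes. */
#include <assert.h>

int main(void)
{
	unsigned int requested = 533333332;	/* 533333333 / 4 * 4, after integer division */
	unsigned int rounded   = 533333333;	/* rate the clock source can actually provide */
	unsigned int upper     = requested + (requested >> 10);	/* 1/1024 margin */

	assert(rounded > requested);	/* the old "freq > requested" check would reject it */
	assert(rounded <= upper);	/* with the margin, the 533.33 MHz source is accepted */
	return 0;
}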
@@ -181,6 +189,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
unsigned int new_clock)
{
+ unsigned int clk_margin;
u32 clk = 0, clock;
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -194,7 +203,13 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
host->mmc->actual_clock = renesas_sdhi_clk_update(host, new_clock);
clock = host->mmc->actual_clock / 512;
- for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
+ /*
+	 * Add a margin of 1/1024 above the requested clock rate in order
+	 * to avoid the clk variable being set to 0 because of the margin
+	 * applied to actual_clock in renesas_sdhi_clk_update().
+ */
+ clk_margin = new_clock >> 10;
+ for (clk = 0x80000080; new_clock + clk_margin >= (clock << 1); clk >>= 1)
clock <<= 1;
/* 1/1 clock is option */
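A standalone sketch (not part of the patch above) of the rounding scenario the new 1/1024 upper limit handles, using the RZ/G2L figures quoted in the comment; only the comparison mirrors the driver, the rest is illustrative.

    #include <stdio.h>

    int main(void)
    {
        unsigned int source = 533333333;               /* best parent rate (Hz) */
        unsigned int target = 533333333 / 4 * 4;       /* new_clock << i == 533333332 Hz */
        unsigned int upper  = target + (target >> 10); /* margin added by the patch */

        /* Old check: any rate above the reconstructed target is rejected,
         * so the 533.33 MHz source loses by 1 Hz and 400 MHz is chosen. */
        printf("old check: %u > %u -> %s\n", source, target,
               source > target ? "rejected (falls back to 400 MHz)" : "accepted");
        /* New check: up to 1/1024 above the target is tolerated. */
        printf("new check: %u > %u -> %s\n", source, upper,
               source > upper ? "rejected" : "accepted (keeps 533.33 MHz)");
        return 0;
    }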
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 46c55ab4884c..b92a408f138d 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -309,7 +309,7 @@ static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host)
static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host)
{
- return 400000;
+ return 100000;
}
static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 2d2d8260c681..413925bce0ca 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -773,7 +773,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
dev_err(dev, "failed to set clk rate to %luHz: %d\n",
host_clk, err);
- tegra_host->curr_clk_rate = host_clk;
+ tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
if (tegra_host->ddr_signaling)
host->max_clk = host_clk;
else
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 40d7211485da..4cd37ec45762 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -461,7 +461,7 @@ static int block2mtd_setup(const char *val, const struct kernel_param *kp)
the device (even kmalloc() fails). Defer that work to
block2mtd_setup2(). */
- strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
+ strscpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
return 0;
#endif
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 5b0ae5ddad74..a7714e3de887 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -300,7 +300,7 @@ static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
}
/**
- * doc_set_data_mode - Sets the flash to normal or reliable data mode
+ * doc_set_reliable_mode - Sets the flash to normal or reliable data mode
* @docg3: the device
*
* The reliable data mode is a bit slower than the fast mode, but less errors
@@ -442,7 +442,7 @@ static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
}
/**
- * doc_seek - Set both flash planes to the specified block, page for reading
+ * doc_read_seek - Set both flash planes to the specified block, page for reading
* @docg3: the device
* @block0: the first plane block index
* @block1: the second plane block index
@@ -871,6 +871,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
u8 *buf = ops->datbuf;
size_t len, ooblen, nbdata, nboob;
u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+ struct mtd_ecc_stats old_stats;
int max_bitflips = 0;
if (buf)
@@ -895,6 +896,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
ret = 0;
skip = from % DOC_LAYOUT_PAGE_SIZE;
mutex_lock(&docg3->cascade->lock);
+ old_stats = mtd->ecc_stats;
while (ret >= 0 && (len > 0 || ooblen > 0)) {
calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
docg3->reliable);
@@ -966,6 +968,12 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
}
out:
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
mutex_unlock(&docg3->cascade->lock);
return ret;
err_in_read:
@@ -1951,7 +1959,7 @@ static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
}
/**
- * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * docg3_probe - Probe the IO space for a DiskOnChip G3 chip
* @pdev: platform device
*
* Probes for a G3 chip at the specified IO space in the platform data
@@ -1974,9 +1982,14 @@ static int __init docg3_probe(struct platform_device *pdev)
dev_err(dev, "No I/O memory resource defined\n");
return ret;
}
- base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
ret = -ENOMEM;
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
+ if (!base) {
+ dev_err(dev, "devm_ioremap dev failed\n");
+ return ret;
+ }
+
cascade = devm_kcalloc(dev, DOC_MAX_NBFLOORS, sizeof(*cascade),
GFP_KERNEL);
if (!cascade)
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index f655d2905270..8c22064ead38 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -941,7 +941,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
- partition_t *part = (void *)dev;
+ partition_t *part = container_of(dev, struct partition_t, mbd);
u_long sect;
/* Sort of arbitrary: round size down to 4KiB boundary */
@@ -969,7 +969,7 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
static int ftl_discardsect(struct mtd_blktrans_dev *dev,
unsigned long sector, unsigned nr_sects)
{
- partition_t *part = (void *)dev;
+ partition_t *part = container_of(dev, struct partition_t, mbd);
uint32_t bsize = 1 << part->header.EraseUnitSize;
pr_debug("FTL erase sector %ld for %d sectors\n",
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 6b48397c750c..58ca1c21ebe6 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -136,7 +136,7 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -156,7 +156,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -176,7 +176,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf, uint8_t *oob)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 85eca6a192e6..c73854da5136 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -300,6 +300,9 @@ static const char *of_select_probe_type(struct platform_device *dev)
const char *probe_type;
match = of_match_device(of_flash_match, &dev->dev);
+ if (!match)
+ return NULL;
+
probe_type = match->data;
if (probe_type)
return probe_type;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 05860288a7af..01f1c6792df9 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -688,6 +688,137 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
return ret;
}
+static int mtdchar_read_ioctl(struct mtd_info *mtd,
+ struct mtd_read_req __user *argp)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_read_req req;
+ void __user *usr_data, *usr_oob;
+ uint8_t *datbuf = NULL, *oobbuf = NULL;
+ size_t datbuf_len, oobbuf_len;
+ size_t orig_len, orig_ooblen;
+ int ret = 0;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ orig_len = req.len;
+ orig_ooblen = req.ooblen;
+
+ usr_data = (void __user *)(uintptr_t)req.usr_data;
+ usr_oob = (void __user *)(uintptr_t)req.usr_oob;
+
+ if (!master->_read_oob)
+ return -EOPNOTSUPP;
+
+ if (!usr_data)
+ req.len = 0;
+
+ if (!usr_oob)
+ req.ooblen = 0;
+
+ req.ecc_stats.uncorrectable_errors = 0;
+ req.ecc_stats.corrected_bitflips = 0;
+ req.ecc_stats.max_bitflips = 0;
+
+ req.len &= 0xffffffff;
+ req.ooblen &= 0xffffffff;
+
+ if (req.start + req.len > mtd->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ datbuf_len = min_t(size_t, req.len, mtd->erasesize);
+ if (datbuf_len > 0) {
+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
+ if (!datbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
+ if (oobbuf_len > 0) {
+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
+ if (!oobbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
+ struct mtd_req_stats stats;
+ struct mtd_oob_ops ops = {
+ .mode = req.mode,
+ .len = min_t(size_t, req.len, datbuf_len),
+ .ooblen = min_t(size_t, req.ooblen, oobbuf_len),
+ .datbuf = datbuf,
+ .oobbuf = oobbuf,
+ .stats = &stats,
+ };
+
+ /*
+ * Shorten non-page-aligned, eraseblock-sized reads so that the
+ * read ends on an eraseblock boundary. This is necessary in
+ * order to prevent OOB data for some pages from being
+ * duplicated in the output of non-page-aligned reads requiring
+ * multiple mtd_read_oob() calls to be completed.
+ */
+ if (ops.len == mtd->erasesize)
+ ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
+
+ ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);
+
+ req.ecc_stats.uncorrectable_errors +=
+ stats.uncorrectable_errors;
+ req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
+ req.ecc_stats.max_bitflips =
+ max(req.ecc_stats.max_bitflips, stats.max_bitflips);
+
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ break;
+
+ if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
+ copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ req.start += ops.retlen;
+ req.len -= ops.retlen;
+ usr_data += ops.retlen;
+
+ req.ooblen -= ops.oobretlen;
+ usr_oob += ops.oobretlen;
+ }
+
+ /*
+ * As multiple iterations of the above loop (and therefore multiple
+ * mtd_read_oob() calls) may be necessary to complete the read request,
+ * adjust the final return code to ensure it accounts for all detected
+ * ECC errors.
+ */
+ if (!ret || mtd_is_bitflip(ret)) {
+ if (req.ecc_stats.uncorrectable_errors > 0)
+ ret = -EBADMSG;
+ else if (req.ecc_stats.corrected_bitflips > 0)
+ ret = -EUCLEAN;
+ }
+
+out:
+ req.len = orig_len - req.len;
+ req.ooblen = orig_ooblen - req.ooblen;
+
+ if (copy_to_user(argp, &req, sizeof(req)))
+ ret = -EFAULT;
+
+ kvfree(datbuf);
+ kvfree(oobbuf);
+
+ return ret;
+}
+
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
@@ -710,6 +841,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
case MEMGETINFO:
case MEMREADOOB:
case MEMREADOOB64:
+ case MEMREAD:
case MEMISLOCKED:
case MEMGETOOBSEL:
case MEMGETBADBLOCK:
@@ -884,6 +1016,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
+ case MEMREAD:
+ {
+ ret = mtdchar_read_ioctl(mtd,
+ (struct mtd_read_req __user *)arg);
+ break;
+ }
+
case MEMLOCK:
{
struct erase_info_user einfo;
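A hedged user-space sketch of driving the new MEMREAD ioctl wired up above. The request fields mirror what mtdchar_read_ioctl() consumes; the exact <mtd/mtd-user.h> layout (MEMREAD, struct mtd_read_req and its ecc_stats member) is assumed to be shipped by the headers rather than shown in this diff.

    /* Build: cc -o memread memread.c ; usage sketch only, minimal error handling. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/mtd-user.h>   /* MEMREAD, struct mtd_read_req (assumed present) */

    int main(int argc, char **argv)
    {
        uint8_t data[4096], oob[256];
        struct mtd_read_req req;
        int fd, ret;

        if (argc < 2)
            return 1;
        fd = open(argv[1], O_RDONLY);   /* e.g. /dev/mtdX */
        if (fd < 0)
            return 1;

        memset(&req, 0, sizeof(req));
        req.start = 0;                   /* offset within the MTD device */
        req.len = sizeof(data);
        req.ooblen = sizeof(oob);
        req.usr_data = (uintptr_t)data;
        req.usr_oob = (uintptr_t)oob;
        req.mode = MTD_OPS_PLACE_OOB;

        ret = ioctl(fd, MEMREAD, &req);
        /* On return, req.len/req.ooblen hold the bytes actually read; a failed
         * ioctl with errno EUCLEAN/EBADMSG still carries the ECC statistics. */
        printf("ret=%d read=%llu oob=%llu uncorrectable=%u corrected=%u max_bitflips=%u\n",
               ret, (unsigned long long)req.len, (unsigned long long)req.ooblen,
               req.ecc_stats.uncorrectable_errors,
               req.ecc_stats.corrected_bitflips,
               req.ecc_stats.max_bitflips);
        close(fd);
        return 0;
    }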
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index f685a581df48..193428de6a4b 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -836,7 +836,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
/*
* walk the map of the new device once more and fill in
- * in erase region info:
+ * erase region info:
*/
curr_erasesize = subdev[0]->erasesize;
begin = position = 0;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index a9b8be9f40dc..18aa54460d36 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1218,6 +1218,34 @@ int __get_mtd_device(struct mtd_info *mtd)
EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
+ * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
+ *
+ * @np: device tree node
+ */
+struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
+{
+ struct mtd_info *mtd = NULL;
+ struct mtd_info *tmp;
+ int err;
+
+ mutex_lock(&mtd_table_mutex);
+
+ err = -EPROBE_DEFER;
+ mtd_for_each_device(tmp) {
+ if (mtd_get_of_node(tmp) == np) {
+ mtd = tmp;
+ err = __get_mtd_device(mtd);
+ break;
+ }
+ }
+
+ mutex_unlock(&mtd_table_mutex);
+
+ return err ? ERR_PTR(err) : mtd;
+}
+EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
+
+/**
* get_mtd_device_nm - obtain a validated handle for an MTD device by
* device name
* @name: MTD device name to open
@@ -1624,6 +1652,9 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (!master->_read_oob && (!master->_read || ops->oobbuf))
return -EOPNOTSUPP;
+ if (ops->stats)
+ memset(ops->stats, 0, sizeof(*ops->stats));
+
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
else
@@ -1641,6 +1672,8 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
return ret_code;
if (mtd->ecc_strength == 0)
return 0; /* device lacks ecc */
+ if (ops->stats)
+ ops->stats->max_bitflips = ret_code;
return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
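A hedged sketch of a consumer for the new of_get_mtd_device_by_node() helper exported above; the phandle property name and the probe function around it are purely illustrative, only the get/put pairing follows the MTD core code.

    #include <linux/err.h>
    #include <linux/mtd/mtd.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    /* Illustrative probe, assuming a device-tree phandle property such as
     * "nvmem-mtd" pointing at an MTD (partition) node. */
    static int example_probe(struct platform_device *pdev)
    {
        struct device_node *np;
        struct mtd_info *mtd;

        np = of_parse_phandle(pdev->dev.of_node, "nvmem-mtd", 0);
        if (!np)
            return -ENODEV;

        mtd = of_get_mtd_device_by_node(np);
        of_node_put(np);
        if (IS_ERR(mtd))
            /* -EPROBE_DEFER if the MTD device has not registered yet */
            return PTR_ERR(mtd);

        dev_info(&pdev->dev, "using MTD \"%s\", size %llu\n",
                 mtd->name, (unsigned long long)mtd->size);

        /* ... use mtd_read()/mtd_write() on the device ... */

        put_mtd_device(mtd);
        return 0;
    }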
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
index e13d42c0acb0..7ac8ac901306 100644
--- a/drivers/mtd/mtdpstore.c
+++ b/drivers/mtd/mtdpstore.c
@@ -401,7 +401,7 @@ static void mtdpstore_notify_add(struct mtd_info *mtd)
/*
* kmsg_size must be aligned to 4096 Bytes, which is limited by
* psblk. The default value of kmsg_size is 64KB. If kmsg_size
- * is larger than erasesize, some errors will occur since mtdpsotre
+ * is larger than erasesize, some errors will occur since mtdpstore
* is designed on it.
*/
if (mtd->erasesize < info->kmsg_size) {
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index dc7f1532a37f..680366616da2 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -323,7 +323,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
struct mtdswap_oobdata *data, *data2;
int ret;
loff_t offset;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
offset = mtdswap_eb_offset(d, eb);
@@ -370,7 +370,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
struct mtdswap_oobdata n;
int ret;
loff_t offset;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.ooboffs = 0;
ops.oobbuf = (uint8_t *)&n;
@@ -878,7 +878,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
loff_t base, pos;
unsigned int *p1 = (unsigned int *)d->page_buf;
unsigned char *p2 = (unsigned char *)d->oob_buf;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret;
ops.mode = MTD_OPS_AUTO_OOB;
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
index 64af6898131d..db4f93a903e4 100644
--- a/drivers/mtd/nand/bbt.c
+++ b/drivers/mtd/nand/bbt.c
@@ -24,11 +24,8 @@ int nanddev_bbt_init(struct nand_device *nand)
{
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
unsigned int nblocks = nanddev_neraseblocks(nand);
- unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
- BITS_PER_LONG);
- nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
- GFP_KERNEL);
+ nand->bbt.cache = bitmap_zalloc(nblocks * bits_per_block, GFP_KERNEL);
if (!nand->bbt.cache)
return -ENOMEM;
@@ -44,7 +41,7 @@ EXPORT_SYMBOL_GPL(nanddev_bbt_init);
*/
void nanddev_bbt_cleanup(struct nand_device *nand)
{
- kfree(nand->bbt.cache);
+ bitmap_free(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
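Side note (illustrative, not from the patch): the bitmap_zalloc() conversion above is size-for-size equivalent to the kcalloc() it replaces, since bitmap_zalloc(nbits, ...) allocates DIV_ROUND_UP(nbits, BITS_PER_LONG) zeroed longs. A minimal sketch with a hypothetical helper name:

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    /* Hypothetical helper mirroring nanddev_bbt_init(): both forms request the
     * same number of zeroed bytes for nbits bits of block-status information. */
    static unsigned long *alloc_bbt_cache(unsigned int nbits)
    {
        /* Before: kcalloc(DIV_ROUND_UP(nbits, BITS_PER_LONG),
         *                 sizeof(unsigned long), GFP_KERNEL)
         * After (freed with bitmap_free() instead of kfree()): */
        return bitmap_zalloc(nbits, GFP_KERNEL);
    }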
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 958bac54b190..f66385faf631 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -1440,6 +1440,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats old_stats;
int ret;
switch (ops->mode) {
@@ -1453,12 +1454,23 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
}
onenand_get_device(mtd, FL_READING);
+
+ old_stats = mtd->ecc_stats;
+
if (ops->datbuf)
ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
ret = onenand_read_oob_nolock(mtd, from, ops);
+
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
onenand_release_device(mtd);
return ret;
@@ -2935,7 +2947,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
struct onenand_chip *this = mtd->priv;
unsigned char *pbuf = buf;
int ret;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
/* Force buffer page aligned */
if (len < mtd->writesize) {
@@ -2977,7 +2989,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret;
if (FLEXONENAND(this)) {
diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c
index b17315f8e1d4..d7fe35bc45cb 100644
--- a/drivers/mtd/nand/onenand/onenand_bbt.c
+++ b/drivers/mtd/nand/onenand/onenand_bbt.c
@@ -61,7 +61,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
int startblock;
loff_t from;
size_t readlen;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 8b6d7a515445..4cd40af362de 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -200,27 +200,7 @@ config MTD_NAND_TMIO
Support for NAND flash connected to a Toshiba Mobile IO
Controller in some PDAs, including the Sharp SL6000x.
-config MTD_NAND_BRCMNAND
- tristate "Broadcom STB NAND controller"
- depends on ARM || ARM64 || MIPS || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Enables the Broadcom NAND controller driver. The controller was
- originally designed for Set-Top Box but is used on various BCM7xxx,
- BCM3xxx, BCM63xxx, iProc/Cygnus and more.
-
-if MTD_NAND_BRCMNAND
-
-config MTD_NAND_BRCMNAND_BCMA
- tristate "Broadcom BCMA NAND controller"
- depends on BCMA_NFLASH
- depends on BCMA
- help
- Enables the BRCMNAND controller over BCMA on BCM47186/BCM5358 SoCs.
- The glue driver will take care of performing the low-level I/O
- operations to interface the BRCMNAND controller over the BCMA bus.
-
-endif # MTD_NAND_BRCMNAND
+source "drivers/mtd/nand/raw/brcmnand/Kconfig"
config MTD_NAND_BCM47XXNFLASH
tristate "BCM4706 BCMA NAND controller"
@@ -410,7 +390,7 @@ config MTD_NAND_STM32_FMC2
config MTD_NAND_MESON
tristate "Support for NAND controller on Amlogic's Meson SoCs"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK && (ARCH_MESON || COMPILE_TEST)
select MFD_SYSCON
help
Enables support for NAND controller on Amlogic's Meson SoCs.
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 296fb16c8dc3..ec7e6eeac55f 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -915,7 +915,7 @@ static int anfc_check_op(struct nand_chip *chip,
if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
return -ENOTSUPP;
- if (anfc_pkt_len_config(instr->ctx.data.len, 0, 0))
+ if (anfc_pkt_len_config(instr->ctx.data.len, NULL, NULL))
return -ENOTSUPP;
break;
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index c9ac3baf68c0..41c6bd6e2d72 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -405,6 +405,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
dma_async_issue_pending(nc->dmac);
wait_for_completion(&finished);
+ dma_unmap_single(nc->dev, buf_dma, len, dir);
return 0;
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 8bb17c5a66c3..6487dfc64258 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -14,7 +14,7 @@
#include <linux/bcma/bcma.h>
/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
- * shown ~1000 retries as maxiumum. */
+ * shown ~1000 retries as maximum. */
#define NFLASH_READY_RETRIES 10000
#define NFLASH_SECTOR_SIZE 512
diff --git a/drivers/mtd/nand/raw/brcmnand/Kconfig b/drivers/mtd/nand/raw/brcmnand/Kconfig
new file mode 100644
index 000000000000..4bc51bf60aca
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/Kconfig
@@ -0,0 +1,49 @@
+config MTD_NAND_BRCMNAND
+ tristate "Broadcom STB NAND controller"
+ depends on ARM || ARM64 || MIPS || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables the Broadcom NAND controller driver. The controller was
+ originally designed for Set-Top Box but is used on various BCM7xxx,
+ BCM3xxx, BCM63xxx, iProc/Cygnus and more.
+
+if MTD_NAND_BRCMNAND
+
+config MTD_NAND_BRCMNAND_BCM63XX
+ tristate "Broadcom BCM63xx NAND controller glue"
+ default BCM63XX
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom BCM63xx MIPS-based DSL platforms.
+
+config MTD_NAND_BRCMNAND_BCMA
+ tristate "Broadcom BCMA NAND controller"
+ depends on BCMA_NFLASH
+ depends on BCMA
+ help
+ Enables the BRCMNAND controller over BCMA on BCM47186/BCM5358 SoCs.
+ The glue driver will take care of performing the low-level I/O
+ operations to interface the BRCMNAND controller over the BCMA bus.
+
+config MTD_NAND_BRCMNAND_BCMBCA
+ tristate "Broadcom BCMBCA NAND controller glue"
+ default ARCH_BCMBCA
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom BCA platforms.
+
+config MTD_NAND_BRCMNAND_BRCMSTB
+ tristate "Broadcom STB Nand controller glue"
+ default ARCH_BRCMSTB
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom STB platforms.
+
+config MTD_NAND_BRCMNAND_IPROC
+ tristate "Broadcom iProc NAND controller glue"
+ default ARCH_BCM_IPROC
+ help
+ Enables the BRCMNAND controller glue driver to register the NAND
+ controller on Broadcom iProc platforms.
+
+endif # MTD_NAND_BRCMNAND
diff --git a/drivers/mtd/nand/raw/brcmnand/Makefile b/drivers/mtd/nand/raw/brcmnand/Makefile
index 16dc7254200e..9907e3ec4bb2 100644
--- a/drivers/mtd/nand/raw/brcmnand/Makefile
+++ b/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
# link order matters; don't link the more generic brcmstb_nand.o before the
# more specific iproc_nand.o, for instance
-obj-$(CONFIG_MTD_NAND_BRCMNAND) += iproc_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm63138_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm6368_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmstb_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_IPROC) += iproc_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCM63XX) += bcm6368_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BRCMSTB) += brcmstb_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMA) += bcma_nand.o
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 0d72672f8b64..9dac3ca69d57 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -1979,7 +1979,6 @@ static int cadence_nand_force_byte_access(struct nand_chip *chip,
bool force_8bit)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
- int status;
/*
* Callers of this function do not verify if the NAND is using a 16-bit
@@ -1990,9 +1989,7 @@ static int cadence_nand_force_byte_access(struct nand_chip *chip,
if (!(chip->options & NAND_BUSWIDTH_16))
return 0;
- status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
-
- return status;
+ return cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
}
static int cadence_nand_cmd_opcode(struct nand_chip *chip,
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index af119e376352..66385c4fb994 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -358,7 +358,7 @@ static int cafe_nand_read_oob(struct nand_chip *chip, int page)
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
- * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
+ * cafe_nand_read_page - [REPLACEABLE] hardware ecc syndrome based page read
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller expects OOB data read to chip->oob_poi
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index aab93b9e6052..a18d121396aa 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -726,36 +726,40 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip)
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
unsigned int al;
- switch (chip->ecc.engine_type) {
/*
* if ECC was not chosen in DT, decide whether to use HW or SW ECC from
* CS Base Register
*/
- case NAND_ECC_ENGINE_TYPE_NONE:
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) {
/* If CS Base Register selects full hardware ECC then use it */
if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
BR_DECC_CHK_GEN) {
- chip->ecc.read_page = fsl_elbc_read_page;
- chip->ecc.write_page = fsl_elbc_write_page;
- chip->ecc.write_subpage = fsl_elbc_write_subpage;
-
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.strength = 1;
} else {
/* otherwise fall back to default software ECC */
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
+ }
+
+ switch (chip->ecc.engine_type) {
+ /* if HW ECC was chosen, setup ecc and oob layout */
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+ mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
break;
- /* if SW ECC was chosen in DT, we do not need to set anything here */
+ /* if none or SW ECC was chosen, we do not need to set anything here */
+ case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
- /* should we also implement *_ECC_ENGINE_CONTROLLER to do as above? */
default:
return -EINVAL;
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 93da23682d86..01ccbde748f3 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1361,7 +1361,7 @@ error_alloc:
/*
* Handles block mark swapping.
* It can be called in swapping the block mark, or swapping it back,
- * because the the operations are the same.
+ * because the operations are the same.
*/
static void block_mark_swapping(struct gpmi_nand_data *this,
void *payload, void *auxiliary)
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index e91b879b32bd..d4a0987e93ac 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -16,6 +16,7 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -99,15 +100,12 @@
#define HSNAND_ECC_OFFSET 0x008
-#define NAND_DATA_IFACE_CHECK_ONLY -1
-
#define MAX_CS 2
#define USEC_PER_SEC 1000000L
struct ebu_nand_cs {
void __iomem *chipaddr;
- dma_addr_t nand_pa;
u32 addr_sel;
};
@@ -120,7 +118,6 @@ struct ebu_nand_controller {
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
struct completion dma_access_complete;
- unsigned long clk_rate;
struct clk *clk;
u32 nd_para0;
u8 cs_num;
@@ -580,6 +577,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ebu_nand_controller *ebu_host;
+ struct device_node *chip_np;
struct nand_chip *nand;
struct mtd_info *mtd;
struct resource *res;
@@ -594,17 +592,20 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->dev = dev;
nand_controller_init(&ebu_host->controller);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ebunand");
- ebu_host->ebu = devm_ioremap_resource(&pdev->dev, res);
+ ebu_host->ebu = devm_platform_ioremap_resource_byname(pdev, "ebunand");
if (IS_ERR(ebu_host->ebu))
return PTR_ERR(ebu_host->ebu);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsnand");
- ebu_host->hsnand = devm_ioremap_resource(&pdev->dev, res);
+ ebu_host->hsnand = devm_platform_ioremap_resource_byname(pdev, "hsnand");
if (IS_ERR(ebu_host->hsnand))
return PTR_ERR(ebu_host->hsnand);
- ret = device_property_read_u32(dev, "reg", &cs);
+ chip_np = of_get_next_child(dev->of_node, NULL);
+ if (!chip_np)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not find child node for the NAND chip\n");
+
+ ret = of_property_read_u32(chip_np, "reg", &cs);
if (ret) {
dev_err(dev, "failed to get chip select: %d\n", ret);
return ret;
@@ -617,11 +618,10 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->cs_num = cs;
resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
- ebu_host->cs[cs].chipaddr = devm_ioremap_resource(dev, res);
+ ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
+ resname);
if (IS_ERR(ebu_host->cs[cs].chipaddr))
return PTR_ERR(ebu_host->cs[cs].chipaddr);
- ebu_host->cs[cs].nand_pa = res->start;
ebu_host->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ebu_host->clk))
@@ -633,7 +633,6 @@ static int ebu_nand_probe(struct platform_device *pdev)
dev_err(dev, "failed to enable clock: %d\n", ret);
return ret;
}
- ebu_host->clk_rate = clk_get_rate(ebu_host->clk);
ebu_host->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(ebu_host->dma_tx)) {
@@ -660,7 +659,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
ebu_host->ebu + EBU_ADDR_SEL(cs));
- nand_set_flash_node(&ebu_host->chip, dev->of_node);
+ nand_set_flash_node(&ebu_host->chip, chip_np);
mtd = nand_to_mtd(&ebu_host->chip);
if (!mtd->name) {
@@ -716,7 +715,6 @@ static int ebu_nand_remove(struct platform_device *pdev)
}
static const struct of_device_id ebu_nand_match[] = {
- { .compatible = "intel,nand-controller" },
{ .compatible = "intel,lgm-ebunand" },
{}
};
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 2455a581fd70..d9f2f1d0b5ef 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -865,13 +865,19 @@ static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
marvell_nfc_enable_dma(nfc);
/* Prepare the DMA transfer */
sg_init_one(&sg, nfc->dma_buf, dma_len);
- dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ ret = dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ if (!ret) {
+ dev_err(nfc->dev, "Could not map DMA S/G list\n");
+ return -ENXIO;
+ }
+
tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
direction == DMA_FROM_DEVICE ?
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
return -ENXIO;
}
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 829b76b303aa..5ee01231ac4c 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/mtd.h>
#include <linux/mfd/syscon.h>
@@ -56,6 +57,9 @@
#define NFC_RB_IRQ_EN BIT(21)
+#define CLK_DIV_SHIFT 0
+#define CLK_DIV_WIDTH 6
+
#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
( \
(cmd_dir) | \
@@ -151,15 +155,15 @@ struct meson_nfc {
struct nand_controller controller;
struct clk *core_clk;
struct clk *device_clk;
- struct clk *phase_tx;
- struct clk *phase_rx;
+ struct clk *nand_clk;
+ struct clk_divider nand_divider;
unsigned long clk_rate;
u32 bus_timing;
struct device *dev;
void __iomem *reg_base;
- struct regmap *reg_clk;
+ void __iomem *reg_clk;
struct completion completion;
struct list_head chips;
const struct meson_nfc_data *data;
@@ -235,7 +239,7 @@ static void meson_nfc_select_chip(struct nand_chip *nand, int chip)
nfc->timing.tbers_max = meson_chip->tbers_max;
if (nfc->clk_rate != meson_chip->clk_rate) {
- ret = clk_set_rate(nfc->device_clk, meson_chip->clk_rate);
+ ret = clk_set_rate(nfc->nand_clk, meson_chip->clk_rate);
if (ret) {
dev_err(nfc->dev, "failed to set clock rate\n");
return;
@@ -454,7 +458,7 @@ static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
*bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
- *correct_bitmap |= 1 >> i;
+ *correct_bitmap |= BIT_ULL(i);
continue;
}
if ((nand->options & NAND_NEED_SCRAMBLING) &&
@@ -800,7 +804,7 @@ static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf,
u8 *data = buf + i * ecc->size;
u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
- if (correct_bitmap & (1 << i))
+ if (correct_bitmap & BIT_ULL(i))
continue;
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 2,
@@ -987,6 +991,8 @@ static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
static int meson_nfc_clk_init(struct meson_nfc *nfc)
{
+ struct clk_parent_data nfc_divider_parent_data[1];
+ struct clk_init_data init = {0};
int ret;
/* request core clock */
@@ -1002,21 +1008,28 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
return PTR_ERR(nfc->device_clk);
}
- nfc->phase_tx = devm_clk_get(nfc->dev, "tx");
- if (IS_ERR(nfc->phase_tx)) {
- dev_err(nfc->dev, "failed to get TX clk\n");
- return PTR_ERR(nfc->phase_tx);
- }
-
- nfc->phase_rx = devm_clk_get(nfc->dev, "rx");
- if (IS_ERR(nfc->phase_rx)) {
- dev_err(nfc->dev, "failed to get RX clk\n");
- return PTR_ERR(nfc->phase_rx);
- }
+ init.name = devm_kasprintf(nfc->dev,
+ GFP_KERNEL, "%s#div",
+ dev_name(nfc->dev));
+ init.ops = &clk_divider_ops;
+ nfc_divider_parent_data[0].fw_name = "device";
+ init.parent_data = nfc_divider_parent_data;
+ init.num_parents = 1;
+ nfc->nand_divider.reg = nfc->reg_clk;
+ nfc->nand_divider.shift = CLK_DIV_SHIFT;
+ nfc->nand_divider.width = CLK_DIV_WIDTH;
+ nfc->nand_divider.hw.init = &init;
+ nfc->nand_divider.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ALLOW_ZERO;
+
+ nfc->nand_clk = devm_clk_register(nfc->dev, &nfc->nand_divider.hw);
+ if (IS_ERR(nfc->nand_clk))
+ return PTR_ERR(nfc->nand_clk);
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
- regmap_update_bits(nfc->reg_clk,
- 0, CLK_SELECT_NAND, CLK_SELECT_NAND);
+ writel(CLK_SELECT_NAND | readl(nfc->reg_clk),
+ nfc->reg_clk);
ret = clk_prepare_enable(nfc->core_clk);
if (ret) {
@@ -1030,29 +1043,21 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
goto err_device_clk;
}
- ret = clk_prepare_enable(nfc->phase_tx);
+ ret = clk_prepare_enable(nfc->nand_clk);
if (ret) {
- dev_err(nfc->dev, "failed to enable TX clock\n");
- goto err_phase_tx;
+ dev_err(nfc->dev, "pre enable NFC divider fail\n");
+ goto err_nand_clk;
}
- ret = clk_prepare_enable(nfc->phase_rx);
- if (ret) {
- dev_err(nfc->dev, "failed to enable RX clock\n");
- goto err_phase_rx;
- }
-
- ret = clk_set_rate(nfc->device_clk, 24000000);
+ ret = clk_set_rate(nfc->nand_clk, 24000000);
if (ret)
- goto err_disable_rx;
+ goto err_disable_clk;
return 0;
-err_disable_rx:
- clk_disable_unprepare(nfc->phase_rx);
-err_phase_rx:
- clk_disable_unprepare(nfc->phase_tx);
-err_phase_tx:
+err_disable_clk:
+ clk_disable_unprepare(nfc->nand_clk);
+err_nand_clk:
clk_disable_unprepare(nfc->device_clk);
err_device_clk:
clk_disable_unprepare(nfc->core_clk);
@@ -1061,8 +1066,7 @@ err_device_clk:
static void meson_nfc_disable_clk(struct meson_nfc *nfc)
{
- clk_disable_unprepare(nfc->phase_rx);
- clk_disable_unprepare(nfc->phase_tx);
+ clk_disable_unprepare(nfc->nand_clk);
clk_disable_unprepare(nfc->device_clk);
clk_disable_unprepare(nfc->core_clk);
}
@@ -1368,7 +1372,6 @@ static int meson_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_nfc *nfc;
- struct resource *res;
int ret, irq;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
@@ -1385,18 +1388,13 @@ static int meson_nfc_probe(struct platform_device *pdev)
nfc->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nfc->reg_base = devm_ioremap_resource(dev, res);
+ nfc->reg_base = devm_platform_ioremap_resource_byname(pdev, "nfc");
if (IS_ERR(nfc->reg_base))
return PTR_ERR(nfc->reg_base);
- nfc->reg_clk =
- syscon_regmap_lookup_by_phandle(dev->of_node,
- "amlogic,mmc-syscon");
- if (IS_ERR(nfc->reg_clk)) {
- dev_err(dev, "Failed to lookup clock base\n");
+ nfc->reg_clk = devm_platform_ioremap_resource_byname(pdev, "emmc");
+ if (IS_ERR(nfc->reg_clk))
return PTR_ERR(nfc->reg_clk);
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 6b67b7dfe7ce..33f2c98a030e 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -335,8 +335,6 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
* @chip: NAND chip structure
*
* Lock the device and its controller for exclusive access
- *
- * Return: -EBUSY if the chip has been suspended, 0 otherwise
*/
static void nand_get_device(struct nand_chip *chip)
{
@@ -3818,6 +3816,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_ecc_stats old_stats;
int ret;
ops->retlen = 0;
@@ -3829,11 +3828,20 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
nand_get_device(chip);
+ old_stats = mtd->ecc_stats;
+
if (!ops->datbuf)
ret = nand_do_read_oob(chip, from, ops);
else
ret = nand_do_read_ops(chip, from, ops);
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
nand_release_device(chip);
return ret;
}
@@ -5331,11 +5339,10 @@ static int of_get_nand_secure_regions(struct nand_chip *chip)
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
unsigned int *ncs_array)
{
- struct device_node *np = dev->of_node;
struct gpio_desc **descs;
int ndescs, i;
- ndescs = of_gpio_named_count(np, "cs-gpios");
+ ndescs = gpiod_count(dev, "cs");
if (ndescs < 0) {
dev_dbg(dev, "No valid cs-gpios property\n");
return 0;
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index a3723da2e0a0..e4664fa6fd9e 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -313,7 +313,7 @@ static int scan_read_oob(struct nand_chip *this, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_info *mtd = nand_to_mtd(this);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res, ret = 0;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -354,7 +354,7 @@ static int scan_write_bbt(struct nand_chip *this, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
struct mtd_info *mtd = nand_to_mtd(this);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
@@ -416,7 +416,7 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
{
struct mtd_info *mtd = nand_to_mtd(this);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret, page_offset;
ops.ooblen = mtd->oobsize;
@@ -756,7 +756,7 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
uint8_t rcode = td->reserved_block_code;
size_t retlen, len = 0;
loff_t to;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.ooblen = mtd->oobsize;
ops.ooboffs = 0;
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 24beade95c7f..672719023241 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -1393,7 +1393,7 @@ static int ns_do_read_error(struct nandsim *ns, int num)
unsigned int page_no = ns->regs.row;
if (ns_read_error(page_no)) {
- prandom_bytes(ns->buf.byte, num);
+ get_random_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
@@ -1402,12 +1402,12 @@ static int ns_do_read_error(struct nandsim *ns, int num)
static void ns_do_bit_flips(struct nandsim *ns, int num)
{
- if (bitflips && prandom_u32() < (1 << 22)) {
+ if (bitflips && get_random_u16() < (1 << 6)) {
int flips = 1;
if (bitflips > 1)
- flips = (prandom_u32() % (int) bitflips) + 1;
+ flips = prandom_u32_max(bitflips) + 1;
while (flips--) {
- int pos = prandom_u32() % (num * 8);
+ int pos = prandom_u32_max(num * 8);
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 2c87c7d89205..1bfecf502216 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -170,18 +170,11 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- /* Not all platforms can gate the clock, so it is not
- an error if the clock does not exists. */
- info->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(info->clk)) {
- ret = PTR_ERR(info->clk);
- if (ret == -ENOENT) {
- info->clk = NULL;
- } else {
- dev_err(&pdev->dev, "failed to get clock!\n");
- return ret;
- }
- }
+ /* Not all platforms can gate the clock, so it is optional. */
+ info->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
+ "failed to get clock!\n");
ret = clk_prepare_enable(info->clk);
if (ret) {
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index b2b42dd1a2de..24f52a30fb13 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -99,7 +99,7 @@ static const struct mtd_ooblayout_ops oob_sm_small_ops = {
static int sm_block_markbad(struct nand_chip *chip, loff_t ofs)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
struct sm_oob oob;
int ret;
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 87c1c7dd97eb..5d627048c420 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -862,8 +862,8 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
eccsteps, dma_data_dir);
- if (ret < 0)
- return ret;
+ if (!ret)
+ return -EIO;
desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
eccsteps, dma_transfer_dir,
@@ -893,8 +893,10 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
eccsteps, dma_data_dir);
- if (ret < 0)
+ if (!ret) {
+ ret = -EIO;
goto err_unmap_data;
+ }
desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
nfc->dma_ecc_sg.sgl,
@@ -1799,9 +1801,8 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
nand->cs_used[i] = cs;
}
- nand->wp_gpio = devm_gpiod_get_from_of_node(nfc->dev, dn,
- "wp-gpios", 0,
- GPIOD_OUT_HIGH, "wp");
+ nand->wp_gpio = devm_fwnode_gpiod_get(nfc->dev, of_fwnode_handle(dn),
+ "wp", GPIOD_OUT_HIGH, "wp");
if (IS_ERR(nand->wp_gpio)) {
ret = PTR_ERR(nand->wp_gpio);
if (ret != -ENOENT)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 9d73910a7ae8..dacd9c0e8b20 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -635,6 +635,7 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mtd_ecc_stats old_stats;
unsigned int max_bitflips = 0;
struct nand_io_iter iter;
bool disable_ecc = false;
@@ -646,6 +647,8 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
mutex_lock(&spinand->lock);
+ old_stats = mtd->ecc_stats;
+
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
@@ -668,6 +671,13 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
ops->oobretlen += iter.req.ooblen;
}
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
mutex_unlock(&spinand->lock);
if (ecc_failed && !ret)
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 913db0dd6a8d..64d319e959b2 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -124,7 +124,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
loff_t mask = mtd->writesize - 1;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -145,7 +145,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
loff_t mask = mtd->writesize - 1;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -168,7 +168,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf, uint8_t *oob)
{
loff_t mask = mtd->writesize - 1;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index d6db655a1d24..aaa06050c9bc 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -20,6 +20,16 @@ config MTD_BCM63XX_PARTS
This provides partition parsing for BCM63xx devices with CFE
bootloaders.
+config MTD_BRCM_U_BOOT
+ tristate "Broadcom's U-Boot partition parser"
+ depends on ARCH_BCM4908 || COMPILE_TEST
+ help
+ Broadcom uses a custom way of storing U-Boot environment variables.
+ They are placed inside the U-Boot partition itself at an unspecified
+ offset. It's possible to locate them by looking for a custom header with
+ a magic value. This driver does that and creates a subpartition for each
+ environment variables block it finds.
+
config MTD_CMDLINE_PARTS
tristate "Command line partition table parsing"
depends on MTD
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 2fcf0ab9e7da..23fa4de4016f 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+obj-$(CONFIG_MTD_BRCM_U_BOOT) += brcm_u-boot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
ofpart-y += ofpart_core.o
diff --git a/drivers/mtd/parsers/brcm_u-boot.c b/drivers/mtd/parsers/brcm_u-boot.c
new file mode 100644
index 000000000000..7c338dc7b8f3
--- /dev/null
+++ b/drivers/mtd/parsers/brcm_u-boot.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#define BRCM_U_BOOT_MAX_OFFSET 0x200000
+#define BRCM_U_BOOT_STEP 0x1000
+
+#define BRCM_U_BOOT_MAX_PARTS 2
+
+#define BRCM_U_BOOT_MAGIC 0x75456e76 /* uEnv */
+
+struct brcm_u_boot_header {
+ __le32 magic;
+ __le32 length;
+} __packed;
+
+static const char *names[BRCM_U_BOOT_MAX_PARTS] = {
+ "u-boot-env",
+ "u-boot-env-backup",
+};
+
+static int brcm_u_boot_parse(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct brcm_u_boot_header header;
+ struct mtd_partition *parts;
+ size_t bytes_read;
+ size_t offset;
+ int err;
+ int i = 0;
+
+ parts = kcalloc(BRCM_U_BOOT_MAX_PARTS, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ for (offset = 0;
+ offset < min_t(size_t, mtd->size, BRCM_U_BOOT_MAX_OFFSET);
+ offset += BRCM_U_BOOT_STEP) {
+ err = mtd_read(mtd, offset, sizeof(header), &bytes_read, (uint8_t *)&header);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("Failed to read from %s at 0x%zx: %d\n", mtd->name, offset, err);
+ continue;
+ }
+
+ if (le32_to_cpu(header.magic) != BRCM_U_BOOT_MAGIC)
+ continue;
+
+ parts[i].name = names[i];
+ parts[i].offset = offset;
+ parts[i].size = sizeof(header) + le32_to_cpu(header.length);
+ i++;
+ pr_info("offset:0x%zx magic:0x%08x BINGO\n", offset, header.magic);
+
+ if (i == BRCM_U_BOOT_MAX_PARTS)
+ break;
+ }
+
+ *pparts = parts;
+
+ return i;
+};
+
+static const struct of_device_id brcm_u_boot_of_match_table[] = {
+ { .compatible = "brcm,u-boot" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcm_u_boot_of_match_table);
+
+static struct mtd_part_parser brcm_u_boot_mtd_parser = {
+ .parse_fn = brcm_u_boot_parse,
+ .name = "brcm_u-boot",
+ .of_match_table = brcm_u_boot_of_match_table,
+};
+module_mtd_part_parser(brcm_u_boot_mtd_parser);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
index 0ddff1a4b51f..b34856def816 100644
--- a/drivers/mtd/parsers/cmdlinepart.c
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -193,7 +193,7 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].mask_flags = mask_flags;
parts[this_part].add_flags = add_flags;
if (name)
- strlcpy(extra_mem, name, name_len + 1);
+ strscpy(extra_mem, name, name_len + 1);
else
sprintf(extra_mem, "Partition_%03d", this_part);
parts[this_part].name = extra_mem;
@@ -298,7 +298,7 @@ static int mtdpart_setup_real(char *s)
this_mtd->parts = parts;
this_mtd->num_parts = num_parts;
this_mtd->mtd_id = (char*)(this_mtd + 1);
- strlcpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
+ strscpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
/* link into chain */
this_mtd->next = partitions;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 7f955fade838..4cfec3b7b446 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -239,7 +239,7 @@ static int sm_read_sector(struct sm_ftl *ftl,
uint8_t *buffer, struct sm_oob *oob)
{
struct mtd_info *mtd = ftl->trans->mtd;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
struct sm_oob tmp_oob;
int ret = -EIO;
int try = 0;
@@ -323,7 +323,7 @@ static int sm_write_sector(struct sm_ftl *ftl,
int zone, int block, int boffset,
uint8_t *buffer, struct sm_oob *oob)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
struct mtd_info *mtd = ftl->trans->mtd;
int ret;
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 1d05c121904c..04da685c36be 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -163,7 +163,7 @@ static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
/* Read redundancy area (wrapper to MTD_READ_OOB */
static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret;
ops.mode = MTD_OPS_RAW;
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index c4f271314f52..440988562cfd 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -47,7 +47,7 @@ struct nand_ecc_test {
static void single_bit_error_data(void *error_data, void *correct_data,
size_t size)
{
- unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32_max(size * BITS_PER_BYTE);
memcpy(error_data, correct_data, size);
__change_bit_le(offset, error_data);
@@ -58,9 +58,9 @@ static void double_bit_error_data(void *error_data, void *correct_data,
{
unsigned int offset[2];
- offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
+ offset[0] = prandom_u32_max(size * BITS_PER_BYTE);
do {
- offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
+ offset[1] = prandom_u32_max(size * BITS_PER_BYTE);
} while (offset[0] == offset[1]);
memcpy(error_data, correct_data, size);
@@ -71,7 +71,7 @@ static void double_bit_error_data(void *error_data, void *correct_data,
static unsigned int random_ecc_bit(size_t size)
{
- unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32_max(3 * BITS_PER_BYTE);
if (size == 256) {
/*
@@ -79,7 +79,7 @@ static unsigned int random_ecc_bit(size_t size)
* and 17th bit) in ECC code for 256 byte data block
*/
while (offset == 16 || offset == 17)
- offset = prandom_u32() % (3 * BITS_PER_BYTE);
+ offset = prandom_u32_max(3 * BITS_PER_BYTE);
}
return offset;
@@ -266,7 +266,7 @@ static int nand_ecc_test_run(const size_t size)
goto error;
}
- prandom_bytes(correct_data, size);
+ get_random_bytes(correct_data, size);
ecc_sw_hamming_calculate(correct_data, size, correct_ecc, sm_order);
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
nand_ecc_test[i].prepare(error_data, error_ecc,
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 08084c018a59..98d7508f95b1 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -99,7 +99,7 @@ static int write_page(int log)
static int rewrite_page(int log)
{
int err = 0;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
if (log)
pr_info("rewrite page\n");
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 532997e10e29..13fed398937e 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -56,7 +56,7 @@ static void do_vary_offset(void)
static int write_eraseblock(int ebnum)
{
int i;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
@@ -165,7 +165,7 @@ static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
static int verify_eraseblock(int ebnum)
{
int i;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t bitflips;
@@ -260,7 +260,7 @@ static int verify_eraseblock(int ebnum)
static int verify_eraseblock_in_one_go(int ebnum)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->oobavail * pgcnt;
@@ -338,7 +338,7 @@ static int __init mtd_oobtest_init(void)
int err = 0;
unsigned int i;
uint64_t tmp;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
loff_t addr = 0, addr0;
printk(KERN_INFO "\n");
diff --git a/drivers/mtd/tests/readtest.c b/drivers/mtd/tests/readtest.c
index e70d588083a3..99670ef91f2b 100644
--- a/drivers/mtd/tests/readtest.c
+++ b/drivers/mtd/tests/readtest.c
@@ -47,7 +47,7 @@ static int read_eraseblock_by_page(int ebnum)
err = ret;
}
if (mtd->oobsize) {
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.mode = MTD_OPS_PLACE_OOB;
ops.len = 0;
diff --git a/drivers/mtd/tests/speedtest.c b/drivers/mtd/tests/speedtest.c
index c9ec7086bfa1..075bce32caa5 100644
--- a/drivers/mtd/tests/speedtest.c
+++ b/drivers/mtd/tests/speedtest.c
@@ -223,7 +223,7 @@ static int __init mtd_speedtest_init(void)
if (!iobuf)
goto out;
- prandom_bytes(iobuf, mtd->erasesize);
+ get_random_bytes(iobuf, mtd->erasesize);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
diff --git a/drivers/mtd/tests/stresstest.c b/drivers/mtd/tests/stresstest.c
index cb29c8c1b370..75b6ddc5dc4d 100644
--- a/drivers/mtd/tests/stresstest.c
+++ b/drivers/mtd/tests/stresstest.c
@@ -45,9 +45,8 @@ static int rand_eb(void)
unsigned int eb;
again:
- eb = prandom_u32();
/* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
- eb %= (ebcnt - 1);
+ eb = prandom_u32_max(ebcnt - 1);
if (bbt[eb])
goto again;
return eb;
@@ -55,20 +54,12 @@ again:
static int rand_offs(void)
{
- unsigned int offs;
-
- offs = prandom_u32();
- offs %= bufsize;
- return offs;
+ return prandom_u32_max(bufsize);
}
static int rand_len(int offs)
{
- unsigned int len;
-
- len = prandom_u32();
- len %= (bufsize - offs);
- return len;
+ return prandom_u32_max(bufsize - offs);
}
static int do_read(void)
@@ -127,7 +118,7 @@ static int do_write(void)
static int do_operation(void)
{
- if (prandom_u32() & 1)
+ if (prandom_u32_max(2))
return do_read();
else
return do_write();
@@ -192,7 +183,7 @@ static int __init mtd_stresstest_init(void)
goto out;
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
- prandom_bytes(writebuf, bufsize);
+ get_random_bytes(writebuf, bufsize);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 4cf67a2a0d04..75eaecc8639f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -409,7 +409,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
ret = blk_mq_alloc_tag_set(&dev->tag_set);
if (ret) {
dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
- goto out_free_dev;;
+ goto out_free_dev;
}
@@ -441,7 +441,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
/*
* Create one workqueue per volume (per registered block device).
- * Rembember workqueues are cheap, they're not threads.
+ * Remember workqueues are cheap, they're not threads.
*/
dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
if (!dev->wq) {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a32050fecabf..a901f8edfa41 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -807,6 +807,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
+ * @disable_fm: whether to disable fastmap
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -814,11 +815,15 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* automatically. Returns the new UBI device number in case of success and a
* negative error code in case of failure.
*
+ * If @disable_fm is true, UBI doesn't create a new fastmap even if the module
+ * parameter 'fm_autoconvert' is set, and any existing fastmap will be
+ * destroyed after a full scan.
+ *
* Note, the invocations of this function has to be serialized by the
* @ubi_devices_mutex.
*/
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
- int vid_hdr_offset, int max_beb_per1024)
+ int vid_hdr_offset, int max_beb_per1024, bool disable_fm)
{
struct ubi_device *ubi;
int i, err;
@@ -921,7 +926,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
UBI_FM_MIN_POOL_SIZE);
ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
- ubi->fm_disabled = !fm_autoconvert;
+ ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
if (fm_debug)
ubi_enable_dbg_chk_fastmap(ubi);
@@ -962,7 +967,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->fm_buf)
goto out_free;
#endif
- err = ubi_attach(ubi, 0);
+ err = ubi_attach(ubi, disable_fm ? 1 : 0);
if (err) {
ubi_err(ubi, "failed to attach mtd%d, error %d",
mtd->index, err);
@@ -1242,7 +1247,8 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, p->ubi_num,
- p->vid_hdr_offs, p->max_beb_per1024);
+ p->vid_hdr_offs, p->max_beb_per1024,
+ false);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
pr_err("UBI error: cannot attach mtd%d\n",
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index cc9a28cf9d82..f43430b9c1e6 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -672,7 +672,7 @@ static int verify_rsvol_req(const struct ubi_device *ubi,
* @req: volumes re-name request
*
* This is a helper function for the volume re-name IOCTL which validates the
- * the request, opens the volume and calls corresponding volumes management
+ * request, opens the volume and calls corresponding volumes management
* function. Returns zero in case of success and a negative error code in case
* of failure.
*/
@@ -1041,7 +1041,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
*/
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
- req.max_beb_per1024);
+ req.max_beb_per1024, !!req.disable_fm);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 31d427ee191a..908d0e088557 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -590,7 +590,7 @@ int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
- ubi->dbg.power_cut_counter += prandom_u32() % range;
+ ubi->dbg.power_cut_counter += prandom_u32_max(range);
}
return 0;
}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 118248a5d7d4..dc8d8f83657a 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -73,7 +73,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_bitflips)
- return !(prandom_u32() % 200);
+ return !prandom_u32_max(200);
return 0;
}
@@ -87,7 +87,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(prandom_u32() % 500);
+ return !prandom_u32_max(500);
return 0;
}
@@ -101,7 +101,7 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(prandom_u32() % 400);
+ return !prandom_u32_max(400);
return 0;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index ccc5979642b7..09c408c45a62 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -377,7 +377,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
*
* This function locks a logical eraseblock for writing if there is no
* contention and does nothing if there is contention. Returns %0 in case of
- * success, %1 in case of contention, and and a negative error code in case of
+ * success, %1 in case of contention, and a negative error code in case of
* failure.
*/
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 6e95c4b1473e..ca2d9efe62c3 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -20,8 +20,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
- ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
- GFP_KERNEL);
+ ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -34,7 +33,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
*/
static inline void free_seen(unsigned long *seen)
{
- kfree(seen);
+ bitmap_free(seen);
}
/**
@@ -1108,8 +1107,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
if (!ubi->fast_attach)
return 0;
- vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
- GFP_KERNEL);
+ vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
if (!vol->checkmap)
return -ENOMEM;
@@ -1118,7 +1116,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
- kfree(vol->checkmap);
+ bitmap_free(vol->checkmap);
}
/**
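The fastmap hunks above replace an open-coded kcalloc(BITS_TO_LONGS(n), sizeof(unsigned long), ...) with bitmap_zalloc()/bitmap_free(), which state the intent directly and size the allocation correctly by construction. A minimal sketch of the idiom, with a hypothetical nbits count:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Sketch: allocate, use and free a bitmap of 'nbits' bits. */
static int bitmap_example(unsigned int nbits)
{
	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;

	set_bit(0, map);		/* mark the first entry */
	bitmap_free(map);
	return 0;
}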
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 8a7306cc1947..01b644861253 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1147,7 +1147,7 @@ fail:
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
- * This function returns zero if the erase counter header is all right and and
+ * This function returns zero if the erase counter header is all right and
* a negative error code if not or if an error occurred.
*/
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 386db0598e95..2c9cd3b6434f 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -131,7 +131,7 @@ enum {
* is changed radically. This field is duplicated in the volume identifier
* header.
*
- * The @vid_hdr_offset and @data_offset fields contain the offset of the the
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
* volume identifier header and user data, relative to the beginning of the
* physical eraseblock. These values have to be the same for all physical
* eraseblocks.
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 078112e23dfd..c8f1bd4fa100 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -86,7 +86,7 @@ void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
* Error codes returned by the I/O sub-system.
*
* UBI_IO_FF: the read region of flash contains only 0xFFs
- * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also also there was a data
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
* integrity error reported by the MTD driver
* (uncorrectable ECC error in case of NAND)
* UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
@@ -281,7 +281,7 @@ struct ubi_eba_leb_desc {
/**
* struct ubi_volume - UBI volume description data structure.
- * @dev: device object to make use of the the Linux device model
+ * @dev: device object to make use of the Linux device model
* @cdev: character device object to create character device
* @ubi: reference to the UBI device description object
* @vol_id: volume ID
@@ -439,7 +439,7 @@ struct ubi_debug_info {
/**
* struct ubi_device - UBI device description structure
- * @dev: UBI device object to use the the Linux device model
+ * @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
* @ubi_num: UBI device number
* @ubi_name: UBI device name
@@ -937,7 +937,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
- int vid_hdr_offset, int max_beb_per1024);
+ int vid_hdr_offset, int max_beb_per1024,
+ bool disable_fm);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 6ea95ade4ca6..8fcc0bdf0635 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -623,7 +623,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
* @ubi: UBI device description object
* @vol_id: volume ID
*
- * Returns zero if volume is all right and a a negative error code if not.
+ * Returns zero if volume is all right and a negative error code if not.
*/
static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
@@ -776,7 +776,7 @@ fail:
* self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
- * Returns zero if volumes are all right and a a negative error code if not.
+ * Returns zero if volumes are all right and a negative error code if not.
*/
static int self_check_volumes(struct ubi_device *ubi)
{
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 55bae06cf408..68eb0f21b3fe 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -376,7 +376,7 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
* refill_wl_user_pool().
* @ubi: UBI device description object
*
- * This function returns a a wear leveling entry in case of success and
+ * This function returns a wear leveling entry in case of success and
* NULL in case of failure.
*/
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
@@ -429,7 +429,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
/**
* sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
- * @e: the the physical eraseblock to erase
+ * @e: the physical eraseblock to erase
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a negative error code in
@@ -1016,7 +1016,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
/*
* If the ubi->scrub tree is not empty, scrubbing is needed, and the
- * the WL worker has to be scheduled anyway.
+ * WL worker has to be scheduled anyway.
*/
if (!ubi->scrub.rb_node) {
#ifdef CONFIG_MTD_UBI_FASTMAP
@@ -1464,7 +1464,7 @@ static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
* ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to schedule
- * @force: dont't read the block, assume bitflips happened and take action.
+ * @force: don't read the block, assume bitflips happened and take action.
*
* This function reads the given eraseblock and checks if bitflips occured.
* In case of bitflips, the eraseblock is scheduled for scrubbing.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 15d4a38b1351..9e63b8c43f3e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,6 +76,7 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
+ depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
select CRYPTO
@@ -85,8 +86,6 @@ config WIREGUARD
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select ARM_CRYPTO if ARM
- select ARM64_CRYPTO if ARM64
select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 24bb50dfd362..e84c49bf4d0c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4806,7 +4806,7 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
switch (packets_per_slave) {
case 0:
- slave_id = prandom_u32();
+ slave_id = get_random_u32();
break;
case 1:
slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 841da29cef93..f6c0938027ec 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -178,6 +178,8 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
+
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
int *actual_len);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 824cab80aa02..e91648ed7386 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -477,7 +477,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
/* This method might sleep. Do not call it in the atomic context
* of URB completions.
*/
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
usb_kill_anchored_urbs(&priv->tx_submitted);
kvaser_usb_reset_tx_urb_contexts(priv);
@@ -729,6 +729,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_usb_anchor(&priv->tx_submitted);
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
+ init_completion(&priv->flush_comp);
priv->can.ctrlmode_supported = 0;
priv->dev = dev;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 6871d474dabf..7b52fda73d82 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1916,7 +1916,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->flush_comp);
+ reinit_completion(&priv->flush_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
priv->channel);
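The kvaser_usb change above initialises flush_comp once, when the channel is set up, and then uses reinit_completion() before each flush request, so the completion is never re-initialised while a waiter could still be pending. A minimal sketch of this init-once / reinit-per-use idiom, with a hypothetical structure and request function:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct chan {
	struct completion flush_done;
};

static void chan_setup(struct chan *c)
{
	init_completion(&c->flush_done);	/* once, at creation time */
}

static int chan_flush(struct chan *c)
{
	reinit_completion(&c->flush_done);	/* before every new request */
	/* ... submit the command; the reply path calls complete() ... */
	if (!wait_for_completion_timeout(&c->flush_done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}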
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 07f687f29b34..50f2ac8319ff 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -310,6 +310,38 @@ struct kvaser_cmd {
} u;
} __packed;
+#define CMD_SIZE_ANY 0xff
+#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field)
+
+static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+};
+
+static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ /* ignored events: */
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+};
+
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
* handling. Some discrepancies between the two families exist:
*
@@ -397,6 +429,43 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* buffer size >= cmd->len ensured by caller */
+ u8 min_size = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
+ min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
+ break;
+ case KVASER_USBCAN:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
+ min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
+ break;
+ }
+
+ if (min_size == CMD_SIZE_ANY)
+ return 0;
+
+ if (min_size) {
+ min_size += CMD_HEADER_LEN;
+ if (cmd->len >= min_size)
+ return 0;
+
+ dev_err_ratelimited(&dev->intf->dev,
+ "Received command %u too short (size %u, needed %u)",
+ cmd->id, cmd->len, min_size);
+ return -EIO;
+ }
+
+ dev_warn_ratelimited(&dev->intf->dev,
+ "Unhandled command (%d, size %d)\n",
+ cmd->id, cmd->len);
+ return -EINVAL;
+}
+
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *cmd_len,
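The added kvaser_usb_leaf_verify_size() is a table-driven length check: each command ID maps to a minimum payload size, expressed as sizeof_field() of the matching union member, and commands shorter than header plus that minimum are rejected before being parsed. A minimal sketch of the idea, using a hypothetical command layout and IDs:

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct cmd {
	u8 id;
	u8 len;		/* total length, header included */
	union {
		u8 simple[2];
		u8 cardinfo[30];
	} u;
};

static const u8 cmd_min_sizes[] = {
	[1] = sizeof_field(struct cmd, u.simple),
	[2] = sizeof_field(struct cmd, u.cardinfo),
};

static bool cmd_len_ok(const struct cmd *cmd)
{
	u8 min = 0;

	if (cmd->id < ARRAY_SIZE(cmd_min_sizes))
		min = cmd_min_sizes[cmd->id];
	if (!min)
		return false;	/* unknown command ID */
	return cmd->len >= min + offsetof(struct cmd, u);
}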
@@ -502,6 +571,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
end:
kfree(buf);
+ if (err == 0)
+ err = kvaser_usb_leaf_verify_size(dev, cmd);
+
return err;
}
@@ -1133,6 +1205,9 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
+ if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
+ return;
+
switch (cmd->id) {
case CMD_START_CHIP_REPLY:
kvaser_usb_leaf_start_chip_reply(dev, cmd);
@@ -1351,9 +1426,13 @@ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
switch (mode) {
case CAN_MODE_START:
+ kvaser_usb_unlink_tx_urbs(priv);
+
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 5669c92c93f7..c5c3b4e92f28 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -137,27 +137,42 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
struct qca8k_mgmt_eth_data *mgmt_eth_data;
struct qca8k_priv *priv = ds->priv;
struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 command;
u8 len, cmd;
+ int i;
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
mgmt_eth_data = &priv->mgmt_eth_data;
- cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
- len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
+ command = get_unaligned_le32(&mgmt_ethhdr->command);
+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
/* Make sure the seq match the requested packet */
- if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
+ if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
mgmt_eth_data->ack = true;
if (cmd == MDIO_READ) {
- mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
+ u32 *val = mgmt_eth_data->data;
+
+ *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
/* Get the rest of the 12 byte of data.
* The read/write function will extract the requested data.
*/
- if (len > QCA_HDR_MGMT_DATA1_LEN)
- memcpy(mgmt_eth_data->data + 1, skb->data,
- QCA_HDR_MGMT_DATA2_LEN);
+ if (len > QCA_HDR_MGMT_DATA1_LEN) {
+ __le32 *data2 = (__le32 *)skb->data;
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ *val = get_unaligned_le32(data2);
+ val++;
+ data2++;
+ }
+ }
}
complete(&mgmt_eth_data->rw_done);
@@ -169,8 +184,10 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
struct qca_mgmt_ethhdr *mgmt_ethhdr;
unsigned int real_len;
struct sk_buff *skb;
- u32 *data2;
+ __le32 *data2;
+ u32 command;
u16 hdr;
+ int i;
skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
if (!skb)
@@ -199,20 +216,32 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
- mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+ command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+ command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
QCA_HDR_MGMT_CHECK_CODE_VAL);
+ put_unaligned_le32(command, &mgmt_ethhdr->command);
+
if (cmd == MDIO_WRITE)
- mgmt_ethhdr->mdio_data = *val;
+ put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
mgmt_ethhdr->hdr = htons(hdr);
data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
- if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
- memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ put_unaligned_le32(*val, data2);
+ data2++;
+ val++;
+ }
+ }
return skb;
}
@@ -220,9 +249,11 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
{
struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 seq;
+ seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
- mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+ put_unaligned_le32(seq, &mgmt_ethhdr->seq);
}
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
@@ -1487,9 +1518,9 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
struct qca8k_priv *priv = ds->priv;
const struct qca8k_mib_desc *mib;
struct mib_ethhdr *mib_ethhdr;
- int i, mib_len, offset = 0;
- u64 *data;
+ __le32 *data2;
u8 port;
+ int i;
mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
mib_eth_data = &priv->mib_eth_data;
@@ -1501,28 +1532,24 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
if (port != mib_eth_data->req_port)
goto exit;
- data = mib_eth_data->data;
+ data2 = (__le32 *)skb->data;
for (i = 0; i < priv->info->mib_count; i++) {
mib = &ar8327_mib[i];
/* First 3 mib are present in the skb head */
if (i < 3) {
- data[i] = mib_ethhdr->data[i];
+ mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
continue;
}
- mib_len = sizeof(uint32_t);
-
/* Some mib are 64 bit wide */
if (mib->size == 2)
- mib_len = sizeof(uint64_t);
-
- /* Copy the mib value from packet to the */
- memcpy(data + i, skb->data + offset, mib_len);
+ mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
+ else
+ mib_eth_data->data[i] = get_unaligned_le32(data2);
- /* Set the offset for the next mib */
- offset += mib_len;
+ data2 += mib->size;
}
exit:
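The qca8k hunks above stop dereferencing the management header fields directly and use get_unaligned_le32()/put_unaligned_le32() (and get_unaligned_le64() for the wide MIB counters), because the header lives in skb data that is little-endian on the wire and not guaranteed to be 4-byte aligned. A minimal sketch of the accessor pattern, with a hypothetical wire header:

#include <linux/types.h>
#include <asm/unaligned.h>

struct wire_hdr {
	__le32 command;		/* little-endian, possibly unaligned */
};

static u32 read_command(const void *pkt)
{
	const struct wire_hdr *hdr = pkt;

	return get_unaligned_le32(&hdr->command);	/* safe on any arch */
}

static void write_command(void *pkt, u32 cmd)
{
	struct wire_hdr *hdr = pkt;

	put_unaligned_le32(cmd, &hdr->command);
}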
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index aaee7c4248e6..1744d623999d 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1169,6 +1169,11 @@ static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
return ret;
}
+static bool adin1110_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &adin1110_netdev_ops;
+}
+
static int adin1110_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -1177,6 +1182,9 @@ static int adin1110_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info = ptr;
int ret = 0;
+ if (!adin1110_port_dev_check(dev))
+ return NOTIFY_DONE;
+
switch (event) {
case NETDEV_CHANGEUPPER:
if (netif_is_bridge_master(info->upper_dev)) {
@@ -1202,11 +1210,6 @@ static void adin1110_disconnect_phy(void *data)
phy_disconnect(data);
}
-static bool adin1110_port_dev_check(const struct net_device *dev)
-{
- return dev->netdev_ops == &adin1110_netdev_ops;
-}
-
static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
{
struct adin1110_priv *priv = port_priv->priv;
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 2e6c5f258a1f..0ddfb5b5d53c 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -17,8 +17,3 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
-
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_tg3.o += -Wno-array-bounds
-endif
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 16b73bb9acc7..5af16e5f9ad0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -484,7 +484,7 @@ struct bcm_rsb {
/* Number of Receive hardware descriptor words */
#define SP_NUM_HW_RX_DESC_WORDS 1024
-#define SP_LT_NUM_HW_RX_DESC_WORDS 256
+#define SP_LT_NUM_HW_RX_DESC_WORDS 512
/* Internal linked-list RAM size */
#define SP_NUM_TX_DESC 1536
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index eed98c10ca9d..04cf7684f1b0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3874,7 +3874,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
if (bp->vnic_info[i].rss_hash_key) {
if (i == 0)
- prandom_bytes(vnic->rss_hash_key,
+ get_random_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
else
memcpy(vnic->rss_hash_key,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index a36803e79e92..8a6f788f6294 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -613,6 +613,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
{
+ bool rc = false;
u32 datalen;
u16 index;
u8 *buf;
@@ -632,20 +633,20 @@ static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
- goto err;
+ goto done;
}
if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
- goto err;
+ goto done;
}
- return true;
+ rc = true;
-err:
+done:
kfree(buf);
- return false;
+ return rc;
}
static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index e86503d97f32..2198e35d9e18 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4105,8 +4105,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
atomic_set(&cp->csk_tbl[i].ref_count, 0);
- port_id = prandom_u32();
- port_id %= CNIC_LOCAL_PORT_RANGE;
+ port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE);
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
CNIC_LOCAL_PORT_MIN, port_id)) {
cnic_cm_free_mem(dev);
@@ -4165,7 +4164,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
- seed = prandom_u32();
+ seed = get_random_u32();
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index f90bfba4b303..c2e7037c7ba1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1063,7 +1063,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
rpl5->opt0 = cpu_to_be64(opt0);
rpl5->opt2 = cpu_to_be32(opt2);
- rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+ rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
@@ -1466,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
tp->write_seq = snd_isn;
tp->snd_nxt = snd_isn;
tp->snd_una = snd_isn;
- inet_sk(sk)->inet_id = prandom_u32();
+ inet_sk(sk)->inet_id = get_random_u16();
assign_rxopt(sk, opt);
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 539992dad8ba..a4256087ac82 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -919,8 +919,8 @@ static int csk_wait_memory(struct chtls_dev *cdev,
current_timeo = *timeo_p;
noblock = (*timeo_p ? false : true);
if (csk_mem_free(cdev, sk)) {
- current_timeo = (prandom_u32() % (HZ / 5)) + 2;
- vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+ current_timeo = prandom_u32_max(HZ / 5) + 2;
+ vm_wait = prandom_u32_max(HZ / 5) + 2;
}
add_wait_queue(sk_sleep(sk), &wait);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index e6416332ec79..a842e1999122 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -7,7 +7,6 @@
#include <linux/math64.h>
#include <linux/refcount.h>
#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
#include <net/tc_act/tc_gate.h>
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 00fafc0f8512..430eccea8e5e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
- if (ret)
+ if (ret) {
+ put_device(&hdev->cls_dev);
return ret;
+ }
__module_get(THIS_MODULE);
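The hnae fix above follows the driver-model rule that once device_register() has been called, even unsuccessfully, the final reference must be dropped with put_device() so the release() callback frees the object, rather than freeing it directly. A minimal sketch of the pattern, with a hypothetical child device:

#include <linux/device.h>

static int register_child(struct device *child)
{
	int ret = device_register(child);

	if (ret) {
		/*
		 * device_register() took a reference even though it
		 * failed; drop it and let release() do the freeing.
		 */
		put_device(child);
		return ret;
	}
	return 0;
}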
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7e75706f76db..87f36d1ce800 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2183,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
- err = i40e_alloc_rx_bi(&rx_rings[i]);
- if (err)
- goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2c07fa8ecfc8..b5dcd15ced36 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3566,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- kfree(ring->rx_bi);
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ret = i40e_alloc_rx_bi_zc(ring);
- if (ret)
- return ret;
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
@@ -3589,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->queue_index);
} else {
- ret = i40e_alloc_rx_bi(ring);
- if (ret)
- return ret;
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -13296,6 +13289,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
i40e_reset_and_rebuild(pf, true, true);
}
+ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, true))
+ return -ENOMEM;
+ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, false))
+ return -ENOMEM;
+ }
+
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
@@ -13528,6 +13529,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
i40e_queue_pair_disable_irq(vsi, queue_pair);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 69e67eb6aea7..b97c95f89fa0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1457,14 +1457,6 @@ err:
return -ENOMEM;
}
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
- rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1593,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+ rx_ring->rx_bi =
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 41f86e9535a0..768290dc6f48 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -469,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 6d4009e0cbd6..cd7b52fb6b46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -10,14 +10,6 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
- rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi_zc, 0,
@@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try to allocate memory and return -ENOMEM if the allocation fails.
+ * On success, the SW ring buffer is replaced with the newly allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+ sizeof(*rx_ring->rx_bi);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ rx_ring->rx_bi_zc = sw_ring;
+ } else {
+ kfree(rx_ring->rx_bi_zc);
+ rx_ring->rx_bi_zc = NULL;
+ rx_ring->rx_bi = sw_ring;
+ }
+ return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+ struct i40e_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+ rx_ring = vsi->rx_rings[q];
+ if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
* i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
* certain ring/qid
* @vsi: Current VSI
@@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
if (err)
return err;
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+ if (err)
+ return err;
+
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+ if (err)
+ return err;
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index bb962987f300..821df248f8be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
index 5ba618aed6ad..4a343f853b28 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -1182,8 +1182,10 @@ static int mcs_register_interrupts(struct mcs *mcs)
mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
- if (!mcs->tx_sa_active)
+ if (!mcs->tx_sa_active) {
+ ret = -ENOMEM;
goto exit;
+ }
return ret;
exit:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 64f3acd7f67b..9ec5f38d38a8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,7 +133,7 @@ static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
default:
ret = -EINVAL;
goto fail;
- };
+ }
mutex_unlock(&mbox->lock);
@@ -284,7 +284,7 @@ static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
if (!sc_req) {
- return -ENOMEM;
+ ret = -ENOMEM;
goto fail;
}
@@ -594,7 +594,7 @@ static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
if (!req) {
- return -ENOMEM;
+ ret = -ENOMEM;
goto fail;
}
@@ -815,6 +815,7 @@ free_flowid:
cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
txsc->hw_flow_id, false);
fail:
+ kfree(txsc);
return ERR_PTR(ret);
}
@@ -870,6 +871,7 @@ free_flowid:
cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
rxsc->hw_flow_id, false);
fail:
+ kfree(rxsc);
return ERR_PTR(ret);
}
@@ -1653,6 +1655,7 @@ int cn10k_mcs_init(struct otx2_nic *pfvf)
return 0;
fail:
dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+ mutex_unlock(&mbox->lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 5803d7f9137c..892ca88e0cf4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2810,7 +2810,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_del_mcam_entries;
+ goto err_mcs_free;
}
err = otx2_wq_init(pf);
@@ -2849,6 +2849,8 @@ err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_mcs_free:
+ cn10k_mcs_free(pf);
err_del_mcam_entries:
otx2_mcam_flow_del(pf);
err_ptp_destroy:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
index 6f2b95a5263e..1da9c1bc1ee9 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
@@ -96,6 +96,8 @@ int prestera_mall_replace(struct prestera_flow_block *block,
list_for_each_entry(binding, &block->binding_list, list) {
err = prestera_span_rule_add(binding, port, block->ingress);
+ if (err == -EEXIST)
+ return err;
if (err)
goto rollback;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index 4f65df0ae5e8..aa080dc57ff0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -498,8 +498,8 @@ prestera_nexthop_group_get(struct prestera_switch *sw,
refcount_inc(&nh_grp->refcount);
} else {
nh_grp = __prestera_nexthop_group_create(sw, key);
- if (IS_ERR(nh_grp))
- return ERR_CAST(nh_grp);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
refcount_set(&nh_grp->refcount, 1);
}
@@ -651,7 +651,7 @@ prestera_fib_node_create(struct prestera_switch *sw,
case PRESTERA_FIB_TYPE_UC_NH:
fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
nh_grp_key);
- if (!fib_node->info.nh_grp)
+ if (IS_ERR(fib_node->info.nh_grp))
goto err_nh_grp_get;
grp_id = fib_node->info.nh_grp->grp_id;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
index f0e9d6ea88c5..1005182ce3bc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -107,7 +107,7 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
entry = prestera_span_entry_find_by_id(sw->span, span_id);
if (!entry)
- return false;
+ return -ENOENT;
if (!refcount_dec_and_test(&entry->ref_count))
return 0;
@@ -151,6 +151,9 @@ int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
{
int err;
+ if (binding->span_id == PRESTERA_SPAN_INVALID_ID)
+ return -ENOENT;
+
err = prestera_hw_span_unbind(binding->port, ingress);
if (err)
return err;
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index fe66ba8793cf..45ba0970504a 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,8 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_mtk_ppe.o += -Wno-array-bounds
-endif
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4fba7cb0144b..7cd381530aa4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4060,19 +4060,23 @@ static int mtk_probe(struct platform_device *pdev)
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- return -ENXIO;
+ err = -ENXIO;
+ goto err_wed_exit;
}
}
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
if (IS_ERR(eth->clks[i])) {
- if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_wed_exit;
+ }
if (eth->soc->required_clks & BIT(i)) {
dev_err(&pdev->dev, "clock %s not found\n",
mtk_clks_source_name[i]);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_wed_exit;
}
eth->clks[i] = NULL;
}
@@ -4083,7 +4087,7 @@ static int mtk_probe(struct platform_device *pdev)
err = mtk_hw_init(eth);
if (err)
- return err;
+ goto err_wed_exit;
eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
@@ -4179,6 +4183,8 @@ err_free_dev:
mtk_free_dev(eth);
err_deinit_hw:
mtk_hw_deinit(eth);
+err_wed_exit:
+ mtk_wed_exit();
return err;
}
@@ -4198,6 +4204,7 @@ static int mtk_remove(struct platform_device *pdev)
phylink_disconnect_phy(mac->phylink);
}
+ mtk_wed_exit();
mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index ae00e572390d..2d8ca99f2467 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -397,12 +397,6 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
return 0;
}
-static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
-{
- return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
- FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
-}
-
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
struct mtk_foe_entry *data)
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 099b6e0df619..65e01bf4b4d2 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1072,16 +1072,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
pdev = of_find_device_by_node(np);
if (!pdev)
- return;
+ goto err_of_node_put;
get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return;
+ goto err_put_device;
regs = syscon_regmap_lookup_by_phandle(np, NULL);
if (IS_ERR(regs))
- return;
+ goto err_put_device;
rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
@@ -1124,8 +1124,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw_list[index] = hw;
+ mutex_unlock(&hw_lock);
+
+ return;
+
unlock:
mutex_unlock(&hw_lock);
+err_put_device:
+ put_device(&pdev->dev);
+err_of_node_put:
+ of_node_put(np);
}
void mtk_wed_exit(void)
@@ -1146,6 +1154,7 @@ void mtk_wed_exit(void)
hw_list[i] = NULL;
debugfs_remove(hw->debugfs_dir);
put_device(hw->dev);
+ of_node_put(hw->node);
kfree(hw);
}
}
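The mtk_wed error paths above illustrate the usual unwind convention for lookups that return referenced objects (the device node passed in, the platform device found by of_find_device_by_node()): the error labels drop the references in reverse order of acquisition. A minimal sketch of that shape, with a hypothetical phandle name:

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/errno.h>

static int attach_helper(struct device_node *parent)
{
	struct device_node *np;
	struct platform_device *pdev;
	int irq, err;

	np = of_parse_phandle(parent, "example-phandle", 0);	/* node ref */
	if (!np)
		return -ENODEV;

	pdev = of_find_device_by_node(np);	/* device ref */
	if (!pdev) {
		err = -ENODEV;
		goto err_of_node_put;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto err_put_device;
	}

	/* success: keep both references for later use */
	return 0;

err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
	return err;
}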
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index a53e205f4a89..be74e1403328 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -115,6 +115,7 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
struct mlx5e_flow_meters *flow_meters;
u8 cir_man, cir_exp, cbs_man, cbs_exp;
struct mlx5_aso_wqe *aso_wqe;
+ unsigned long expires;
struct mlx5_aso *aso;
u64 rate, burst;
u8 ds_cnt;
@@ -187,7 +188,12 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
/* With newer FW, the wait for the first ASO WQE is more than 2us, put the wait 10ms. */
- err = mlx5_aso_poll_cq(aso, true, 10);
+ expires = jiffies + msecs_to_jiffies(10);
+ do {
+ err = mlx5_aso_poll_cq(aso, true);
+ if (err)
+ usleep_range(2, 10);
+ } while (err && time_is_after_jiffies(expires));
mutex_unlock(&flow_meters->aso_lock);
return err;
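With the interval argument removed from mlx5_aso_poll_cq(), the retry policy now lives in the caller shown above: poll once, sleep a couple of microseconds, and retry until roughly 10 ms have passed. A minimal sketch of that caller-side loop, assuming a hypothetical poll_once() callback that returns 0 when a CQE is available and -ETIMEDOUT otherwise:

#include <linux/jiffies.h>
#include <linux/delay.h>

static int poll_with_deadline(int (*poll_once)(void *ctx), void *ctx,
			      unsigned int timeout_ms)
{
	unsigned long expires = jiffies + msecs_to_jiffies(timeout_ms);
	int err;

	do {
		err = poll_once(ctx);
		if (err)
			usleep_range(2, 10);	/* brief back-off */
	} while (err && time_is_after_jiffies(expires));

	return err;
}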
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 5da746da898d..41970067917b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1405,7 +1405,7 @@ static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_mac
MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- err = mlx5_aso_poll_cq(maso, false, 10);
+ err = mlx5_aso_poll_cq(maso, false);
mutex_unlock(&aso->aso_lock);
return err;
@@ -1430,7 +1430,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- err = mlx5_aso_poll_cq(maso, false, 10);
+ err = mlx5_aso_poll_cq(maso, false);
if (err)
goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 21e14507ff5c..baa8092f335e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -381,20 +381,12 @@ void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
WRITE_ONCE(doorbell_cseg, NULL);
}
-int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
{
struct mlx5_aso_cq *cq = &aso->cq;
struct mlx5_cqe64 *cqe;
- unsigned long expires;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
-
- expires = jiffies + msecs_to_jiffies(interval_ms);
- while (!cqe && time_is_after_jiffies(expires)) {
- usleep_range(2, 10);
- cqe = mlx5_cqwq_get_cqe(&cq->wq);
- }
-
if (!cqe)
return -ETIMEDOUT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
index d854e01d7fc5..2d40dcf9d42e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -83,7 +83,7 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
u32 obj_id, u32 opc_mode);
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
struct mlx5_wqe_ctrl_seg *doorbell_cseg);
-int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data);
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
void mlx5_aso_destroy(struct mlx5_aso *aso);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 3ab3e4536b99..8593cafa6368 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -373,10 +373,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (ipv6_tun) {
key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
key_size +=
- sizeof(struct nfp_flower_ipv6_udp_tun);
+ sizeof(struct nfp_flower_ipv6_gre_tun);
} else {
key_size +=
- sizeof(struct nfp_flower_ipv4_udp_tun);
+ sizeof(struct nfp_flower_ipv4_gre_tun);
}
if (enc_op.key) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 5d58fd99be3c..19d4848df17d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -2817,11 +2817,15 @@ err_out:
* than the full array, but leave the qcq shells in place
*/
for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
- lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
- ionic_qcq_free(lif, lif->txqcqs[i]);
+ if (lif->txqcqs && lif->txqcqs[i]) {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+ }
- lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
- ionic_qcq_free(lif, lif->rxqcqs[i]);
+ if (lif->rxqcqs && lif->rxqcqs[i]) {
+ lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ }
}
if (err)
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 023682cd2768..9e59669a93dd 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -129,7 +129,7 @@ static int rocker_reg_test(const struct rocker *rocker)
u64 test_reg;
u64 rnd;
- rnd = prandom_u32();
+ rnd = get_random_u32();
rnd >>= 1;
rocker_write32(rocker, TEST_REG, rnd);
test_reg = rocker_read32(rocker, TEST_REG);
@@ -139,9 +139,9 @@ static int rocker_reg_test(const struct rocker *rocker)
return -EIO;
}
- rnd = prandom_u32();
+ rnd = get_random_u32();
rnd <<= 31;
- rnd |= prandom_u32();
+ rnd |= get_random_u32();
rocker_write64(rocker, TEST_REG64, rnd);
test_reg = rocker_read64(rocker, TEST_REG64);
if (test_reg != rnd * 2) {
@@ -224,7 +224,7 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
if (err)
goto unmap;
- prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
+ get_random_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
expect[i] = ~buf[i];
err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index d1e1aa19a68e..7022fb2005a2 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3277,6 +3277,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
bool was_enabled = efx->port_enabled;
int rc;
+#ifdef CONFIG_SFC_SRIOV
+ /* If this function is a VF and we have access to the parent PF,
+ * then use the PF control path to attempt to change the VF MAC address.
+ */
+ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+ struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 mac[ETH_ALEN];
+
+ /* net_dev->dev_addr can be zeroed by efx_net_stop in
+ * efx_ef10_sriov_set_vf_mac, so pass in a copy.
+ */
+ ether_addr_copy(mac, efx->net_dev->dev_addr);
+
+ rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
+ if (!rc)
+ return 0;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Updating VF mac via PF failed (%d), setting directly\n",
+ rc);
+ }
+#endif
+
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
@@ -3297,40 +3321,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_open(efx->net_dev);
efx_device_attach_if_not_resetting(efx);
-#ifdef CONFIG_SFC_SRIOV
- if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
-
- if (rc == -EPERM) {
- struct efx_nic *efx_pf;
-
- /* Switch to PF and change MAC address on vport */
- efx_pf = pci_get_drvdata(pci_dev_pf);
-
- rc = efx_ef10_sriov_set_vf_mac(efx_pf,
- nic_data->vf_index,
- efx->net_dev->dev_addr);
- } else if (!rc) {
- struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
- struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
- unsigned int i;
-
- /* MAC address successfully changed by VF (with MAC
- * spoofing) so update the parent PF if possible.
- */
- for (i = 0; i < efx_pf->vf_count; ++i) {
- struct ef10_vf *vf = nic_data->vf + i;
-
- if (vf->efx == efx) {
- ether_addr_copy(vf->mac,
- efx->net_dev->dev_addr);
- return 0;
- }
- }
- }
- } else
-#endif
if (rc == -EPERM) {
netif_err(efx, drv, efx->net_dev,
"Cannot change MAC address; use sfboot to enable"
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index be72e71da027..5f201a547e5b 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -162,9 +162,9 @@ struct efx_filter_spec {
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
- u32 vport_id;
u32 rss_context;
- __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ u32 vport_id;
+ __be16 outer_vid;
__be16 inner_vid;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 4826e6a7e4ce..9220afeddee8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -660,17 +660,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
return false;
- return memcmp(&left->outer_vid, &right->outer_vid,
+ return memcmp(&left->vport_id, &right->vport_id,
sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
+ offsetof(struct efx_filter_spec, vport_id)) == 0;
}
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
+ return jhash2((const u32 *)&spec->vport_id,
(sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ offsetof(struct efx_filter_spec, vport_id)) / 4,
0);
}
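The two sfc hunks belong together: efx_filter_spec_equal() and efx_filter_spec_hash() treat everything from a chosen member to the end of struct efx_filter_spec as the match key, so moving vport_id just ahead of outer_vid and starting the memcmp()/jhash2() at vport_id is what actually brings the vport into filter matching. A reduced sketch of that struct-tail idiom, using invented field names rather than the real efx layout:

#include <linux/build_bug.h>
#include <linux/jhash.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_spec {
        u32 id;                 /* not part of the match */
        u32 vport_id;           /* first matched field; 4-byte aligned for jhash2() */
        __be16 outer_vid;
        u8 mac[6];
};

static u32 demo_spec_hash(const struct demo_spec *s)
{
        BUILD_BUG_ON(offsetof(struct demo_spec, vport_id) & 3);
        return jhash2((const u32 *)&s->vport_id,
                      (sizeof(*s) - offsetof(struct demo_spec, vport_id)) / 4, 0);
}

Any member declared after the chosen starting field is automatically part of both the comparison and the hash, which is why the ordering inside the struct matters here.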
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 65c96773c6d2..8273e6a175c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1214,6 +1214,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (priv->plat->tx_queues_to_use > 1)
priv->phylink_config.mac_capabilities &=
~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ priv->phylink_config.mac_managed_pm = true;
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 62deed210a95..1c16548415cd 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1328,7 +1328,7 @@ static int happy_meal_init(struct happy_meal *hp)
void __iomem *erxregs = hp->erxregs;
void __iomem *bregs = hp->bigmacregs;
void __iomem *tregs = hp->tcvregs;
- const char *bursts;
+ const char *bursts = "64";
u32 regtmp, rxcfg;
/* If auto-negotiation timer is running, kill it. */
@@ -2896,8 +2896,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0), DRV_NAME);
- if (IS_ERR(hpreg_res)) {
- err = PTR_ERR(hpreg_res);
+ if (!hpreg_res) {
+ err = -EBUSY;
dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
goto err_out_clear_quattro;
}
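devm_request_region(), like request_region(), reports failure by returning NULL rather than an ERR_PTR()-encoded pointer, so the old IS_ERR()/PTR_ERR() handling could never trigger and would have propagated a bogus error code. A hedged sketch of the checking idiom, with an invented driver name:

#include <linux/ioport.h>

/* Illustrative only; the "demo" names are invented. */
static int demo_claim_io(struct device *dev, resource_size_t start,
                         resource_size_t len)
{
        struct resource *res;

        res = devm_request_region(dev, start, len, "demo");
        if (!res)               /* NULL, not ERR_PTR(), signals failure */
                return -EBUSY;
        return 0;
}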
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 3cbe4ec46234..7f86068f3ff6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2476,7 +2476,10 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
port = am65_common_get_port(common, i);
dl_port = &port->devlink_port;
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ if (port->ndev)
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ else
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
attrs.phys.port_number = port->port_id;
attrs.switch_id.id_len = sizeof(resource_size_t);
memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 3e69079ed694..791b4a53d69f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -438,7 +438,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
if ((--bc->hdlctx.slotcnt) > 0)
return 0;
bc->hdlctx.slotcnt = bc->ch_params.slottime;
- if ((prandom_u32() % 256) > bc->ch_params.ppersist)
+ if (get_random_u8() > bc->ch_params.ppersist)
return 0;
}
}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index a6184d6c7b15..2263029d1a20 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -377,7 +377,7 @@ void hdlcdrv_arbitrate(struct net_device *dev, struct hdlcdrv_state *s)
if ((--s->hdlctx.slotcnt) > 0)
return;
s->hdlctx.slotcnt = s->ch_params.slottime;
- if ((prandom_u32() % 256) > s->ch_params.ppersist)
+ if (get_random_u8() > s->ch_params.ppersist)
return;
start_tx(dev, s);
}
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 980f2be32f05..2ed2f836f09a 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -626,7 +626,7 @@ static void yam_arbitrate(struct net_device *dev)
yp->slotcnt = yp->slot / 10;
/* is random > persist ? */
- if ((prandom_u32() % 256) > yp->pers)
+ if (get_random_u8() > yp->pers)
return;
yam_start_tx(dev, yp);
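baycom_epp, hdlcdrv and yam all implement the same p-persistent CSMA step: in each slot, transmit only if a random byte does not exceed the configured persistence value. get_random_u8() yields a uniform value in 0..255, so the comparison keeps the old probability while dropping the open-coded prandom_u32() % 256. A standalone sketch of the decision, assuming a persistence parameter already clamped to 0..255:

#include <linux/random.h>

/* Transmit with probability (ppersist + 1) / 256 in this slot. */
static bool ppersist_slot_wins(u8 ppersist)
{
        return get_random_u8() <= ppersist;
}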
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 25b38a374e3c..dd5919ec408b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -1051,7 +1051,8 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
-
+ /* completion variable to confirm vf association */
+ struct completion vf_add;
/* Is the current data path through the VF NIC? */
bool data_path_is_vf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f066de0da492..9352dad58996 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1580,6 +1580,10 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+ if (net_device_ctx->vf_alloc)
+ complete(&net_device_ctx->vf_add);
+
netdev_info(ndev, "VF slot %u %s\n",
net_device_ctx->vf_serial,
net_device_ctx->vf_alloc ? "added" : "removed");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5f08482065ca..89eb4f179a3c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2313,6 +2313,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
}
+ /* Fallback path to check synthetic vf with
+ * help of mac addr
+ */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+ netdev_notice(vf_netdev,
+ "falling back to mac addr based matching\n");
+ return ndev;
+ }
+ }
+
netdev_notice(vf_netdev,
"no netdev found for vf serial:%u\n", serial);
return NULL;
@@ -2409,6 +2421,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
+ if (vf_is_up && !net_device_ctx->vf_alloc) {
+ netdev_info(ndev, "Waiting for the VF association from host\n");
+ wait_for_completion(&net_device_ctx->vf_add);
+ }
+
ret = netvsc_switch_datapath(ndev, vf_is_up);
if (ret) {
@@ -2440,6 +2457,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netvsc_vf_setxdp(vf_netdev, NULL);
+ reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2479,6 +2497,7 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
+ init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
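The netvsc changes form a standard completion handshake: probe initialises the completion, the channel callback completes it when the host announces the VF association, the data-path switch waits on it if the announcement has not arrived yet, and VF unregistration re-arms it for the next device. A reduced sketch of that lifecycle with invented names:

#include <linux/completion.h>

struct demo_ctx {
        struct completion vf_add;
        bool vf_alloc;
};

static void demo_probe(struct demo_ctx *ctx)
{
        init_completion(&ctx->vf_add);          /* probe time */
}

static void demo_host_event(struct demo_ctx *ctx, bool allocated)
{
        ctx->vf_alloc = allocated;
        if (allocated)
                complete(&ctx->vf_add);         /* wake any waiter */
}

static void demo_switch_datapath(struct demo_ctx *ctx)
{
        if (!ctx->vf_alloc)
                wait_for_completion(&ctx->vf_add);      /* block until announced */
}

static void demo_unregister_vf(struct demo_ctx *ctx)
{
        reinit_completion(&ctx->vf_add);        /* arm again for the next VF */
}

Note that wait_for_completion() is uninterruptible, so the hunk above assumes the host will eventually follow the VF coming up with an association message.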
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 11f767a20444..eea777ec2541 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>
+#include <linux/string.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
@@ -335,9 +336,10 @@ static void rndis_filter_receive_response(struct net_device *ndev,
if (resp->msg_len <=
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
- memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
data + RNDIS_HEADER_SIZE + sizeof(*req_id),
- resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id));
+ resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
+ "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
if (request->request_msg.ndis_msg_type ==
RNDIS_MSG_QUERY && request->request_msg.msg.
query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
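unsafe_memcpy() is the FORTIFY_SOURCE escape hatch from <linux/string.h>: it compiles down to a plain memcpy() but takes an extra justification argument and suppresses the bounds check that would otherwise complain, because this copy deliberately runs past the declared size of response_msg into the RNDIS_EXT_LEN scratch area that follows it in struct rndis_request. A minimal sketch of the pattern, with invented structure names:

#include <linux/string.h>
#include <linux/types.h>

struct demo_reply {
        u32 hdr;
};

struct demo_request {
        struct demo_reply reply;
        u8 ext[64];             /* scratch space directly after reply */
};

static void demo_copy_reply(struct demo_request *req, const void *data, size_t len)
{
        /* len may exceed sizeof(req->reply); the overflow lands in req->ext. */
        unsafe_memcpy(&req->reply, data, len,
                      "reply is followed by ext[] inside demo_request");
}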
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 713e3354cb2e..c5cfe8555199 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -361,7 +361,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
- schedule_work(&port->bc_work);
+ queue_work(system_unbound_wq, &port->bc_work);
if (err)
goto free_nskb;
@@ -1192,7 +1192,7 @@ void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->min_mtu = 0;
+ /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 9e9adde335c8..349b7b1dbbf2 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1758,7 +1758,7 @@ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev)
{
- u16 seed_value = (prandom_u32() % QCA808X_MASTER_SLAVE_SEED_RANGE);
+ u16 seed_value = prandom_u32_max(QCA808X_MASTER_SLAVE_SEED_RANGE);
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
QCA808X_MASTER_SLAVE_SEED_CFG,
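prandom_u32_max(n) returns a value in [0, n); it is the kernel's preferred replacement for the open-coded prandom_u32() % n, using a widening multiply and shift internally rather than a division. A tiny sketch of the substitution, with a hypothetical bound:

#include <linux/prandom.h>

static u32 demo_pick_below(u32 bound)
{
        return prandom_u32_max(bound);  /* was: prandom_u32() % bound */
}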
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 8549e0e356c9..b60db8b6f477 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -254,8 +254,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_EEE_ERROR_CHANGE_INT_EN);
if (!dp83822->fx_enabled)
- misr_status |= DP83822_MDI_XOVER_INT_EN |
- DP83822_ANEG_ERR_INT_EN |
+ misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 6939563d3b7c..417527f8bbf5 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -853,6 +853,14 @@ static int dp83867_config_init(struct phy_device *phydev)
else
val &= ~DP83867_SGMII_TYPE;
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+ /* This is a SW workaround for link instability if RX_CTRL is
+ * not strapped to mode 3 or 4 in HW. This is required for SGMII
+ * in addition to clearing bit 7, handled above.
+ */
+ if (dp83867->rxctrl_strap_quirk)
+ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ BIT(8));
}
val = phy_read(phydev, DP83867_CFG3);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3757e069c486..54a17b576eac 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1838,7 +1838,7 @@ static int ksz886x_cable_test_start(struct phy_device *phydev)
return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100);
}
-static int ksz886x_cable_test_result_trans(u16 status, u16 mask)
+static __always_inline int ksz886x_cable_test_result_trans(u16 status, u16 mask)
{
switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_NORMAL:
@@ -1854,13 +1854,13 @@ static int ksz886x_cable_test_result_trans(u16 status, u16 mask)
}
}
-static bool ksz886x_cable_test_failed(u16 status, u16 mask)
+static __always_inline bool ksz886x_cable_test_failed(u16 status, u16 mask)
{
return FIELD_GET(mask, status) ==
KSZ8081_LMD_STAT_FAIL;
}
-static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
+static __always_inline bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
{
switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_OPEN:
@@ -1871,7 +1871,8 @@ static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
return false;
}
-static int ksz886x_cable_test_fault_length(struct phy_device *phydev, u16 status, u16 data_mask)
+static __always_inline int ksz886x_cable_test_fault_length(struct phy_device *phydev,
+ u16 status, u16 data_mask)
{
int dt;
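The likely reason for the __always_inline annotations: FIELD_GET() from <linux/bitfield.h> insists, via a BUILD_BUG_ON in __BF_FIELD_CHECK, that its mask argument be a compile-time constant. When these small helpers are guaranteed to be inlined, the constant masks passed by their callers are still visible to that check; left out of line, the mask would be an ordinary variable and the build would break. A reduced sketch of the constraint with an invented register field:

#include <linux/bitfield.h>

#define DEMO_STAT_MASK  GENMASK(10, 8)  /* invented field */

/* __always_inline so that, after constant propagation, FIELD_GET()
 * still sees a compile-time-constant mask at every call site.
 */
static __always_inline unsigned int demo_stat(u16 status, u16 mask)
{
        return FIELD_GET(mask, status);
}

static unsigned int demo_read_stat(u16 status)
{
        return demo_stat(status, DEMO_STAT_MASK);
}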
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 75464df191ef..6547b6cc6cbe 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1661,6 +1661,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
if (phy_interrupt_is_valid(phy))
phy_request_interrupt(phy);
+ if (pl->config->mac_managed_pm)
+ phy->mac_managed_pm = true;
+
return 0;
}
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 29e3fa86bac3..daac293e8ede 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -257,6 +257,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
phylink_set(modes, 100000baseSR4_Full);
phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
break;
case SFF8024_ECC_100GBASE_LR4_25GBASE_LR:
case SFF8024_ECC_100GBASE_ER4_25GBASE_ER:
@@ -268,6 +269,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFF8024_ECC_25GBASE_CR_S:
case SFF8024_ECC_25GBASE_CR_N:
phylink_set(modes, 25000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
break;
case SFF8024_ECC_10GBASE_T_SFI:
case SFF8024_ECC_10GBASE_T_SR:
@@ -276,6 +278,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
break;
case SFF8024_ECC_5GBASE_T:
phylink_set(modes, 5000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, interfaces);
break;
case SFF8024_ECC_2_5GBASE_T:
phylink_set(modes, 2500baseT_Full);
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 73d163704068..687dec49c1e1 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -14,6 +14,7 @@ if PSE_CONTROLLER
config PSE_REGULATOR
tristate "Regulator based PSE controller"
+ depends on REGULATOR || COMPILE_TEST
help
This module provides support for simple regulator based Ethernet Power
Sourcing Equipment without automatic classification support. For
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f8221a7acf62..ce1f6081d582 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1380,7 +1380,8 @@ static void hso_serial_cleanup(struct tty_struct *tty)
}
/* setup the term */
-static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void hso_serial_set_termios(struct tty_struct *tty,
+ const struct ktermios *old)
{
struct hso_serial *serial = tty->driver_data;
unsigned long flags;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e0e57083d442..7106932c6f88 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -225,6 +225,9 @@ struct virtnet_info {
/* I like... big packets and I cannot lie! */
bool big_packets;
+ /* number of sg entries allocated for big packets */
+ unsigned int big_packets_num_skbfrags;
+
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
@@ -1331,10 +1334,10 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
char *p;
int i, err, offset;
- sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
+ sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
- /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
- for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
+ for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
first = get_a_page(rq, gfp);
if (!first) {
if (list)
@@ -1365,7 +1368,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
first, gfp);
if (err < 0)
give_pages(rq, first);
@@ -3682,13 +3685,35 @@ static int virtnet_validate(struct virtio_device *vdev)
return 0;
}
+static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
+{
+ return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO);
+}
+
+static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
+{
+ bool guest_gso = virtnet_check_guest_gso(vi);
+
+ /* If device can receive ANY guest GSO packets, regardless of mtu,
+ * allocate packets of maximum size, otherwise limit it to only
+ * mtu size worth only.
+ */
+ if (mtu > ETH_DATA_LEN || guest_gso) {
+ vi->big_packets = true;
+ vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
+ }
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err = -ENOMEM;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
- int mtu;
+ int mtu = 0;
/* Find if host supports multiqueue/rss virtio_net device */
max_queue_pairs = 1;
@@ -3776,13 +3801,6 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
spin_lock_init(&vi->refill_lock);
- /* If we can receive ANY GSO packets, we must allocate large ones. */
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
- vi->big_packets = true;
-
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
@@ -3848,12 +3866,10 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->mtu = mtu;
dev->max_mtu = mtu;
-
- /* TODO: size buffers correctly in this case. */
- if (dev->mtu > ETH_DATA_LEN)
- vi->big_packets = true;
}
+ virtnet_set_big_packets(vi, mtu);
+
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
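Taken together, the virtio_net hunks size the "big packet" receive buffers from the negotiated MTU instead of always reserving MAX_SKB_FRAGS pages: any guest GSO feature still forces the maximum, but an MTU-only configuration needs far fewer scatter-gather entries per buffer. A worked sketch of the sizing rule (illustrative numbers, invented name):

#include <linux/skbuff.h>

static unsigned int demo_big_sg_entries(unsigned int mtu, bool guest_gso)
{
        /* e.g. mtu = 9000 with 4 KiB pages and no guest GSO:
         *   DIV_ROUND_UP(9000, 4096) = 3 entries
         * any guest GSO feature offered:
         *   MAX_SKB_FRAGS entries (typically 17 with 4 KiB pages)
         * add_recvbuf_big() then passes n + 2 entries to
         * virtqueue_add_inbuf(), as the hunk above shows.
         */
        return guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
}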
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 41db10f9be49..19eac00b2381 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -284,7 +284,7 @@ static __init bool randomized_test(void)
mutex_lock(&mutex);
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
- prandom_bytes(ip, 4);
+ get_random_bytes(ip, 4);
cidr = prandom_u32_max(32) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
@@ -299,7 +299,7 @@ static __init bool randomized_test(void)
}
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
memcpy(mutated, ip, 4);
- prandom_bytes(mutate_mask, 4);
+ get_random_bytes(mutate_mask, 4);
mutate_amount = prandom_u32_max(32);
for (k = 0; k < mutate_amount / 8; ++k)
mutate_mask[k] = 0xff;
@@ -310,7 +310,7 @@ static __init bool randomized_test(void)
for (k = 0; k < 4; ++k)
mutated[k] = (mutated[k] & mutate_mask[k]) |
(~mutate_mask[k] &
- prandom_u32_max(256));
+ get_random_u8());
cidr = prandom_u32_max(32) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v4(&t,
@@ -328,7 +328,7 @@ static __init bool randomized_test(void)
}
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
- prandom_bytes(ip, 16);
+ get_random_bytes(ip, 16);
cidr = prandom_u32_max(128) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
@@ -343,7 +343,7 @@ static __init bool randomized_test(void)
}
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
memcpy(mutated, ip, 16);
- prandom_bytes(mutate_mask, 16);
+ get_random_bytes(mutate_mask, 16);
mutate_amount = prandom_u32_max(128);
for (k = 0; k < mutate_amount / 8; ++k)
mutate_mask[k] = 0xff;
@@ -354,7 +354,7 @@ static __init bool randomized_test(void)
for (k = 0; k < 4; ++k)
mutated[k] = (mutated[k] & mutate_mask[k]) |
(~mutate_mask[k] &
- prandom_u32_max(256));
+ get_random_u8());
cidr = prandom_u32_max(128) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v6(&t,
@@ -381,13 +381,13 @@ static __init bool randomized_test(void)
for (j = 0;; ++j) {
for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 4);
+ get_random_bytes(ip, 4);
if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
pr_err("allowedips random v4 self-test: FAIL\n");
goto free;
}
- prandom_bytes(ip, 16);
+ get_random_bytes(ip, 16);
if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
pr_err("allowedips random v6 self-test: FAIL\n");
goto free;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 84d956ad4093..2d1e3fd9b526 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2081,7 +2081,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
enum nl80211_band band;
- u16 *he_mcs_mask;
+ u16 he_mcs_mask[NL80211_HE_NSS_MAX];
u8 max_nss, he_mcs;
u16 he_tx_mcs = 0, v = 0;
int i, he_nss, nss_idx;
@@ -2098,7 +2098,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
return;
band = def.chan->band;
- he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+ memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs,
+ sizeof(he_mcs_mask));
if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
return;
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index cb5414265a9b..58c0ab01771b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -83,7 +83,8 @@ static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
if (!wait || !max || likely(bytes_read) || fail_stats > 110)
break;
- msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
+ if (hwrng_msleep(rng, ath9k_rng_delay_get(++fail_stats)))
+ break;
}
if (wait && !bytes_read && max)
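hwrng_msleep() comes from the hw_random core changes in this same series (see the drivers/char/hw_random/core.c entry in the diffstat): it sleeps roughly like msleep_interruptible(), but it is also woken early when the RNG is being unregistered, and a non-zero return is the cue to stop retrying. A hedged sketch of the retry shape; demo_read_once() is an invented placeholder for the hardware access:

#include <linux/hw_random.h>

static size_t demo_read_once(void *buf, size_t max);    /* hypothetical helper */

static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
        size_t got;
        unsigned int fails = 0;

        while (!(got = demo_read_once(buf, max))) {
                if (!wait)
                        return 0;
                /* Non-zero: the core is tearing this RNG down (or a signal
                 * is pending), so give up instead of sleeping again.
                 */
                if (hwrng_msleep(rng, 10 * ++fails))
                        return 0;
        }
        return got;
}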
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 479041f070f9..10d9d9c63b28 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1128,7 +1128,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work)
if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
/* 100ms ~ 300ms */
err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
- 100 * (1 + prandom_u32() % 3));
+ 100 * (1 + prandom_u32_max(3)));
else
err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index d0a7465be586..170c61c8136c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -177,7 +177,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++) {
pfn_mac.mac[i] &= mac_mask[i];
- pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
+ pfn_mac.mac[i] |= get_random_u8() & ~(mac_mask[i]);
}
/* Clear multi bit */
pfn_mac.mac[0] &= 0xFE;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index ed586e6d7d64..de0c545d50fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1099,7 +1099,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
iwl_mvm_mac_ap_iterator, &data);
if (data.beacon_device_ts) {
- u32 rand = (prandom_u32() % (64 - 36)) + 36;
+ u32 rand = prandom_u32_max(64 - 36) + 36;
mvmvif->ap_beacon_time = data.beacon_device_ts +
ieee80211_tu_to_usec(data.beacon_int * rand /
100);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index cc92706b3d16..cbd8053a9e35 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -384,6 +384,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
iwl_mvm_txq_from_tid(sta, tid);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -478,6 +479,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index df51b5b1f171..a40636c90ec3 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4973,6 +4973,8 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
}
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
+ if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates)
+ goto out;
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
hdr = (void *)skb->data;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 535995e8279f..bcd564dc3554 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -239,7 +239,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
tx_info->pkt_len = pkt_len;
mwifiex_form_mgmt_frame(skb, buf, len);
- *cookie = prandom_u32() | 1;
+ *cookie = get_random_u32() | 1;
if (ieee80211_is_action(mgmt->frame_control))
skb = mwifiex_clone_skb_for_tx_status(priv,
@@ -303,7 +303,7 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
duration);
if (!ret) {
- *cookie = prandom_u32() | 1;
+ *cookie = get_random_u32() | 1;
priv->roc_cfg.cookie = *cookie;
priv->roc_cfg.chan = *chan;
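Both mwifiex call sites OR bit 0 into the freshly drawn value so the management-TX / remain-on-channel cookie handed back to cfg80211 can never be zero; only the random source changes in this hunk. As a minimal sketch:

#include <linux/random.h>

static u64 demo_new_cookie(void)
{
        return get_random_u32() | 1;    /* random, but guaranteed non-zero */
}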
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 4901aa02b4fb..7378c4d1e156 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -696,10 +696,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
skb_reserve(skb, q->buf_offset);
- if (q == &dev->q_rx[MT_RXQ_MCU]) {
- u32 *rxfce = (u32 *)skb->cb;
- *rxfce = info;
- }
+ *(u32 *)skb->cb = info;
__skb_put(skb, len);
done++;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index d6aae60c440d..2ce1705c0f43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -345,6 +345,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_status = *(u32 *)skb->cb;
bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
u16 hdr_gap;
int phy_idx;
@@ -394,7 +395,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
spin_unlock_bh(&dev->sta_poll_lock);
}
- if ((rxd0 & csum_mask) == csum_mask)
+ if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
@@ -610,14 +612,14 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
* When header translation failure is indicated,
* the hardware will insert an extra 2-byte field
* containing the data length after the protocol
- * type field.
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or if a VLAN header was
+ * detected.
*/
pad_start = 12;
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
pad_start += 4;
-
- if (get_unaligned_be16(skb->data + pad_start) !=
- skb->len - pad_start - 2)
+ else
pad_start = 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index be97dede2634..a4bcc617c1a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -233,6 +233,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
u8 remove_pad, amsdu_info;
u8 mode = 0, qos_ctl = 0;
struct mt7915_sta *msta = NULL;
+ u32 csum_status = *(u32 *)skb->cb;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
@@ -288,7 +289,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask)
+ if ((rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
@@ -446,14 +448,14 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
* When header translation failure is indicated,
* the hardware will insert an extra 2-byte field
* containing the data length after the protocol
- * type field.
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or if a VLAN header was
+ * detected.
*/
pad_start = 12;
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
pad_start += 4;
-
- if (get_unaligned_be16(skb->data + pad_start) !=
- skb->len - pad_start - 2)
+ else
pad_start = 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index e4868c492bc0..650ab97ae052 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -230,6 +230,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7921_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
+ u32 csum_status = *(u32 *)skb->cb;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
@@ -290,7 +291,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask)
+ if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e67cc7909bce..6c054850363f 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -60,14 +60,20 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
.skb = skb,
.info = IEEE80211_SKB_CB(skb),
};
+ struct ieee80211_rate_status rs = {};
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
struct mt76_wcid *wcid;
wcid = rcu_dereference(dev->wcid[cb->wcid]);
if (wcid) {
status.sta = wcid_to_sta(wcid);
- status.rates = NULL;
- status.n_rates = 0;
+ if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
+ rs.rate_idx = wcid->rate;
+ status.rates = &rs;
+ status.n_rates = 1;
+ } else {
+ status.n_rates = 0;
+ }
}
hw = mt76_tx_status_get_hw(dev, skb);
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index b89047965e78..9bbfff803357 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1161,7 +1161,7 @@ static int mgmt_tx(struct wiphy *wiphy,
const u8 *vendor_ie;
int ret = 0;
- *cookie = prandom_u32();
+ *cookie = get_random_u32();
priv->tx_cookie = *cookie;
mgmt = (const struct ieee80211_mgmt *)buf;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index bfdf03bfa6c5..73e6f9408b51 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -449,7 +449,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
- u32 short_cookie = prandom_u32();
+ u32 short_cookie = get_random_u32();
u16 flags = 0;
u16 freq;
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 5a3e7a626702..4a9e4b5d3547 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -1594,7 +1594,7 @@ static int cw1200_get_prio_queue(struct cw1200_common *priv,
edca = &priv->edca.params[i];
score = ((edca->aifns + edca->cwmin) << 16) +
((edca->cwmax - edca->cwmin) *
- (get_random_int() & 0xFFFF));
+ get_random_u16());
if (score < best && (winner < 0 || i != 3)) {
best = score;
winner = i;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 3e3922d4c788..28c0f06e311f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6100,7 +6100,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
wl1271_warning("Fuse mac address is zero. using random mac");
/* Use TI oui and a random nic */
oui_addr = WLCORE_TI_OUI_ADDRESS;
- nic_addr = get_random_int();
+ nic_addr = get_random_u32();
} else {
oui_addr = wl->fuse_oui_addr;
/* fuse has the BD_ADDR, the WLAN addresses are the next two */
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index b8c7843730ed..62e9f7d6c9fe 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -13,6 +13,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/wwan.h>
#include <net/rtnetlink.h>
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index ff09a8cedf93..2397a903d8f5 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -311,7 +311,7 @@ err_unreg_dev:
return ERR_PTR(err);
err_free_dev:
- kfree(dev);
+ put_device(&dev->dev);
return ERR_PTR(err);
}
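Once device_register() (or device_initialize()) has run on an embedded struct device, the documented rule is that the memory may only be released through put_device(), which drops the kobject reference and invokes the device's ->release() callback; a direct kfree() on that error path frees memory the driver core may still reference. A reduced sketch of the pattern with invented names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct demo_dev {
        struct device dev;
};

static void demo_release(struct device *dev)
{
        kfree(container_of(dev, struct demo_dev, dev));
}

static struct demo_dev *demo_create(struct device *parent)
{
        struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);
        int err;

        if (!d)
                return ERR_PTR(-ENOMEM);

        d->dev.parent = parent;
        d->dev.release = demo_release;
        device_initialize(&d->dev);
        dev_set_name(&d->dev, "demo0");

        err = device_add(&d->dev);
        if (err) {
                /* Not kfree(): drop the reference so demo_release() runs. */
                put_device(&d->dev);
                return ERR_PTR(err);
        }
        return d;
}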
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index bbe5099c836d..c60ec0b373c5 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -170,15 +170,12 @@ EXPORT_SYMBOL(nvdimm_namespace_disk_name);
const uuid_t *nd_dev_to_uuid(struct device *dev)
{
- if (!dev)
- return &uuid_null;
-
- if (is_namespace_pmem(dev)) {
+ if (dev && is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nspm->uuid;
- } else
- return &uuid_null;
+ }
+ return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);
@@ -388,7 +385,7 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
*
* BLK-space is valid as long as it does not precede a PMEM
* allocation in a given region. PMEM-space must be contiguous
- * and adjacent to an existing existing allocation (if one
+ * and adjacent to an existing allocation (if one
* exists). If reserving PMEM any space is valid.
*/
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
@@ -839,7 +836,6 @@ static ssize_t size_store(struct device *dev,
{
struct nd_region *nd_region = to_nd_region(dev->parent);
unsigned long long val;
- uuid_t **uuid = NULL;
int rc;
rc = kstrtoull(buf, 0, &val);
@@ -853,16 +849,12 @@ static ssize_t size_store(struct device *dev,
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
- if (is_namespace_pmem(dev)) {
+ /* setting size zero == 'delete namespace' */
+ if (rc == 0 && val == 0 && is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
- uuid = &nspm->uuid;
- }
-
- if (rc == 0 && val == 0 && uuid) {
- /* setting size zero == 'delete namespace' */
- kfree(*uuid);
- *uuid = NULL;
+ kfree(nspm->uuid);
+ nspm->uuid = NULL;
}
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ec5219680092..85ca5b4da3cf 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
-#define MAX_STRUCT_PAGE_SIZE 64
+#define MAX_STRUCT_PAGE_SIZE 128
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0e92ab4b3283..61af072ac98f 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -787,7 +787,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*
- * Also make sure size of struct page is less than 64. We
+ * Also make sure size of struct page is less than 128. We
* want to make sure we use large enough size here so that
* we don't have a dynamic reserve space depending on
* struct page size. But we also want to make sure we notice
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 473a71bbd9c9..e0875d369762 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -509,16 +509,13 @@ static ssize_t align_store(struct device *dev,
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long val, dpa;
- u32 remainder;
+ u32 mappings, remainder;
int rc;
rc = kstrtoul(buf, 0, &val);
if (rc)
return rc;
- if (!nd_region->ndr_mappings)
- return -ENXIO;
-
/*
* Ensure space-align is evenly divisible by the region
* interleave-width because the kernel typically has no facility
@@ -526,7 +523,8 @@ static ssize_t align_store(struct device *dev,
* contribute to the tail capacity in system-physical-address
* space for the namespace.
*/
- dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
+ mappings = max_t(u32, 1, nd_region->ndr_mappings);
+ dpa = div_u64_rem(val, mappings, &remainder);
if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
|| val > region_size(nd_region) || remainder)
return -EINVAL;
@@ -1096,7 +1094,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
return rc;
}
/**
- * nvdimm_flush - flush any posted write queues between the cpu and pmem media
+ * generic_nvdimm_flush() - flush any posted write queues between the cpu and pmem media
* @nd_region: interleaved pmem region
*/
int generic_nvdimm_flush(struct nd_region *nd_region)
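A region with no DIMM mappings (ndr_mappings == 0) previously made this store bail out with -ENXIO; clamping the divisor to at least 1 keeps the alignment check usable there, and div_u64_rem() requires a non-zero 32-bit divisor in any case. A small worked sketch of the validation, with illustrative numbers:

#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/mm.h>

/* e.g. val = 16 MiB, mappings = 4 -> dpa = 4 MiB, remainder = 0: accepted.
 *      val = 16 MiB, mappings = 0 -> divisor clamped to 1, dpa = 16 MiB.
 */
static bool demo_align_ok(unsigned long long val, u32 ndr_mappings, u64 region_size)
{
        u32 mappings = max_t(u32, 1, ndr_mappings);
        u32 remainder;
        u64 dpa = div_u64_rem(val, mappings, &remainder);

        return is_power_of_2(dpa) && dpa >= PAGE_SIZE &&
               val <= region_size && !remainder;
}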
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index b5aa55c61461..8aefb60c42ff 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -408,7 +408,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return rc;
}
-void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
+static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
int rc;
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 04bd28f17dcc..d90e4f0c08b7 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -23,7 +23,7 @@ u32 nvme_auth_get_seqnum(void)
mutex_lock(&nvme_dhchap_mutex);
if (!nvme_dhchap_seqnum)
- nvme_dhchap_seqnum = prandom_u32();
+ nvme_dhchap_seqnum = get_random_u32();
else {
nvme_dhchap_seqnum++;
if (!nvme_dhchap_seqnum)
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 5fc5ea196b40..ff8b083dc5c6 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1039,6 +1039,8 @@ static void apple_nvme_reset_work(struct work_struct *work)
dma_max_mapping_size(anv->dev) >> 9);
anv->ctrl.max_segments = NVME_MAX_SEGS;
+ dma_set_max_seg_size(anv->dev, 0xffffffff);
+
/*
* Enable NVMMU and linear submission queues.
* While we could keep those disabled and pretend this is slightly
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 059737c1a2c1..dc4220600585 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3262,8 +3262,12 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
return ret;
if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+ /*
+ * Do not return errors unless we are in a controller reset,
+ * the controller works perfectly fine without hwmon.
+ */
ret = nvme_hwmon_init(ctrl);
- if (ret < 0)
+ if (ret == -EINTR)
return ret;
}
@@ -4846,7 +4850,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
return 0;
out_cleanup_admin_q:
- blk_mq_destroy_queue(ctrl->fabrics_q);
+ blk_mq_destroy_queue(ctrl->admin_q);
out_free_tagset:
blk_mq_free_tag_set(ctrl->admin_tagset);
return ret;
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 0a586d712920..9e6e56c20ec9 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -12,7 +12,7 @@
struct nvme_hwmon_data {
struct nvme_ctrl *ctrl;
- struct nvme_smart_log log;
+ struct nvme_smart_log *log;
struct mutex read_lock;
};
@@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
{
return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
- NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
+ NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
}
static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct nvme_hwmon_data *data = dev_get_drvdata(dev);
- struct nvme_smart_log *log = &data->log;
+ struct nvme_smart_log *log = data->log;
int temp;
int err;
@@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
case hwmon_temp_max:
case hwmon_temp_min:
if ((!channel && data->ctrl->wctemp) ||
- (channel && data->log.temp_sensor[channel - 1])) {
+ (channel && data->log->temp_sensor[channel - 1])) {
if (data->ctrl->quirks &
NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
return 0444;
@@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
break;
case hwmon_temp_input:
case hwmon_temp_label:
- if (!channel || data->log.temp_sensor[channel - 1])
+ if (!channel || data->log->temp_sensor[channel - 1])
return 0444;
break;
default:
@@ -230,7 +230,13 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- return 0;
+ return -ENOMEM;
+
+ data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+ if (!data->log) {
+ err = -ENOMEM;
+ goto err_free_data;
+ }
data->ctrl = ctrl;
mutex_init(&data->read_lock);
@@ -238,8 +244,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
err = nvme_hwmon_get_smart_log(data);
if (err) {
dev_warn(dev, "Failed to read smart log (error %d)\n", err);
- kfree(data);
- return err;
+ goto err_free_log;
}
hwmon = hwmon_device_register_with_info(dev, "nvme",
@@ -247,11 +252,17 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
NULL);
if (IS_ERR(hwmon)) {
dev_warn(dev, "Failed to instantiate hwmon device\n");
- kfree(data);
- return PTR_ERR(hwmon);
+ err = PTR_ERR(hwmon);
+ goto err_free_log;
}
ctrl->hwmon_device = hwmon;
return 0;
+
+err_free_log:
+ kfree(data->log);
+err_free_data:
+ kfree(data);
+ return err;
}
void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
@@ -262,6 +273,7 @@ void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
hwmon_device_unregister(ctrl->hwmon_device);
ctrl->hwmon_device = NULL;
+ kfree(data->log);
kfree(data);
}
}
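Two related fixes are folded into the hwmon hunks: the SMART log becomes its own kzalloc() so the buffer passed to nvme_get_log() is a standalone allocation rather than a field embedded in nvme_hwmon_data (embedding a command buffer in a larger structure is fragile on some platforms), and the cleanup is restructured into the usual reverse-order goto unwind so each failure point frees exactly what was set up before it. A reduced sketch of that unwind shape with invented names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_log { u8 raw[512]; };
struct demo_data { struct demo_log *log; };

static int demo_register(struct device *dev, struct demo_data *data);  /* hypothetical */

static int demo_init(struct device *dev)
{
        struct demo_data *data;
        int err;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
        if (!data->log) {
                err = -ENOMEM;
                goto err_free_data;
        }

        err = demo_register(dev, data);
        if (err)
                goto err_free_log;

        return 0;

err_free_log:
        kfree(data->log);
err_free_data:
        kfree(data);
        return err;
}

Relatedly, nvme_hwmon_init() now returns -ENOMEM on allocation failure while its caller in core.c only treats -EINTR as fatal, so hwmon setup problems no longer abort controller initialisation.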
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 00f2f81e20fa..0ea7e441e080 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -182,6 +182,7 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
for_each_node(node)
rcu_assign_pointer(head->current_path[node], NULL);
+ kblockd_schedule_work(&head->requeue_work);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5b796efa325b..31e577b01257 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3511,6 +3511,16 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
@@ -3521,12 +3531,16 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5ad0ab2853a4..6e079abb22ee 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -996,7 +996,7 @@ static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- cancel_work_sync(&ctrl->err_work);
+ flush_work(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->reconnect_work);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 93e2e313fa70..1eed0fc26b3a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2181,7 +2181,7 @@ out_fail:
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
- cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+ flush_work(&to_tcp_ctrl(ctrl)->err_work);
cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index e34a2896fedb..9443ee1d4ae3 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1290,12 +1290,8 @@ static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
const char *page, size_t cnt)
{
- struct nvmet_port *port = to_nvmet_port(item);
u16 qid_max;
- if (nvmet_is_port_enabled(port, __func__))
- return -EACCES;
-
if (sscanf(page, "%hu\n", &qid_max) != 1)
return -EINVAL;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 14677145bbba..aecb5853f8da 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1176,7 +1176,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* reset the keep alive timer when the controller is enabled.
*/
if (ctrl->kato)
- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index d72d879a6d34..ec8a49c04003 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -21,6 +21,40 @@ config NVMEM_SYSFS
This interface is mostly used by userspace applications to
read/write directly into nvmem.
+# Devices
+
+config NVMEM_APPLE_EFUSES
+ tristate "Apple eFuse support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Say y here to enable support for reading eFuses on Apple SoCs
+ such as the M1. These are e.g. used to store factory programmed
+ calibration data required for the PCIe or the USB-C PHY.
+
+ This driver can also be built as a module. If so, the module will
+ be called nvmem-apple-efuses.
+
+config NVMEM_BCM_OCOTP
+ tristate "Broadcom On-Chip OTP Controller support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM
+ default ARCH_BCM_IPROC
+ help
+ Say y here to enable read/write access to the Broadcom OTP
+ controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-bcm-ocotp.
+
+config NVMEM_BRCM_NVRAM
+ tristate "Broadcom's NVRAM support"
+ depends on ARCH_BCM_5301X || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver provides support for Broadcom's NVRAM that can be accessed
+ using I/O mapping.
+
config NVMEM_IMX_IIM
tristate "i.MX IC Identification Module support"
depends on ARCH_MXC || COMPILE_TEST
@@ -52,7 +86,7 @@ config NVMEM_IMX_OCOTP_SCU
This is a driver for the SCU On-Chip OTP Controller (OCOTP)
available on i.MX8 SoCs.
-config JZ4780_EFUSE
+config NVMEM_JZ4780_EFUSE
tristate "JZ4780 EFUSE Memory Support"
depends on MACH_INGENIC || COMPILE_TEST
depends on HAS_IOMEM
@@ -64,6 +98,27 @@ config JZ4780_EFUSE
To compile this driver as a module, choose M here: the module
will be called nvmem_jz4780_efuse.
+config NVMEM_LAN9662_OTPC
+ tristate "Microchip LAN9662 OTP controller support"
+ depends on SOC_LAN966 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver enables the OTP controller available on Microchip LAN9662
+ SoCs. It controls the access to the OTP memory connected to it.
+
+config NVMEM_LAYERSCAPE_SFP
+ tristate "Layerscape SFP (Security Fuse Processor) support"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on HAS_IOMEM
+ select REGMAP_MMIO
+ help
+ This driver provides support to read the eFuses on Freescale
+ Layerscape SoC's. For example, the vendor provides a per part
+ unique ID there.
+
+ This driver can also be built as a module. If so, the module
+ will be called layerscape-sfp.
+
config NVMEM_LPC18XX_EEPROM
tristate "NXP LPC18XX EEPROM Memory Support"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -84,19 +139,34 @@ config NVMEM_LPC18XX_OTP
To compile this driver as a module, choose M here: the module
will be called nvmem_lpc18xx_otp.
-config NVMEM_MXS_OCOTP
- tristate "Freescale MXS On-Chip OTP Memory Support"
- depends on ARCH_MXS || COMPILE_TEST
- depends on HAS_IOMEM
+config NVMEM_MESON_EFUSE
+ tristate "Amlogic Meson GX eFuse Support"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
help
- If you say Y here, you will get readonly access to the
- One Time Programmable memory pages that are stored
- on the Freescale i.MX23/i.MX28 processor.
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson GX SoCs.
This driver can also be built as a module. If so, the module
- will be called nvmem-mxs-ocotp.
+ will be called nvmem_meson_efuse.
+
+config NVMEM_MESON_MX_EFUSE
+ tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson6, Meson8 and Meson8b SoCs.
-config MTK_EFUSE
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_mx_efuse.
+
+config NVMEM_MICROCHIP_OTPC
+ tristate "Microchip OTPC support"
+ depends on ARCH_AT91 || COMPILE_TEST
+ help
+ This driver enables the OTP controller available on Microchip SAMA7G5
+ SoCs. It controls the access to the OTP memory connected to it.
+
+config NVMEM_MTK_EFUSE
tristate "Mediatek SoCs EFUSE support"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
@@ -107,12 +177,17 @@ config MTK_EFUSE
This driver can also be built as a module. If so, the module
will be called efuse-mtk.
-config MICROCHIP_OTPC
- tristate "Microchip OTPC support"
- depends on ARCH_AT91 || COMPILE_TEST
+config NVMEM_MXS_OCOTP
+ tristate "Freescale MXS On-Chip OTP Memory Support"
+ depends on ARCH_MXS || COMPILE_TEST
+ depends on HAS_IOMEM
help
- This driver enable the OTP controller available on Microchip SAMA7G5
- SoCs. It controlls the access to the OTP memory connected to it.
+ If you say Y here, you will get readonly access to the
+ One Time Programmable memory pages that are stored
+ on the Freescale i.MX23/i.MX28 processor.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-mxs-ocotp.
config NVMEM_NINTENDO_OTP
tristate "Nintendo Wii and Wii U OTP Support"
@@ -126,7 +201,7 @@ config NVMEM_NINTENDO_OTP
This driver can also be built as a module. If so, the module
will be called nvmem-nintendo-otp.
-config QCOM_QFPROM
+config NVMEM_QCOM_QFPROM
tristate "QCOM QFPROM Support"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
@@ -137,15 +212,23 @@ config QCOM_QFPROM
This driver can also be built as a module. If so, the module
will be called nvmem_qfprom.
-config NVMEM_SPMI_SDAM
- tristate "SPMI SDAM Support"
- depends on SPMI
+config NVMEM_RAVE_SP_EEPROM
+ tristate "Rave SP EEPROM Support"
+ depends on RAVE_SP_CORE
help
- This driver supports the Shared Direct Access Memory Module on
- Qualcomm Technologies, Inc. PMICs. It provides the clients
- an interface to read/write to the SDAM module's shared memory.
+ Say y here to enable Rave SP EEPROM support.
+
+config NVMEM_RMEM
+ tristate "Reserved Memory Based Driver Support"
+ depends on HAS_IOMEM
+ help
+ This driver maps reserved memory into an nvmem device. It might be
+ useful to expose information left by firmware in memory.
-config ROCKCHIP_EFUSE
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-rmem.
+
+config NVMEM_ROCKCHIP_EFUSE
tristate "Rockchip eFuse Support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on HAS_IOMEM
@@ -156,7 +239,7 @@ config ROCKCHIP_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
-config ROCKCHIP_OTP
+config NVMEM_ROCKCHIP_OTP
tristate "Rockchip OTP controller support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on HAS_IOMEM
@@ -167,17 +250,45 @@ config ROCKCHIP_OTP
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_otp.
-config NVMEM_BCM_OCOTP
- tristate "Broadcom On-Chip OTP Controller support"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
+config NVMEM_SC27XX_EFUSE
+ tristate "Spreadtrum SC27XX eFuse Support"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
depends on HAS_IOMEM
- default ARCH_BCM_IPROC
help
- Say y here to enable read/write access to the Broadcom OTP
- controller.
+ This is a simple driver to dump specified values of Spreadtrum
+ SC27XX PMICs from eFuse.
This driver can also be built as a module. If so, the module
- will be called nvmem-bcm-ocotp.
+ will be called nvmem-sc27xx-efuse.
+
+config NVMEM_SNVS_LPGPR
+ tristate "Support for Low Power General Purpose Register"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ This is a driver for Low Power General Purpose Register (LPGPR) available on
+ i.MX6 and i.MX7 SoCs in Secure Non-Volatile Storage (SNVS) of this chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-snvs-lpgpr.
+
+config NVMEM_SPMI_SDAM
+ tristate "SPMI SDAM Support"
+ depends on SPMI
+ help
+ This driver supports the Shared Direct Access Memory Module on
+ Qualcomm Technologies, Inc. PMICs. It provides the clients
+ an interface to read/write to the SDAM module's shared memory.
+
+config NVMEM_SPRD_EFUSE
+ tristate "Spreadtrum SoC eFuse Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Spreadtrum
+ SoCs from eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sprd-efuse.
config NVMEM_STM32_ROMEM
tristate "STMicroelectronics STM32 factory-programmed memory support"
@@ -189,6 +300,18 @@ config NVMEM_STM32_ROMEM
This driver can also be built as a module. If so, the module
will be called nvmem-stm32-romem.
+config NVMEM_SUNPLUS_OCOTP
+ tristate "Sunplus SoC OTP support"
+ depends on SOC_SP7021 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a driver for the On-chip OTP controller (OCOTP) available
+ on Sunplus SoCs. It provides access to 128 bytes of one-time
+ programmable eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sunplus-ocotp.
+
config NVMEM_SUNXI_SID
tristate "Allwinner SoCs SID support"
depends on ARCH_SUNXI
@@ -199,7 +322,20 @@ config NVMEM_SUNXI_SID
This driver can also be built as a module. If so, the module
will be called nvmem_sunxi_sid.
-config UNIPHIER_EFUSE
+config NVMEM_U_BOOT_ENV
+ tristate "U-Boot environment variables support"
+ depends on OF && MTD
+ select CRC32
+ help
+ U-Boot stores its setup as environment variables. This driver adds
+ support for verifying & exporting such data. It also exposes variables
+ as NVMEM cells so they can be referenced by other drivers.
+
+	  Currently this driver works only with env variables on top of MTD.
+
+	  If compiled as a module it will be called nvmem_u-boot-env.
+
+config NVMEM_UNIPHIER_EFUSE
tristate "UniPhier SoCs eFuse support"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on HAS_IOMEM
@@ -221,53 +357,6 @@ config NVMEM_VF610_OCOTP
 	  This driver can also be built as a module. If so, the module will
be called nvmem-vf610-ocotp.
-config MESON_EFUSE
- tristate "Amlogic Meson GX eFuse Support"
- depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
- help
- This is a driver to retrieve specific values from the eFuse found on
- the Amlogic Meson GX SoCs.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem_meson_efuse.
-
-config MESON_MX_EFUSE
- tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
- depends on ARCH_MESON || COMPILE_TEST
- help
- This is a driver to retrieve specific values from the eFuse found on
- the Amlogic Meson6, Meson8 and Meson8b SoCs.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem_meson_mx_efuse.
-
-config NVMEM_SNVS_LPGPR
- tristate "Support for Low Power General Purpose Register"
- depends on ARCH_MXC || COMPILE_TEST
- help
- This is a driver for Low Power General Purpose Register (LPGPR) available on
- i.MX6 and i.MX7 SoCs in Secure Non-Volatile Storage (SNVS) of this chip.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-snvs-lpgpr.
-
-config RAVE_SP_EEPROM
- tristate "Rave SP EEPROM Support"
- depends on RAVE_SP_CORE
- help
- Say y here to enable Rave SP EEPROM support.
-
-config SC27XX_EFUSE
- tristate "Spreadtrum SC27XX eFuse Support"
- depends on MFD_SC27XX_PMIC || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This is a simple driver to dump specified values of Spreadtrum
- SC27XX PMICs from eFuse.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-sc27xx-efuse.
-
config NVMEM_ZYNQMP
bool "Xilinx ZYNQMP SoC nvmem firmware support"
depends on ARCH_ZYNQMP
@@ -278,70 +367,4 @@ config NVMEM_ZYNQMP
If sure, say yes. If unsure, say no.
-config SPRD_EFUSE
- tristate "Spreadtrum SoC eFuse Support"
- depends on ARCH_SPRD || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This is a simple driver to dump specified values of Spreadtrum
- SoCs from eFuse.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-sprd-efuse.
-
-config NVMEM_RMEM
- tristate "Reserved Memory Based Driver Support"
- depends on HAS_IOMEM
- help
- This driver maps reserved memory into an nvmem device. It might be
- useful to expose information left by firmware in memory.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-rmem.
-
-config NVMEM_BRCM_NVRAM
- tristate "Broadcom's NVRAM support"
- depends on ARCH_BCM_5301X || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This driver provides support for Broadcom's NVRAM that can be accessed
- using I/O mapping.
-
-config NVMEM_LAYERSCAPE_SFP
- tristate "Layerscape SFP (Security Fuse Processor) support"
- depends on ARCH_LAYERSCAPE || COMPILE_TEST
- depends on HAS_IOMEM
- select REGMAP_MMIO
- help
- This driver provides support to read the eFuses on Freescale
- Layerscape SoC's. For example, the vendor provides a per part
- unique ID there.
-
- This driver can also be built as a module. If so, the module
- will be called layerscape-sfp.
-
-config NVMEM_SUNPLUS_OCOTP
- tristate "Sunplus SoC OTP support"
- depends on SOC_SP7021 || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This is a driver for the On-chip OTP controller (OCOTP) available
- on Sunplus SoCs. It provides access to 128 bytes of one-time
- programmable eFuse.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-sunplus-ocotp.
-
-config NVMEM_APPLE_EFUSES
- tristate "Apple eFuse support"
- depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
- help
- Say y here to enable support for reading eFuses on Apple SoCs
- such as the M1. These are e.g. used to store factory programmed
- calibration data required for the PCIe or the USB-C PHY.
-
- This driver can also be built as a module. If so, the module will
- be called nvmem-apple-efuses.
-
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index c710b64f9fe4..fa80fe17e567 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -7,65 +7,69 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o
# Devices
-obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
-nvmem-bcm-ocotp-y := bcm-ocotp.o
-obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
-nvmem-imx-iim-y := imx-iim.o
-obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
-nvmem-imx-ocotp-y := imx-ocotp.o
+obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
+nvmem-apple-efuses-y := apple-efuses.o
+obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
+nvmem-bcm-ocotp-y := bcm-ocotp.o
+obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
+nvmem_brcm_nvram-y := brcm_nvram.o
+obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
+nvmem-imx-iim-y := imx-iim.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
+nvmem-imx-ocotp-y := imx-ocotp.o
obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o
-nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
-obj-$(CONFIG_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
-nvmem_jz4780_efuse-y := jz4780-efuse.o
+nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
+obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
+nvmem_jz4780_efuse-y := jz4780-efuse.o
+obj-$(CONFIG_NVMEM_LAN9662_OTPC) += nvmem-lan9662-otpc.o
+nvmem-lan9662-otpc-y := lan9662-otpc.o
+obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
+nvmem-layerscape-sfp-y := layerscape-sfp.o
obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
-nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
-obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
-nvmem_lpc18xx_otp-y := lpc18xx_otp.o
-obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
-nvmem-mxs-ocotp-y := mxs-ocotp.o
+nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
+obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
+nvmem_lpc18xx_otp-y := lpc18xx_otp.o
+obj-$(CONFIG_NVMEM_MESON_EFUSE) += nvmem_meson_efuse.o
+nvmem_meson_efuse-y := meson-efuse.o
+obj-$(CONFIG_NVMEM_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
+nvmem_meson_mx_efuse-y := meson-mx-efuse.o
+obj-$(CONFIG_NVMEM_MICROCHIP_OTPC) += nvmem-microchip-otpc.o
+nvmem-microchip-otpc-y := microchip-otpc.o
+obj-$(CONFIG_NVMEM_MTK_EFUSE) += nvmem_mtk-efuse.o
+nvmem_mtk-efuse-y := mtk-efuse.o
+obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
+nvmem-mxs-ocotp-y := mxs-ocotp.o
obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o
-nvmem-nintendo-otp-y := nintendo-otp.o
-obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o
-nvmem_mtk-efuse-y := mtk-efuse.o
-obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
-nvmem_qfprom-y := qfprom.o
-obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
-nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
-obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
-nvmem_rockchip_efuse-y := rockchip-efuse.o
-obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
-nvmem-rockchip-otp-y := rockchip-otp.o
-obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
-nvmem_stm32_romem-y := stm32-romem.o
-obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
-nvmem_sunxi_sid-y := sunxi_sid.o
-obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
-nvmem-uniphier-efuse-y := uniphier-efuse.o
-obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
-nvmem-vf610-ocotp-y := vf610-ocotp.o
-obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
-nvmem_meson_efuse-y := meson-efuse.o
-obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
-nvmem_meson_mx_efuse-y := meson-mx-efuse.o
-obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
-nvmem_snvs_lpgpr-y := snvs_lpgpr.o
-obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
-nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
-obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
-nvmem-sc27xx-efuse-y := sc27xx-efuse.o
-obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
-nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
-obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o
-nvmem_sprd_efuse-y := sprd-efuse.o
-obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
-nvmem-rmem-y := rmem.o
-obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
-nvmem_brcm_nvram-y := brcm_nvram.o
-obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
-nvmem-layerscape-sfp-y := layerscape-sfp.o
+nvmem-nintendo-otp-y := nintendo-otp.o
+obj-$(CONFIG_NVMEM_QCOM_QFPROM) += nvmem_qfprom.o
+nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
+nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
+obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
+nvmem-rmem-y := rmem.o
+obj-$(CONFIG_NVMEM_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
+nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_NVMEM_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
+nvmem-rockchip-otp-y := rockchip-otp.o
+obj-$(CONFIG_NVMEM_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
+nvmem-sc27xx-efuse-y := sc27xx-efuse.o
+obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
+nvmem_snvs_lpgpr-y := snvs_lpgpr.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
+nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
+obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o
+nvmem_sprd_efuse-y := sprd-efuse.o
+obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
+nvmem_stm32_romem-y := stm32-romem.o
obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
-nvmem_sunplus_ocotp-y := sunplus-ocotp.o
-obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
-nvmem-apple-efuses-y := apple-efuses.o
-obj-$(CONFIG_MICROCHIP_OTPC) += nvmem-microchip-otpc.o
-nvmem-microchip-otpc-y := microchip-otpc.o
+nvmem_sunplus_ocotp-y := sunplus-ocotp.o
+obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
+nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o
+nvmem_u-boot-env-y := u-boot-env.o
+obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
+nvmem-uniphier-efuse-y := uniphier-efuse.o
+obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
+nvmem-vf610-ocotp-y := vf610-ocotp.o
+obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
+nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 450b927691c3..4441daa20965 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -96,7 +96,7 @@ static int brcm_nvram_parse(struct brcm_nvram *priv)
len = le32_to_cpu(header.len);
- data = kcalloc(1, len, GFP_KERNEL);
+ data = kzalloc(len, GFP_KERNEL);
memcpy_fromio(data, priv->base, len);
data[len - 1] = '\0';
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 1e3c754efd0d..321d7d63e068 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -810,18 +810,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
switch (config->id) {
case NVMEM_DEVID_NONE:
- dev_set_name(&nvmem->dev, "%s", config->name);
+ rval = dev_set_name(&nvmem->dev, "%s", config->name);
break;
case NVMEM_DEVID_AUTO:
- dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
+ rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
break;
default:
- dev_set_name(&nvmem->dev, "%s%d",
+ rval = dev_set_name(&nvmem->dev, "%s%d",
config->name ? : "nvmem",
config->name ? config->id : nvmem->id);
break;
}
+ if (rval) {
+ ida_free(&nvmem_ida, nvmem->id);
+ kfree(nvmem);
+ return ERR_PTR(rval);
+ }
+
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
@@ -829,21 +835,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->dev.groups = nvmem_dev_groups;
#endif
- if (nvmem->nkeepout) {
- rval = nvmem_validate_keepouts(nvmem);
- if (rval) {
- ida_free(&nvmem_ida, nvmem->id);
- kfree(nvmem);
- return ERR_PTR(rval);
- }
- }
-
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
rval = device_register(&nvmem->dev);
if (rval)
goto err_put_device;
+ if (nvmem->nkeepout) {
+ rval = nvmem_validate_keepouts(nvmem);
+ if (rval)
+ goto err_device_del;
+ }
+
if (config->compat) {
rval = nvmem_sysfs_setup_compat(nvmem, config);
if (rval)
diff --git a/drivers/nvmem/lan9662-otpc.c b/drivers/nvmem/lan9662-otpc.c
new file mode 100644
index 000000000000..f6732fd216d8
--- /dev/null
+++ b/drivers/nvmem/lan9662-otpc.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define OTP_OTP_PWR_DN(t) (t + 0x00)
+#define OTP_OTP_PWR_DN_OTP_PWRDN_N BIT(0)
+#define OTP_OTP_ADDR_HI(t) (t + 0x04)
+#define OTP_OTP_ADDR_LO(t) (t + 0x08)
+#define OTP_OTP_PRGM_DATA(t) (t + 0x10)
+#define OTP_OTP_PRGM_MODE(t) (t + 0x14)
+#define OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE BIT(0)
+#define OTP_OTP_RD_DATA(t) (t + 0x18)
+#define OTP_OTP_FUNC_CMD(t) (t + 0x20)
+#define OTP_OTP_FUNC_CMD_OTP_PROGRAM BIT(1)
+#define OTP_OTP_FUNC_CMD_OTP_READ BIT(0)
+#define OTP_OTP_CMD_GO(t) (t + 0x28)
+#define OTP_OTP_CMD_GO_OTP_GO BIT(0)
+#define OTP_OTP_PASS_FAIL(t) (t + 0x2c)
+#define OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED BIT(3)
+#define OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED BIT(2)
+#define OTP_OTP_PASS_FAIL_OTP_FAIL BIT(0)
+#define OTP_OTP_STATUS(t) (t + 0x30)
+#define OTP_OTP_STATUS_OTP_CPUMPEN BIT(1)
+#define OTP_OTP_STATUS_OTP_BUSY BIT(0)
+
+#define OTP_MEM_SIZE 8192
+#define OTP_SLEEP_US 10
+#define OTP_TIMEOUT_US 500000
+
+struct lan9662_otp {
+ struct device *dev;
+ void __iomem *base;
+};
+
+static bool lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag)
+{
+ u32 val;
+
+ return readl_poll_timeout(reg, val, !(val & flag),
+ OTP_SLEEP_US, OTP_TIMEOUT_US);
+}
+
+static int lan9662_otp_power(struct lan9662_otp *otp, bool up)
+{
+ void __iomem *pwrdn = OTP_OTP_PWR_DN(otp->base);
+
+ if (up) {
+ writel(readl(pwrdn) & ~OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
+ OTP_OTP_STATUS_OTP_CPUMPEN))
+ return -ETIMEDOUT;
+ } else {
+ writel(readl(pwrdn) | OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
+ }
+
+ return 0;
+}
+
+static int lan9662_otp_execute(struct lan9662_otp *otp)
+{
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_CMD_GO(otp->base),
+ OTP_OTP_CMD_GO_OTP_GO))
+ return -ETIMEDOUT;
+
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
+ OTP_OTP_STATUS_OTP_BUSY))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void lan9662_otp_set_address(struct lan9662_otp *otp, u32 offset)
+{
+ writel(0xff & (offset >> 8), OTP_OTP_ADDR_HI(otp->base));
+ writel(0xff & offset, OTP_OTP_ADDR_LO(otp->base));
+}
+
+static int lan9662_otp_read_byte(struct lan9662_otp *otp, u32 offset, u8 *dst)
+{
+ u32 pass;
+ int rc;
+
+ lan9662_otp_set_address(otp, offset);
+ writel(OTP_OTP_FUNC_CMD_OTP_READ, OTP_OTP_FUNC_CMD(otp->base));
+ writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base));
+ rc = lan9662_otp_execute(otp);
+ if (!rc) {
+ pass = readl(OTP_OTP_PASS_FAIL(otp->base));
+ if (pass & OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED)
+ return -EACCES;
+ *dst = (u8) readl(OTP_OTP_RD_DATA(otp->base));
+ }
+ return rc;
+}
+
+static int lan9662_otp_write_byte(struct lan9662_otp *otp, u32 offset, u8 data)
+{
+ u32 pass;
+ int rc;
+
+ lan9662_otp_set_address(otp, offset);
+ writel(OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE, OTP_OTP_PRGM_MODE(otp->base));
+ writel(data, OTP_OTP_PRGM_DATA(otp->base));
+ writel(OTP_OTP_FUNC_CMD_OTP_PROGRAM, OTP_OTP_FUNC_CMD(otp->base));
+ writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base));
+
+ rc = lan9662_otp_execute(otp);
+ if (!rc) {
+ pass = readl(OTP_OTP_PASS_FAIL(otp->base));
+ if (pass & OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED)
+ return -EACCES;
+ if (pass & OTP_OTP_PASS_FAIL_OTP_FAIL)
+ return -EIO;
+ }
+ return rc;
+}
+
+static int lan9662_otp_read(void *context, unsigned int offset,
+ void *_val, size_t bytes)
+{
+ struct lan9662_otp *otp = context;
+ u8 *val = _val;
+ uint8_t data;
+ int i, rc = 0;
+
+ lan9662_otp_power(otp, true);
+ for (i = 0; i < bytes; i++) {
+ rc = lan9662_otp_read_byte(otp, offset + i, &data);
+ if (rc < 0)
+ break;
+ *val++ = data;
+ }
+ lan9662_otp_power(otp, false);
+
+ return rc;
+}
+
+static int lan9662_otp_write(void *context, unsigned int offset,
+ void *_val, size_t bytes)
+{
+ struct lan9662_otp *otp = context;
+ u8 *val = _val;
+ u8 data, newdata;
+ int i, rc = 0;
+
+ lan9662_otp_power(otp, true);
+ for (i = 0; i < bytes; i++) {
+ /* Skip zero bytes */
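+		/*
+		 * OTP bits can only be programmed from 0 to 1, so the new byte
+		 * is OR-ed with the value already stored and the write is
+		 * skipped when no additional bits would be set.
+		 */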
+ if (val[i]) {
+ rc = lan9662_otp_read_byte(otp, offset + i, &data);
+ if (rc < 0)
+ break;
+
+ newdata = data | val[i];
+ if (newdata == data)
+ continue;
+
+ rc = lan9662_otp_write_byte(otp, offset + i,
+ newdata);
+ if (rc < 0)
+ break;
+ }
+ }
+ lan9662_otp_power(otp, false);
+
+ return rc;
+}
+
+static struct nvmem_config otp_config = {
+ .name = "lan9662-otp",
+ .stride = 1,
+ .word_size = 1,
+ .reg_read = lan9662_otp_read,
+ .reg_write = lan9662_otp_write,
+ .size = OTP_MEM_SIZE,
+};
+
+static int lan9662_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct lan9662_otp *otp;
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ otp_config.priv = otp;
+ otp_config.dev = dev;
+
+ nvmem = devm_nvmem_register(dev, &otp_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id lan9662_otp_match[] = {
+ { .compatible = "microchip,lan9662-otp", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lan9662_otp_match);
+
+static struct platform_driver lan9662_otp_driver = {
+ .probe = lan9662_otp_probe,
+ .driver = {
+ .name = "lan9662-otp",
+ .of_match_table = lan9662_otp_match,
+ },
+};
+module_platform_driver(lan9662_otp_driver);
+
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_DESCRIPTION("lan9662 OTP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
new file mode 100644
index 000000000000..8e72d1bbd649
--- /dev/null
+++ b/drivers/nvmem/u-boot-env.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+enum u_boot_env_format {
+ U_BOOT_FORMAT_SINGLE,
+ U_BOOT_FORMAT_REDUNDANT,
+};
+
+struct u_boot_env {
+ struct device *dev;
+ enum u_boot_env_format format;
+
+ struct mtd_info *mtd;
+
+ /* Cells */
+ struct nvmem_cell_info *cells;
+ int ncells;
+};
+
+struct u_boot_env_image_single {
+ __le32 crc32;
+ uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_redundant {
+ __le32 crc32;
+ u8 mark;
+ uint8_t data[];
+} __packed;
+
+static int u_boot_env_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct u_boot_env *priv = context;
+ struct device *dev = priv->dev;
+ size_t bytes_read;
+ int err;
+
+ err = mtd_read(priv->mtd, offset, bytes, &bytes_read, val);
+ if (err && !mtd_is_bitflip(err)) {
+ dev_err(dev, "Failed to read from mtd: %d\n", err);
+ return err;
+ }
+
+ if (bytes_read != bytes) {
+ dev_err(dev, "Failed to read %zu bytes\n", bytes);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
+ size_t data_offset, size_t data_len)
+{
+ struct device *dev = priv->dev;
+ char *data = buf + data_offset;
+ char *var, *value, *eq;
+ int idx;
+
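+	/*
+	 * The environment area is a list of NUL-terminated "name=value"
+	 * strings followed by an empty string: count the variables first,
+	 * then describe each value as an NVMEM cell named after its variable.
+	 */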
+ priv->ncells = 0;
+ for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
+ priv->ncells++;
+
+ priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+ if (!priv->cells)
+ return -ENOMEM;
+
+ for (var = data, idx = 0;
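+	/* The remaining clocks are platform specific, so request them as optional */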
+ var < data + data_len && *var;
+ var = value + strlen(value) + 1, idx++) {
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
+ value = eq + 1;
+
+ priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
+ if (!priv->cells[idx].name)
+ return -ENOMEM;
+ priv->cells[idx].offset = data_offset + value - data;
+ priv->cells[idx].bytes = strlen(value);
+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+ }
+
+ if (WARN_ON(idx != priv->ncells))
+ priv->ncells = idx;
+
+ return 0;
+}
+
+static int u_boot_env_parse(struct u_boot_env *priv)
+{
+ struct device *dev = priv->dev;
+ size_t crc32_data_offset;
+ size_t crc32_data_len;
+ size_t crc32_offset;
+ size_t data_offset;
+ size_t data_len;
+ uint32_t crc32;
+ uint32_t calc;
+ size_t bytes;
+ uint8_t *buf;
+ int err;
+
+ buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
+ if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
+ dev_err(dev, "Failed to read from mtd: %d\n", err);
+ goto err_kfree;
+ }
+
+ switch (priv->format) {
+ case U_BOOT_FORMAT_SINGLE:
+ crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
+ data_offset = offsetof(struct u_boot_env_image_single, data);
+ break;
+ case U_BOOT_FORMAT_REDUNDANT:
+ crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_redundant, mark);
+ data_offset = offsetof(struct u_boot_env_image_redundant, data);
+ break;
+ }
+ crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
+ crc32_data_len = priv->mtd->size - crc32_data_offset;
+ data_len = priv->mtd->size - data_offset;
+
+ calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+ if (calc != crc32) {
+ dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
+ err = -EINVAL;
+ goto err_kfree;
+ }
+
+ buf[priv->mtd->size - 1] = '\0';
+ err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
+ if (err)
+ dev_err(dev, "Failed to add cells: %d\n", err);
+
+err_kfree:
+ kfree(buf);
+err_out:
+ return err;
+}
+
+static int u_boot_env_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = {
+ .name = "u-boot-env",
+ .reg_read = u_boot_env_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct u_boot_env *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ priv->format = (uintptr_t)of_device_get_match_data(dev);
+
+ priv->mtd = of_get_mtd_device_by_node(np);
+ if (IS_ERR(priv->mtd)) {
+ dev_err_probe(dev, PTR_ERR(priv->mtd), "Failed to get %pOF MTD\n", np);
+ return PTR_ERR(priv->mtd);
+ }
+
+ err = u_boot_env_parse(priv);
+ if (err)
+ return err;
+
+ config.dev = dev;
+ config.cells = priv->cells;
+ config.ncells = priv->ncells;
+ config.priv = priv;
+ config.size = priv->mtd->size;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id u_boot_env_of_match_table[] = {
+ { .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, },
+ { .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ { .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ {},
+};
+
+static struct platform_driver u_boot_env_driver = {
+ .probe = u_boot_env_probe,
+ .driver = {
+ .name = "u_boot_env",
+ .of_match_table = u_boot_env_of_match_table,
+ },
+};
+module_platform_driver(u_boot_env_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table);
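The driver above registers one NVMEM cell per environment variable, named after the variable and pointing at its value. As a rough sketch outside this patch, a consumer driver could read such a cell through the standard nvmem consumer API; the "ethaddr" cell name and the function below are assumed examples, wired up via the usual nvmem-cells/nvmem-cell-names devicetree properties.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/nvmem-consumer.h>
    #include <linux/slab.h>

    /* Hypothetical consumer reading the U-Boot "ethaddr" variable as a cell. */
    static int example_read_ethaddr(struct device *dev)
    {
    	struct nvmem_cell *cell;
    	size_t len;
    	void *buf;

    	/* Looked up through the nvmem-cells/nvmem-cell-names properties. */
    	cell = nvmem_cell_get(dev, "ethaddr");
    	if (IS_ERR(cell))
    		return PTR_ERR(cell);

    	/* Returns a kmalloc'ed copy of the cell contents; the caller frees it. */
    	buf = nvmem_cell_read(cell, &len);
    	nvmem_cell_put(cell);
    	if (IS_ERR(buf))
    		return PTR_ERR(buf);

    	dev_info(dev, "ethaddr is %zu bytes long\n", len);
    	kfree(buf);
    	return 0;
    }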
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 96f0a12e507c..c34ac33b7338 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -579,7 +579,8 @@ u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
}
EXPORT_SYMBOL(of_translate_address);
-static struct device_node *__of_get_dma_parent(const struct device_node *np)
+#ifdef CONFIG_HAS_DMA
+struct device_node *__of_get_dma_parent(const struct device_node *np)
{
struct of_phandle_args args;
int ret, index;
@@ -596,6 +597,7 @@ static struct device_node *__of_get_dma_parent(const struct device_node *np)
return of_node_get(args.np);
}
+#endif
static struct device_node *of_get_next_dma_parent(struct device_node *np)
{
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 42da760e0f45..d5a5c35eba72 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -561,7 +561,7 @@ EXPORT_SYMBOL(of_device_is_compatible);
* a NULL terminated array of strings. Returns the best match
* score or 0.
*/
-int of_device_compatible_match(struct device_node *device,
+int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
unsigned int tmp, score = 0;
@@ -1229,7 +1229,7 @@ int of_modalias_node(struct device_node *node, char *modalias, int len)
if (!compatible || strlen(compatible) > cplen)
return -ENODEV;
p = strchr(compatible, ',');
- strlcpy(modalias, p ? p + 1 : compatible, len);
+ strscpy(modalias, p ? p + 1 : compatible, len);
return 0;
}
EXPORT_SYMBOL_GPL(of_modalias_node);
@@ -2089,12 +2089,13 @@ int of_find_last_cache_level(unsigned int cpu)
struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
while (np) {
+ of_node_put(prev);
prev = np;
- of_node_put(np);
np = of_find_next_cache_node(np);
}
of_property_read_u32(prev, "cache-level", &cache_level);
+ of_node_put(prev);
return cache_level;
}
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 75b6cbffa755..8cefe5a7d04e 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -116,12 +116,19 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
{
const struct iommu_ops *iommu;
const struct bus_dma_region *map = NULL;
+ struct device_node *bus_np;
u64 dma_start = 0;
u64 mask, end, size = 0;
bool coherent;
int ret;
- ret = of_dma_get_range(np, &map);
+ if (np == dev->of_node)
+ bus_np = __of_get_dma_parent(np);
+ else
+ bus_np = of_node_get(np);
+
+ ret = of_dma_get_range(bus_np, &map);
+ of_node_put(bus_np);
if (ret < 0) {
/*
* For legacy reasons, we have to assume some devices need
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 1c573e7a60bc..7b571a631639 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -828,15 +828,6 @@ uint32_t __init of_get_flat_dt_phandle(unsigned long node)
return fdt_get_phandle(initial_boot_params, node);
}
-struct fdt_scan_status {
- const char *name;
- int namelen;
- int depth;
- int found;
- int (*iterator)(unsigned long node, const char *uname, int depth, void *data);
- void *data;
-};
-
const char * __init of_flat_dt_get_machine_name(void)
{
const char *name;
@@ -936,6 +927,8 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
if (!prop)
return;
end = of_read_number(prop, len/4);
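+	/* Ignore a bogus initrd range whose end address precedes its start */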
+ if (start > end)
+ return;
__early_init_dt_declare_initrd(start, end);
phys_initrd_start = start;
@@ -1178,7 +1171,7 @@ int __init early_init_dt_scan_chosen(char *cmdline)
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
+ strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
/*
* CONFIG_CMDLINE is meant to be a default in case nothing else
@@ -1190,11 +1183,11 @@ int __init early_init_dt_scan_chosen(char *cmdline)
strlcat(cmdline, " ", COMMAND_LINE_SIZE);
strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
- strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
 		/* No arguments from boot loader, use kernel's cmdline */
if (!((char *)cmdline)[0])
- strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index d22f605fa7ee..2bac44f09554 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -592,6 +592,9 @@ void __init of_irq_init(const struct of_device_id *matches)
ret = desc->irq_init_cb(desc->dev,
desc->interrupt_parent);
if (ret) {
+ pr_err("%s: Failed to init %pOF (%p), parent %p\n",
+ __func__, desc->dev, desc->dev,
+ desc->interrupt_parent);
of_node_clear_flag(desc->dev, OF_POPULATED);
kfree(desc);
continue;
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 9324483397f6..fb6792d381a6 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -155,12 +155,17 @@ struct bus_dma_region;
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
int of_dma_get_range(struct device_node *np,
const struct bus_dma_region **map);
+struct device_node *__of_get_dma_parent(const struct device_node *np);
#else
static inline int of_dma_get_range(struct device_node *np,
const struct bus_dma_region **map)
{
return -ENODEV;
}
+static inline struct device_node *__of_get_dma_parent(const struct device_node *np)
+{
+ return of_get_parent(np);
+}
#endif
void fdt_init_reserved_mem(void);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index f9614552db82..b89ab5d9fea5 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -2465,7 +2465,7 @@ static int unittest_i2c_bus_probe(struct platform_device *pdev)
adap = &std->adap;
i2c_set_adapdata(adap, std);
adap->nr = -1;
- strlcpy(adap->name, pdev->name, sizeof(adap->name));
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
adap->class = I2C_CLASS_DEPRECATED;
adap->algo = &unittest_i2c_algo;
adap->dev.parent = dev;
@@ -3465,6 +3465,9 @@ static int __init of_unittest(void)
pr_info("start of unittest - you will see error messages\n");
+ /* Taint the kernel so we know we've run tests. */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
/* adding data for unittest */
if (IS_ENABLED(CONFIG_UML))
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index f54a6f450391..f0cb31198a8f 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -393,7 +393,7 @@ static int parse_slot_config(int slot,
}
if (p0 + function_len < pos) {
- printk(KERN_ERR "eisa_enumerator: function %d length mis-match "
+ printk(KERN_ERR "eisa_enumerator: function %d length mismatch "
"got %d, expected %d\n",
num_func, pos-p0, function_len);
res=-1;
@@ -407,13 +407,13 @@ static int parse_slot_config(int slot,
}
if (pos != es->config_data_length) {
- printk(KERN_ERR "eisa_enumerator: config data length mis-match got %d, expected %d\n",
+ printk(KERN_ERR "eisa_enumerator: config data length mismatch got %d, expected %d\n",
pos, es->config_data_length);
res=-1;
}
if (num_func != es->num_functions) {
- printk(KERN_ERR "eisa_enumerator: number of functions mis-match got %d, expected %d\n",
+ printk(KERN_ERR "eisa_enumerator: number of functions mismatch got %d, expected %d\n",
num_func, es->num_functions);
res=-2;
}
@@ -451,7 +451,7 @@ static int init_slot(int slot, struct eeprom_eisa_slot_info *es)
}
if (es->eisa_slot_id != id) {
print_eisa_id(id_string, id);
- printk(KERN_ERR "EISA slot %d id mis-match: got %s",
+ printk(KERN_ERR "EISA slot %d id mismatch: got %s",
slot, id_string);
print_eisa_id(id_string, es->eisa_slot_id);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index eda4ded4d5e5..7c45927e2131 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2604,6 +2604,7 @@ enum parport_pc_pci_cards {
oxsemi_pcie_pport,
aks_0100,
mobility_pp,
+ netmos_9900,
netmos_9705,
netmos_9715,
netmos_9755,
@@ -2665,6 +2666,7 @@ static struct parport_pc_pci {
/* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
/* aks_0100 */ { 1, { { 0, -1 }, } },
/* mobility_pp */ { 1, { { 0, 1 }, } },
+ /* netmos_9900 */ { 1, { { 0, -1 }, } },
/* The netmos entries below are untested */
/* netmos_9705 */ { 1, { { 0, -1 }, } },
@@ -2746,6 +2748,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 },
{ 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
/* NetMos communication controllers */
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 0xA000, 0x2000, 0, 0, netmos_9900 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9715,
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6e5debdbc55b..2616585ca5f8 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -51,6 +51,7 @@ enum imx6_pcie_variants {
IMX7D,
IMX8MQ,
IMX8MM,
+ IMX8MP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
@@ -61,6 +62,7 @@ struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
u32 flags;
int dbi_length;
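+	/* Compatible string of the IOMUXC GPR syscon used by this variant */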
+ const char *gpr;
};
struct imx6_pcie {
@@ -150,7 +152,8 @@ struct imx6_pcie {
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MM);
+ imx6_pcie->drvdata->variant != IMX8MM &&
+ imx6_pcie->drvdata->variant != IMX8MP);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
@@ -301,6 +304,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
case IMX8MM:
+ case IMX8MP:
/*
* The PHY initialization had been done in the PHY
* driver, break here directly.
@@ -558,6 +562,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX8MM:
case IMX8MQ:
+ case IMX8MP:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
dev_err(dev, "unable to enable pcie_aux clock\n");
@@ -602,6 +607,7 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX8MM:
case IMX8MQ:
+ case IMX8MP:
clk_disable_unprepare(imx6_pcie->pcie_aux);
break;
default:
@@ -669,6 +675,7 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
reset_control_assert(imx6_pcie->pciephy_reset);
fallthrough;
case IMX8MM:
+ case IMX8MP:
reset_control_assert(imx6_pcie->apps_reset);
break;
case IMX6SX:
@@ -744,6 +751,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
break;
case IMX6Q: /* Nothing to do */
case IMX8MM:
+ case IMX8MP:
break;
}
@@ -793,6 +801,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
case IMX7D:
case IMX8MQ:
case IMX8MM:
+ case IMX8MP:
reset_control_deassert(imx6_pcie->apps_reset);
break;
}
@@ -812,6 +821,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
case IMX7D:
case IMX8MQ:
case IMX8MM:
+ case IMX8MP:
reset_control_assert(imx6_pcie->apps_reset);
break;
}
@@ -935,7 +945,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_init(imx6_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
@@ -949,7 +959,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ ret = phy_power_on(imx6_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
goto err_phy_off;
@@ -961,7 +971,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
err_phy_off:
if (imx6_pcie->phy)
- phy_power_off(imx6_pcie->phy);
+ phy_exit(imx6_pcie->phy);
err_clk_disable:
imx6_pcie_clk_disable(imx6_pcie);
err_reg_disable:
@@ -1179,6 +1189,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
break;
case IMX8MM:
+ case IMX8MP:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
@@ -1216,7 +1227,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx6_pcie->iomuxc_gpr);
@@ -1295,12 +1306,14 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
.dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1308,17 +1321,26 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX7D] = {
.variant = IMX7D,
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx7d-iomuxc-gpr",
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
},
[IMX8MM] = {
.variant = IMX8MM,
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ },
+ [IMX8MP] = {
+ .variant = IMX8MP,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
},
};
@@ -1329,6 +1351,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+ { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 7746f94a715f..39f3b37d4033 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -267,15 +267,6 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
-
- if (pp->msi_data) {
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
-
- dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
- if (pp->msi_page)
- __free_page(pp->msi_page);
- }
}
static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
@@ -336,6 +327,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
+ u64 *msi_vaddr;
int ret;
u32 ctrl, num_ctrls;
@@ -375,22 +367,16 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
dw_chained_msi_isr, pp);
}
- ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
- pp->msi_page = alloc_page(GFP_DMA32);
- pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- ret = dma_mapping_error(dev, pp->msi_data);
- if (ret) {
- dev_err(pci->dev, "Failed to map MSI data\n");
- __free_page(pp->msi_page);
- pp->msi_page = NULL;
- pp->msi_data = 0;
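+	/*
+	 * The MSI target only needs to be a DMA'able address for devices to
+	 * direct their MSI writes at; a devres-managed coherent allocation
+	 * provides that and is freed automatically, so no explicit unmap or
+	 * free path is needed on teardown.
+	 */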
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to alloc and map MSI data\n");
dw_pcie_free_msi(pp);
-
- return ret;
+ return -ENOMEM;
}
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 09b887093a84..a871ae7eb59e 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -243,7 +243,6 @@ struct dw_pcie_rp {
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
dma_addr_t msi_data;
- struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 7f67aad71df4..d09507f822a7 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
@@ -366,12 +367,11 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
char name[32];
int ret, i;
/* This is an optional property */
- ret = of_gpio_named_count(np, "hisilicon,clken-gpios");
+ ret = gpiod_count(dev, "hisilicon,clken");
if (ret < 0)
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index ec99116ad05c..6d0d1b759ca2 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/syscon.h>
@@ -26,6 +27,7 @@
#define PARF_SYS_CTRL 0x00
#define PARF_DB_CTRL 0x10
#define PARF_PM_CTRL 0x20
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_MHI_BASE_ADDR_LOWER 0x178
#define PARF_MHI_BASE_ADDR_UPPER 0x17c
#define PARF_DEBUG_INT_EN 0x190
@@ -45,6 +47,11 @@
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
@@ -83,6 +90,9 @@
#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2)
#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5)
+/* PARF_MHI_CLOCK_RESET_CTRL fields */
+#define PARF_MSTR_AXI_CLK_EN BIT(1)
+
/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0)
@@ -95,6 +105,7 @@
/* PARF_SYS_CTRL register fields */
#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4)
#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6)
+#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS BIT(10)
#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11)
/* PARF_DB_CTRL register fields */
@@ -130,21 +141,33 @@ enum qcom_pcie_ep_link_status {
QCOM_PCIE_EP_LINK_DOWN,
};
-static struct clk_bulk_data qcom_pcie_ep_clks[] = {
- { .id = "cfg" },
- { .id = "aux" },
- { .id = "bus_master" },
- { .id = "bus_slave" },
- { .id = "ref" },
- { .id = "sleep" },
- { .id = "slave_q2a" },
-};
-
+/**
+ * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
+ * @pci: Designware PCIe controller struct
+ * @parf: Qualcomm PCIe specific PARF register base
+ * @elbi: Designware PCIe specific ELBI register base
+ * @mmio: MMIO register base
+ * @perst_map: PERST regmap
+ * @mmio_res: MMIO region resource
+ * @core_reset: PCIe Endpoint core reset
+ * @reset: PERST# GPIO
+ * @wake: WAKE# GPIO
+ * @phy: PHY controller block
+ * @debugfs: PCIe Endpoint Debugfs directory
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count
+ * @perst_en: Flag for PERST enable
+ * @perst_sep_en: Flag for PERST separation enable
+ * @link_status: PCIe Link status
+ * @global_irq: Qualcomm PCIe specific Global IRQ
+ * @perst_irq: PERST# IRQ
+ */
struct qcom_pcie_ep {
struct dw_pcie pci;
void __iomem *parf;
void __iomem *elbi;
+ void __iomem *mmio;
struct regmap *perst_map;
struct resource *mmio_res;
@@ -152,6 +175,10 @@ struct qcom_pcie_ep {
struct gpio_desc *reset;
struct gpio_desc *wake;
struct phy *phy;
+ struct dentry *debugfs;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
u32 perst_en;
u32 perst_sep_en;
@@ -193,8 +220,10 @@ static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
*/
static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
{
- regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
- regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
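+	/* The TCSR PERST separation registers are optional; skip when absent */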
+ if (pcie_ep->perst_map) {
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+ }
}
static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
@@ -227,8 +256,7 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
int ret;
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
if (ret)
return ret;
@@ -249,8 +277,7 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
err_phy_exit:
phy_exit(pcie_ep->phy);
err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
return ret;
}
@@ -259,8 +286,7 @@ static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
{
phy_power_off(pcie_ep->phy);
phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
}
static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
@@ -318,8 +344,14 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
val &= ~PARF_Q2A_FLUSH_EN;
writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);
- /* Disable DBI Wakeup, core clock CGC and enable AUX power */
+ /*
+ * Disable Master AXI clock during idle. Do not allow DBI access
+ * to take the core out of L1. Disable core clock gating that
+ * gates PIPE clock from propagating to core clock. Report to the
+ * host that Vaux is present.
+ */
val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
+ val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS;
val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
PARF_SYS_CTRL_AUX_PWR_DET;
@@ -375,6 +407,11 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);
+ /* Gate Master AXI clock to MHI bus during L1SS */
+ val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val &= ~PARF_MSTR_AXI_CLK_EN;
+	writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
/* Enable LTSSM */
@@ -437,11 +474,19 @@ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mmio");
+ if (!pcie_ep->mmio_res) {
+ dev_err(dev, "Failed to get mmio resource\n");
+ return -EINVAL;
+ }
+
+ pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res);
+ if (IS_ERR(pcie_ep->mmio))
+ return PTR_ERR(pcie_ep->mmio);
syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
if (!syscon) {
- dev_err(dev, "Failed to parse qcom,perst-regs\n");
- return -EINVAL;
+ dev_dbg(dev, "PERST separation not available\n");
+ return 0;
}
pcie_ep->perst_map = syscon_node_to_regmap(syscon);
@@ -474,14 +519,15 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
if (ret) {
- dev_err(&pdev->dev, "Failed to get io resources %d\n", ret);
+ dev_err(dev, "Failed to get io resources %d\n", ret);
return ret;
}
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
- if (ret)
- return ret;
+ pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks);
+ if (pcie_ep->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return pcie_ep->num_clks;
+ }
pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
if (IS_ERR(pcie_ep->core_reset))
@@ -495,7 +541,7 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
if (IS_ERR(pcie_ep->wake))
return PTR_ERR(pcie_ep->wake);
- pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy");
+ pcie_ep->phy = devm_phy_optional_get(dev, "pciephy");
if (IS_ERR(pcie_ep->phy))
ret = PTR_ERR(pcie_ep->phy);
@@ -571,13 +617,13 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
- int irq, ret;
+ int ret;
- irq = platform_get_irq_byname(pdev, "global");
- if (irq < 0)
- return irq;
+ pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
+ if (pcie_ep->global_irq < 0)
+ return pcie_ep->global_irq;
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
"global_irq", pcie_ep);
@@ -594,7 +640,7 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
"perst_irq", pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
- disable_irq(irq);
+ disable_irq(pcie_ep->global_irq);
return ret;
}
@@ -617,6 +663,37 @@ static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
+static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *)
+ dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+
+ debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs,
+ qcom_pcie_ep_link_transition_count);
+}
+
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
.core_init_notifier = true,
@@ -649,6 +726,7 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qcom_pcie_ep *pcie_ep;
+ char *name;
int ret;
pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
@@ -680,8 +758,21 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
goto err_disable_resources;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_disable_irqs;
+ }
+
+ pcie_ep->debugfs = debugfs_create_dir(name, NULL);
+ qcom_pcie_ep_init_debugfs(pcie_ep);
+
return 0;
+err_disable_irqs:
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
err_disable_resources:
qcom_pcie_disable_resources(pcie_ep);
@@ -692,6 +783,11 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
{
struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+ debugfs_remove_recursive(pcie_ep->debugfs);
+
if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
return 0;
@@ -702,8 +798,10 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
static const struct of_device_id qcom_pcie_ep_match[] = {
{ .compatible = "qcom,sdx55-pcie-ep", },
+ { .compatible = "qcom,sm8450-pcie-ep", },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
static struct platform_driver qcom_pcie_ep_driver = {
.probe = qcom_pcie_ep_probe,
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 66886dc6e777..f711acacaeaf 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -180,7 +180,7 @@ struct qcom_pcie_resources_2_3_3 {
/* 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[9];
+ struct clk_bulk_data clks[12];
int num_clks;
struct regulator_bulk_data supplies[2];
struct reset_control *pci_reset;
@@ -208,17 +208,12 @@ struct qcom_pcie_ops {
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
- void (*post_deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
};
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
- unsigned int has_tbu_clk:1;
- unsigned int has_ddrss_sf_tbu_clk:1;
- unsigned int has_aggre0_clk:1;
- unsigned int has_aggre1_clk:1;
};
struct qcom_pcie {
@@ -1175,6 +1170,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ unsigned int num_clks, num_opt_clks;
unsigned int idx;
int ret;
@@ -1195,18 +1191,25 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
res->clks[idx++].id = "bus_master";
res->clks[idx++].id = "bus_slave";
res->clks[idx++].id = "slave_q2a";
- if (pcie->cfg->has_tbu_clk)
- res->clks[idx++].id = "tbu";
- if (pcie->cfg->has_ddrss_sf_tbu_clk)
- res->clks[idx++].id = "ddrss_sf_tbu";
- if (pcie->cfg->has_aggre0_clk)
- res->clks[idx++].id = "aggre0";
- if (pcie->cfg->has_aggre1_clk)
- res->clks[idx++].id = "aggre1";
+ num_clks = idx;
+
+ ret = devm_clk_bulk_get(dev, num_clks, res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->clks[idx++].id = "tbu";
+ res->clks[idx++].id = "ddrss_sf_tbu";
+ res->clks[idx++].id = "aggre0";
+ res->clks[idx++].id = "aggre1";
+ res->clks[idx++].id = "noc_aggr_4";
+ res->clks[idx++].id = "noc_aggr_south_sf";
+ res->clks[idx++].id = "cnoc_qx";
+
+ num_opt_clks = idx - num_clks;
res->num_clks = idx;
- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+ ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
if (ret < 0)
return ret;
@@ -1509,15 +1512,13 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (pcie->cfg->ops->config_sid) {
ret = pcie->cfg->ops->config_sid(pcie);
if (ret)
- goto err;
+ goto err_assert_reset;
}
return 0;
-err:
+err_assert_reset:
qcom_ep_reset_assert(pcie);
- if (pcie->cfg->ops->post_deinit)
- pcie->cfg->ops->post_deinit(pcie);
err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
@@ -1601,68 +1602,35 @@ static const struct qcom_pcie_ops ops_2_9_0 = {
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
-static const struct qcom_pcie_cfg apq8084_cfg = {
+static const struct qcom_pcie_cfg cfg_1_0_0 = {
.ops = &ops_1_0_0,
};
-static const struct qcom_pcie_cfg ipq8064_cfg = {
+static const struct qcom_pcie_cfg cfg_1_9_0 = {
+ .ops = &ops_1_9_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
-static const struct qcom_pcie_cfg msm8996_cfg = {
+static const struct qcom_pcie_cfg cfg_2_3_2 = {
.ops = &ops_2_3_2,
};
-static const struct qcom_pcie_cfg ipq8074_cfg = {
+static const struct qcom_pcie_cfg cfg_2_3_3 = {
.ops = &ops_2_3_3,
};
-static const struct qcom_pcie_cfg ipq4019_cfg = {
+static const struct qcom_pcie_cfg cfg_2_4_0 = {
.ops = &ops_2_4_0,
};
-static const struct qcom_pcie_cfg sdm845_cfg = {
+static const struct qcom_pcie_cfg cfg_2_7_0 = {
.ops = &ops_2_7_0,
- .has_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8150_cfg = {
- /* sm8150 has qcom IP rev 1.5.0. However 1.5.0 ops are same as
- * 1.9.0, so reuse the same.
- */
- .ops = &ops_1_9_0,
-};
-
-static const struct qcom_pcie_cfg sm8250_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
- .has_ddrss_sf_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
- .ops = &ops_1_9_0,
- .has_ddrss_sf_tbu_clk = true,
- .has_aggre0_clk = true,
- .has_aggre1_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
- .ops = &ops_1_9_0,
- .has_ddrss_sf_tbu_clk = true,
- .has_aggre1_clk = true,
-};
-
-static const struct qcom_pcie_cfg sc7280_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sc8180x_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
};
-static const struct qcom_pcie_cfg ipq6018_cfg = {
+static const struct qcom_pcie_cfg cfg_2_9_0 = {
.ops = &ops_2_9_0,
};
@@ -1761,22 +1729,24 @@ err_pm_runtime_put:
}
static const struct of_device_id qcom_pcie_match[] = {
- { .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg },
- { .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg },
- { .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg },
- { .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
- { .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
- { .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
- { .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg },
- { .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
- { .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
- { .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
- { .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
- { .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
- { .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },
+ { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
+ { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
+ { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ }
};
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 966c8b48bd96..ba36bbc5897d 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -33,6 +33,7 @@
#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_SSDEV_ID_REG 0x2c
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
@@ -1077,7 +1078,10 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Indicates support for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
+ bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
bridge->has_pcie = true;
+ bridge->pcie_start = PCIE_CORE_PCIEXP_CAP;
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index 88980a44461d..0cfd9d5a497c 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -103,13 +103,6 @@
#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000
#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000
-/* Defines for PCI configuration command register */
-#define PCI_CONF_ENABLE BIT(31)
-#define PCI_CONF_WHERE(r) ((r) & 0xFC)
-#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16)
-#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11)
-#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8)
-
/**
* struct faraday_pci_variant - encodes IP block differences
* @cascaded_irq: this host has cascaded IRQs from an interrupt controller
@@ -190,11 +183,8 @@ static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
unsigned int fn, int config, int size,
u32 *value)
{
- writel(PCI_CONF_BUS(bus_number) |
- PCI_CONF_DEVICE(PCI_SLOT(fn)) |
- PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
- PCI_CONF_WHERE(config) |
- PCI_CONF_ENABLE,
+ writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn),
+ PCI_FUNC(fn), config),
p->base + FTPCI_CONFIG);
*value = readl(p->base + FTPCI_DATA);
@@ -225,11 +215,8 @@ static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
{
int ret = PCIBIOS_SUCCESSFUL;
- writel(PCI_CONF_BUS(bus_number) |
- PCI_CONF_DEVICE(PCI_SLOT(fn)) |
- PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
- PCI_CONF_WHERE(config) |
- PCI_CONF_ENABLE,
+ writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn),
+ PCI_FUNC(fn), config),
p->base + FTPCI_CONFIG);
switch (size) {
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index af915c951f06..1ced73726a26 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -523,7 +523,7 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
/* Are the new iobase/iolimit values invalid? */
if (conf->iolimit < conf->iobase ||
- conf->iolimitupper < conf->iobaseupper)
+ le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
&desired, &port->iowin);
@@ -535,10 +535,10 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* is the CPU address.
*/
desired.remap = ((conf->iobase & 0xF0) << 8) |
- (conf->iobaseupper << 16);
+ (le16_to_cpu(conf->iobaseupper) << 16);
desired.base = port->pcie->io.start + desired.remap;
desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
- (conf->iolimitupper << 16)) -
+ (le16_to_cpu(conf->iolimitupper) << 16)) -
desired.remap) +
1;
@@ -552,7 +552,7 @@ static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new membase/memlimit values invalid? */
- if (conf->memlimit < conf->membase)
+ if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
&desired, &port->memwin);
@@ -562,8 +562,8 @@ static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- desired.base = ((conf->membase & 0xFFF0) << 16);
- desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
+ desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
@@ -946,6 +946,7 @@ static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
bridge->subsystem_vendor_id = ssdev_id & 0xffff;
bridge->subsystem_id = ssdev_id >> 16;
bridge->has_pcie = true;
+ bridge->pcie_start = PCIE_CAP_PCIEXP;
bridge->data = port;
bridge->ops = &mvebu_pci_bridge_emul_ops;
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 8e323e93be91..24478ae5a345 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -415,13 +415,6 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
* address (access to which generates correct config transaction) falls in
* this 4 KiB region.
*/
-static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
- unsigned int where)
-{
- return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
- (PCI_FUNC(devfn) << 8) | (where & 0xff);
-}
-
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
@@ -443,7 +436,9 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int offset;
u32 base;
- offset = tegra_pcie_conf_offset(bus->number, devfn, where);
+ offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where) &
+ ~PCI_CONF1_ENABLE;
/* move 4 KiB window to offset within the FPCI region */
base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index a2c3c207a04b..66f37e403a09 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -516,8 +516,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
u32 stat, idx;
int ret, i;
- reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
- GPIOD_OUT_LOW, "PERST#");
+ reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
+ GPIOD_OUT_LOW, "PERST#");
if (IS_ERR(reset))
return PTR_ERR(reset);
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 11cdb9b6f109..b8612ce5f4d0 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -1071,7 +1071,7 @@ static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
.remove = mtk_pcie_remove,
.driver = {
- .name = "mtk-pcie",
+ .name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
},
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 33eb37a2225c..4bd1abf26008 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -30,6 +30,8 @@
#include <linux/reset.h>
#include <linux/sys_soc.h>
+#include "../pci.h"
+
/* MediaTek-specific configuration registers */
#define PCIE_FTS_NUM 0x70c
#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
@@ -120,19 +122,12 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port,
writel_relaxed(val, port->base + reg);
}
-static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot,
- unsigned int func, unsigned int where)
-{
- return (((where & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
- (func << 8) | (where & 0xfc) | 0x80000000;
-}
-
static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct mt7621_pcie *pcie = bus->sysdata;
- u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
+ u32 address = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
@@ -147,7 +142,7 @@ static struct pci_ops mt7621_pcie_ops = {
static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
{
- u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
+ u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
@@ -156,7 +151,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
u32 reg, u32 val)
{
- u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
+ u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 9037a7827eca..fdd2ec09651e 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -526,7 +526,7 @@ static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
!desc.pci.msi_attrib.is_virtual;
- if (!desc.pci.msi_attrib.can_mask) {
+ if (desc.pci.msi_attrib.can_mask) {
addr = pci_msix_desc_addr(&desc);
desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 4496a7c5c478..88dc66ee1c46 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -649,7 +649,7 @@ struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
if (!closest_pdevs)
return NULL;
- while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ for_each_pci_dev(pdev) {
if (!pci_has_p2pmem(pdev))
continue;
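
The for_each_pci_dev() conversion above is a straight rewrite of the open-coded pci_get_device(PCI_ANY_ID, PCI_ANY_ID, ...) loop; the macro manages the device reference counts across iterations. A hypothetical user of the same idiom (the function itself is illustrative; only for_each_pci_dev(), pci_has_p2pmem() and pci_dev_put() are real APIs):

/*
 * Illustrative only. Breaking out of the loop early would require an
 * explicit pci_dev_put() on the device held at that point.
 */
static unsigned int count_p2pmem_providers(void)
{
	struct pci_dev *pdev = NULL;
	unsigned int count = 0;

	for_each_pci_dev(pdev)
		if (pci_has_p2pmem(pdev))
			count++;

	return count;
}
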
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 9c2ca28e3ecf..9334b2dd4764 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -22,11 +22,7 @@
#define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
#define PCI_CAP_SSID_SIZEOF (PCI_SSVID_DEVICE_ID + 2)
-#define PCI_CAP_SSID_START PCI_BRIDGE_CONF_END
-#define PCI_CAP_SSID_END (PCI_CAP_SSID_START + PCI_CAP_SSID_SIZEOF)
#define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2)
-#define PCI_CAP_PCIE_START PCI_CAP_SSID_END
-#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)
/**
* struct pci_bridge_reg_behavior - register bits behaviors
@@ -324,7 +320,7 @@ pci_bridge_emul_read_ssid(struct pci_bridge_emul *bridge, int reg, u32 *value)
switch (reg) {
case PCI_CAP_LIST_ID:
*value = PCI_CAP_ID_SSVID |
- (bridge->has_pcie ? (PCI_CAP_PCIE_START << 8) : 0);
+ ((bridge->pcie_start > bridge->ssid_start) ? (bridge->pcie_start << 8) : 0);
return PCI_BRIDGE_EMUL_HANDLED;
case PCI_SSVID_VENDOR_ID:
@@ -365,18 +361,33 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
if (!bridge->pci_regs_behavior)
return -ENOMEM;
- if (bridge->subsystem_vendor_id)
- bridge->conf.capabilities_pointer = PCI_CAP_SSID_START;
- else if (bridge->has_pcie)
- bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
- else
- bridge->conf.capabilities_pointer = 0;
+ /* If ssid_start and pcie_start were not specified then choose the lowest possible value. */
+ if (!bridge->ssid_start && !bridge->pcie_start) {
+ if (bridge->subsystem_vendor_id)
+ bridge->ssid_start = PCI_BRIDGE_CONF_END;
+ if (bridge->has_pcie)
+ bridge->pcie_start = bridge->ssid_start + PCI_CAP_SSID_SIZEOF;
+ } else if (!bridge->ssid_start && bridge->subsystem_vendor_id) {
+ if (bridge->pcie_start - PCI_BRIDGE_CONF_END >= PCI_CAP_SSID_SIZEOF)
+ bridge->ssid_start = PCI_BRIDGE_CONF_END;
+ else
+ bridge->ssid_start = bridge->pcie_start + PCI_CAP_PCIE_SIZEOF;
+ } else if (!bridge->pcie_start && bridge->has_pcie) {
+ if (bridge->ssid_start - PCI_BRIDGE_CONF_END >= PCI_CAP_PCIE_SIZEOF)
+ bridge->pcie_start = PCI_BRIDGE_CONF_END;
+ else
+ bridge->pcie_start = bridge->ssid_start + PCI_CAP_SSID_SIZEOF;
+ }
+
+ bridge->conf.capabilities_pointer = min(bridge->ssid_start, bridge->pcie_start);
if (bridge->conf.capabilities_pointer)
bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST);
if (bridge->has_pcie) {
bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
+ bridge->pcie_conf.next = (bridge->ssid_start > bridge->pcie_start) ?
+ bridge->ssid_start : 0;
bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4);
bridge->pcie_cap_regs_behavior =
kmemdup(pcie_cap_regs_behavior,
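
When neither ssid_start nor pcie_start is supplied, the fallback above packs both capabilities immediately after the standard bridge header. A standalone check of the resulting default layout (the numeric values are assumed from include/uapi/linux/pci_regs.h rather than restated in the patch):

#include <stdio.h>

#define PCI_BRIDGE_CONF_END	0x40		/* PCI_STD_HEADER_SIZEOF */
#define PCI_CAP_SSID_SIZEOF	(0x06 + 2)	/* PCI_SSVID_DEVICE_ID + 2 */
#define PCI_CAP_PCIE_SIZEOF	(0x3a + 2)	/* PCI_EXP_SLTSTA2 + 2 */

int main(void)
{
	unsigned int ssid_start = PCI_BRIDGE_CONF_END;			/* 0x40 */
	unsigned int pcie_start = ssid_start + PCI_CAP_SSID_SIZEOF;	/* 0x48 */

	printf("SSVID capability at 0x%02x, PCIe capability at 0x%02x-0x%02x\n",
	       ssid_start, pcie_start, pcie_start + PCI_CAP_PCIE_SIZEOF - 1);
	return 0;
}
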
@@ -459,15 +470,17 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
read_op = bridge->ops->read_base;
cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
- } else if (reg >= PCI_CAP_SSID_START && reg < PCI_CAP_SSID_END && bridge->subsystem_vendor_id) {
+ } else if (reg >= bridge->ssid_start && reg < bridge->ssid_start + PCI_CAP_SSID_SIZEOF &&
+ bridge->subsystem_vendor_id) {
/* Emulated PCI Bridge Subsystem Vendor ID capability */
- reg -= PCI_CAP_SSID_START;
+ reg -= bridge->ssid_start;
read_op = pci_bridge_emul_read_ssid;
cfgspace = NULL;
behavior = NULL;
- } else if (reg >= PCI_CAP_PCIE_START && reg < PCI_CAP_PCIE_END && bridge->has_pcie) {
+ } else if (reg >= bridge->pcie_start && reg < bridge->pcie_start + PCI_CAP_PCIE_SIZEOF &&
+ bridge->has_pcie) {
/* Our emulated PCIe capability */
- reg -= PCI_CAP_PCIE_START;
+ reg -= bridge->pcie_start;
read_op = bridge->ops->read_pcie;
cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
@@ -538,9 +551,10 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
write_op = bridge->ops->write_base;
cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
- } else if (reg >= PCI_CAP_PCIE_START && reg < PCI_CAP_PCIE_END && bridge->has_pcie) {
+ } else if (reg >= bridge->pcie_start && reg < bridge->pcie_start + PCI_CAP_PCIE_SIZEOF &&
+ bridge->has_pcie) {
/* Our emulated PCIe capability */
- reg -= PCI_CAP_PCIE_START;
+ reg -= bridge->pcie_start;
write_op = bridge->ops->write_pcie;
cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index 71392b67471d..2a0e59c7f0d9 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -131,6 +131,8 @@ struct pci_bridge_emul {
struct pci_bridge_reg_behavior *pci_regs_behavior;
struct pci_bridge_reg_behavior *pcie_cap_regs_behavior;
void *data;
+ u8 pcie_start;
+ u8 ssid_start;
bool has_pcie;
u16 subsystem_vendor_id;
u16 subsystem_id;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 49238ddd39ee..107d77f3c846 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -774,6 +774,12 @@ static int pci_pm_suspend(struct device *dev)
pci_dev->skip_bus_pm = false;
+ /*
+ * Disabling PTM allows some systems, e.g., Intel mobile chips
+ * since Coffee Lake, to enter a lower-power PM state.
+ */
+ pci_suspend_ptm(pci_dev);
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_SUSPEND);
@@ -867,20 +873,15 @@ static int pci_pm_suspend_noirq(struct device *dev)
}
}
- if (pci_dev->skip_bus_pm) {
+ if (!pci_dev->state_saved) {
+ pci_save_state(pci_dev);
+
/*
- * Either the device is a bridge with a child in D0 below it, or
- * the function is running for the second time in a row without
- * going through full resume, which is possible only during
- * suspend-to-idle in a spurious wakeup case. The device should
- * be in D0 at this point, but if it is a bridge, it may be
- * necessary to save its state.
+ * If the device is a bridge with a child in D0 below it,
+ * it needs to stay in D0, so check skip_bus_pm to avoid
+ * putting it into a low-power state in that case.
*/
- if (!pci_dev->state_saved)
- pci_save_state(pci_dev);
- } else if (!pci_dev->state_saved) {
- pci_save_state(pci_dev);
- if (pci_power_manageable(pci_dev))
+ if (!pci_dev->skip_bus_pm && pci_power_manageable(pci_dev))
pci_prepare_to_sleep(pci_dev);
}
@@ -987,6 +988,8 @@ static int pci_pm_resume(struct device *dev)
if (pci_dev->state_saved)
pci_restore_standard_config(pci_dev);
+ pci_resume_ptm(pci_dev);
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
@@ -1274,6 +1277,8 @@ static int pci_pm_runtime_suspend(struct device *dev)
pci_power_t prev = pci_dev->current_state;
int error;
+ pci_suspend_ptm(pci_dev);
+
/*
* If pci_dev->driver is not set (unbound), we leave the device in D0,
* but it may go to D3cold when the bridge above it runtime suspends.
@@ -1335,6 +1340,7 @@ static int pci_pm_runtime_resume(struct device *dev)
* D3cold when the bridge above it runtime suspended.
*/
pci_pm_default_resume_early(pci_dev);
+ pci_resume_ptm(pci_dev);
if (!pci_dev->driver)
return 0;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fc804e08e3cb..0a2eeb82cebd 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/aperture.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -1373,6 +1374,112 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+#define pci_dev_resource_resize_attr(n) \
+static ssize_t resource##n##_resize_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char * buf) \
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ ssize_t ret; \
+ \
+ pci_config_pm_runtime_get(pdev); \
+ \
+ ret = sysfs_emit(buf, "%016llx\n", \
+ (u64)pci_rebar_get_possible_sizes(pdev, n)); \
+ \
+ pci_config_pm_runtime_put(pdev); \
+ \
+ return ret; \
+} \
+ \
+static ssize_t resource##n##_resize_store(struct device *dev, \
+ struct device_attribute *attr,\
+ const char *buf, size_t count)\
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ unsigned long size, flags; \
+ int ret, i; \
+ u16 cmd; \
+ \
+ if (kstrtoul(buf, 0, &size) < 0) \
+ return -EINVAL; \
+ \
+ device_lock(dev); \
+ if (dev->driver) { \
+ ret = -EBUSY; \
+ goto unlock; \
+ } \
+ \
+ pci_config_pm_runtime_get(pdev); \
+ \
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \
+ ret = aperture_remove_conflicting_pci_devices(pdev, \
+ "resourceN_resize"); \
+ if (ret) \
+ goto pm_put; \
+ } \
+ \
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd); \
+ pci_write_config_word(pdev, PCI_COMMAND, \
+ cmd & ~PCI_COMMAND_MEMORY); \
+ \
+ flags = pci_resource_flags(pdev, n); \
+ \
+ pci_remove_resource_files(pdev); \
+ \
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) { \
+ if (pci_resource_len(pdev, i) && \
+ pci_resource_flags(pdev, i) == flags) \
+ pci_release_resource(pdev, i); \
+ } \
+ \
+ ret = pci_resize_resource(pdev, n, size); \
+ \
+ pci_assign_unassigned_bus_resources(pdev->bus); \
+ \
+ if (pci_create_resource_files(pdev)) \
+ pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
+ \
+ pci_write_config_word(pdev, PCI_COMMAND, cmd); \
+pm_put: \
+ pci_config_pm_runtime_put(pdev); \
+unlock: \
+ device_unlock(dev); \
+ \
+ return ret ? ret : count; \
+} \
+static DEVICE_ATTR_RW(resource##n##_resize)
+
+pci_dev_resource_resize_attr(0);
+pci_dev_resource_resize_attr(1);
+pci_dev_resource_resize_attr(2);
+pci_dev_resource_resize_attr(3);
+pci_dev_resource_resize_attr(4);
+pci_dev_resource_resize_attr(5);
+
+static struct attribute *resource_resize_attrs[] = {
+ &dev_attr_resource0_resize.attr,
+ &dev_attr_resource1_resize.attr,
+ &dev_attr_resource2_resize.attr,
+ &dev_attr_resource3_resize.attr,
+ &dev_attr_resource4_resize.attr,
+ &dev_attr_resource5_resize.attr,
+ NULL,
+};
+
+static umode_t resource_resize_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
+}
+
+static const struct attribute_group pci_dev_resource_resize_group = {
+ .attrs = resource_resize_attrs,
+ .is_visible = resource_resize_is_visible,
+};
+
int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
if (!sysfs_initialized)
@@ -1494,6 +1601,7 @@ const struct attribute_group *pci_dev_groups[] = {
#ifdef CONFIG_ACPI
&pci_dev_acpi_attr_group,
#endif
+ &pci_dev_resource_resize_group,
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 95bc329e74c0..2127aba3550b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -66,13 +66,15 @@ struct pci_pme_device {
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
- unsigned int delay = dev->d3hot_delay;
-
- if (delay < pci_pm_d3hot_delay)
- delay = pci_pm_d3hot_delay;
-
- if (delay)
- msleep(delay);
+ unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
+ unsigned int upper;
+
+ if (delay_ms) {
+ /* Use a 20% upper bound, 1ms minimum */
+ upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
+ usleep_range(delay_ms * USEC_PER_MSEC,
+ (delay_ms + upper) * USEC_PER_MSEC);
+ }
}
bool pci_reset_supported(struct pci_dev *dev)
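
The replacement above trades msleep(), which can overshoot short delays considerably, for usleep_range() with a 20% (at least 1 ms) upper slack. A standalone check of the resulting bounds for a typical 10 ms D3hot delay (values illustrative):

#include <stdio.h>

#define USEC_PER_MSEC	1000U

/* Same rounding as the kernel's DIV_ROUND_CLOSEST() for positive values */
static unsigned int div_round_closest(unsigned int x, unsigned int d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	unsigned int delay_ms = 10;
	unsigned int upper = div_round_closest(delay_ms, 5);

	if (upper < 1)
		upper = 1;
	/* Prints usleep_range(10000, 12000) */
	printf("usleep_range(%u, %u)\n", delay_ms * USEC_PER_MSEC,
	       (delay_ms + upper) * USEC_PER_MSEC);
	return 0;
}
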
@@ -1663,6 +1665,7 @@ int pci_save_state(struct pci_dev *dev)
return i;
pci_save_ltr_state(dev);
+ pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1769,6 +1772,7 @@ void pci_restore_state(struct pci_dev *dev)
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
+ pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
@@ -2706,24 +2710,12 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- /*
- * There are systems (for example, Intel mobile chips since Coffee
- * Lake) where the power drawn while suspended can be significantly
- * reduced by disabling PTM on PCIe root ports as this allows the
- * port to enter a lower-power PM state and the SoC to reach a
- * lower-power idle state as a whole.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- pci_disable_ptm(dev);
-
pci_enable_wake(dev, target_state, wakeup);
error = pci_set_power_state(dev, target_state);
- if (error) {
+ if (error)
pci_enable_wake(dev, target_state, false);
- pci_restore_ptm_state(dev);
- }
return error;
}
@@ -2764,24 +2756,12 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- /*
- * There are systems (for example, Intel mobile chips since Coffee
- * Lake) where the power drawn while suspended can be significantly
- * reduced by disabling PTM on PCIe root ports as this allows the
- * port to enter a lower-power PM state and the SoC to reach a
- * lower-power idle state as a whole.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- pci_disable_ptm(dev);
-
__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
- if (error) {
+ if (error)
pci_enable_wake(dev, target_state, false);
- pci_restore_ptm_state(dev);
- }
return error;
}
@@ -3485,6 +3465,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
+ error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
+ 2 * sizeof(u32));
+ if (error)
+ pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
+
pci_allocate_vc_save_buffers(dev);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 785f31086313..b1ebb7ab8805 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -505,13 +505,17 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
#ifdef CONFIG_PCIE_PTM
+void pci_ptm_init(struct pci_dev *dev);
void pci_save_ptm_state(struct pci_dev *dev);
void pci_restore_ptm_state(struct pci_dev *dev);
-void pci_disable_ptm(struct pci_dev *dev);
+void pci_suspend_ptm(struct pci_dev *dev);
+void pci_resume_ptm(struct pci_dev *dev);
#else
+static inline void pci_ptm_init(struct pci_dev *dev) { }
static inline void pci_save_ptm_state(struct pci_dev *dev) { }
static inline void pci_restore_ptm_state(struct pci_dev *dev) { }
-static inline void pci_disable_ptm(struct pci_dev *dev) { }
+static inline void pci_suspend_ptm(struct pci_dev *dev) { }
+static inline void pci_resume_ptm(struct pci_dev *dev) { }
#endif
unsigned long pci_cardbus_resource_alignment(struct resource *);
@@ -561,10 +565,14 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
+static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -575,12 +583,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
-#ifdef CONFIG_PCIE_PTM
-void pci_ptm_init(struct pci_dev *dev);
-#else
-static inline void pci_ptm_init(struct pci_dev *dev) { }
-#endif
-
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
@@ -774,4 +776,49 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
+/*
+ * Config Address for PCI Configuration Mechanism #1
+ *
+ * See PCI Local Bus Specification, Revision 3.0,
+ * Section 3.2.2.3.2, Figure 3-2, p. 50.
+ */
+
+#define PCI_CONF1_BUS_SHIFT 16 /* Bus number */
+#define PCI_CONF1_DEV_SHIFT 11 /* Device number */
+#define PCI_CONF1_FUNC_SHIFT 8 /* Function number */
+
+#define PCI_CONF1_BUS_MASK 0xff
+#define PCI_CONF1_DEV_MASK 0x1f
+#define PCI_CONF1_FUNC_MASK 0x7
+#define PCI_CONF1_REG_MASK 0xfc /* Limit aligned offset to a maximum of 256B */
+
+#define PCI_CONF1_ENABLE BIT(31)
+#define PCI_CONF1_BUS(x) (((x) & PCI_CONF1_BUS_MASK) << PCI_CONF1_BUS_SHIFT)
+#define PCI_CONF1_DEV(x) (((x) & PCI_CONF1_DEV_MASK) << PCI_CONF1_DEV_SHIFT)
+#define PCI_CONF1_FUNC(x) (((x) & PCI_CONF1_FUNC_MASK) << PCI_CONF1_FUNC_SHIFT)
+#define PCI_CONF1_REG(x) ((x) & PCI_CONF1_REG_MASK)
+
+#define PCI_CONF1_ADDRESS(bus, dev, func, reg) \
+ (PCI_CONF1_ENABLE | \
+ PCI_CONF1_BUS(bus) | \
+ PCI_CONF1_DEV(dev) | \
+ PCI_CONF1_FUNC(func) | \
+ PCI_CONF1_REG(reg))
+
+/*
+ * Extension of PCI Config Address for accessing extended PCIe registers
+ *
+ * No standardized specification, but used on many non-ECAM-compliant ARM SoCs
+ * and on AMD Barcelona and newer CPUs. Reserved bits [27:24] of the PCI Config
+ * Address carry the additional 4 high bits of the PCI Express register offset.
+ */
+
+#define PCI_CONF1_EXT_REG_SHIFT 16
+#define PCI_CONF1_EXT_REG_MASK 0xf00
+#define PCI_CONF1_EXT_REG(x) (((x) & PCI_CONF1_EXT_REG_MASK) << PCI_CONF1_EXT_REG_SHIFT)
+
+#define PCI_CONF1_EXT_ADDRESS(bus, dev, func, reg) \
+ (PCI_CONF1_ADDRESS(bus, dev, func, reg) | \
+ PCI_CONF1_EXT_REG(reg))
+
#endif /* DRIVERS_PCI_H */
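
The drivers converted earlier in this series (ftpci100, tegra, mt7621) now share these helpers instead of open-coding the bit layout. A standalone sketch of what the macros compute, with the definitions repeated so it builds outside the kernel (the bus/device/function/offset values are arbitrary):

#include <stdio.h>

#define PCI_CONF1_ENABLE	(1UL << 31)
#define PCI_CONF1_BUS(x)	(((x) & 0xffUL) << 16)
#define PCI_CONF1_DEV(x)	(((x) & 0x1fUL) << 11)
#define PCI_CONF1_FUNC(x)	(((x) & 0x07UL) << 8)
#define PCI_CONF1_REG(x)	((x) & 0xfcUL)

#define PCI_CONF1_ADDRESS(bus, dev, func, reg) \
	(PCI_CONF1_ENABLE | PCI_CONF1_BUS(bus) | PCI_CONF1_DEV(dev) | \
	 PCI_CONF1_FUNC(func) | PCI_CONF1_REG(reg))

/* Non-standard extension: bits [27:24] carry the high 4 bits of the offset */
#define PCI_CONF1_EXT_ADDRESS(bus, dev, func, reg) \
	(PCI_CONF1_ADDRESS(bus, dev, func, reg) | (((reg) & 0xf00UL) << 16))

int main(void)
{
	/* Bus 1, device 3, function 0, extended config offset 0x104 */
	unsigned long addr = PCI_CONF1_EXT_ADDRESS(1, 3, 0, 0x104);

	printf("0x%08lx\n", addr);	/* 0x81011804 */
	return 0;
}
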
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index a8aec190986c..53a1fa306e1e 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
@@ -350,29 +351,43 @@ static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
return 0;
}
+/*
+ * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
+ * register. Ports enter L1.2 when the most recent LTR value is greater
+ * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
+ * don't enter L1.2 too aggressively.
+ *
+ * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
+ */
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
- u32 threshold_ns = threshold_us * 1000;
+ u64 threshold_ns = (u64) threshold_us * 1000;
- /* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
- if (threshold_ns < 32) {
- *scale = 0;
+ /*
+ * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
+ * value of 0x3ff.
+ */
+ if (threshold_ns <= 0x3ff * 1) {
+ *scale = 0; /* Value times 1ns */
*value = threshold_ns;
- } else if (threshold_ns < 1024) {
- *scale = 1;
- *value = threshold_ns >> 5;
- } else if (threshold_ns < 32768) {
- *scale = 2;
- *value = threshold_ns >> 10;
- } else if (threshold_ns < 1048576) {
- *scale = 3;
- *value = threshold_ns >> 15;
- } else if (threshold_ns < 33554432) {
- *scale = 4;
- *value = threshold_ns >> 20;
+ } else if (threshold_ns <= 0x3ff * 32) {
+ *scale = 1; /* Value times 32ns */
+ *value = roundup(threshold_ns, 32) / 32;
+ } else if (threshold_ns <= 0x3ff * 1024) {
+ *scale = 2; /* Value times 1024ns */
+ *value = roundup(threshold_ns, 1024) / 1024;
+ } else if (threshold_ns <= 0x3ff * 32768) {
+ *scale = 3; /* Value times 32768ns */
+ *value = roundup(threshold_ns, 32768) / 32768;
+ } else if (threshold_ns <= 0x3ff * 1048576) {
+ *scale = 4; /* Value times 1048576ns */
+ *value = roundup(threshold_ns, 1048576) / 1048576;
+ } else if (threshold_ns <= 0x3ff * (u64) 33554432) {
+ *scale = 5; /* Value times 33554432ns */
+ *value = roundup(threshold_ns, 33554432) / 33554432;
} else {
*scale = 5;
- *value = threshold_ns >> 25;
+ *value = 0x3ff; /* Max representable value */
}
}
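
The rewrite above rounds LTR_L1.2_THRESHOLD up instead of truncating, so the encoded threshold is never below the requested one (the old shift-based code could encode slightly less than requested). A standalone check of one case, a 100 us threshold, which lands in the 1024 ns scale (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t threshold_ns = 100ULL * 1000;	/* 100 us request */
	uint32_t unit = 1024;			/* scale 2: value times 1024 ns */
	uint32_t value = (threshold_ns + unit - 1) / unit;	/* roundup -> 98 */

	printf("scale=2 value=%u encodes %u ns (>= %llu ns requested)\n",
	       value, value * unit, (unsigned long long)threshold_ns);
	return 0;
}
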
@@ -455,6 +470,31 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
pci_write_config_dword(pdev, pos, val);
}
+static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
+{
+ u16 l1ss = dev->l1ss;
+ u32 l1_2_enable;
+
+ /*
+ * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
+ * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
+ */
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /*
+ * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
+ * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+ * enable bits, even though they're all in PCI_L1SS_CTL1.
+ */
+ l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
+ if (l1_2_enable)
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
+ ctl1 | l1_2_enable);
+}
+
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -464,7 +504,6 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
u32 ctl1 = 0, ctl2 = 0;
u32 pctl1, pctl2, cctl1, cctl2;
- u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
@@ -513,39 +552,78 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
ctl2 == pctl2 && ctl2 == cctl2)
return;
- /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
- pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
- cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+ pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+ aspm_program_l1ss(parent, pctl1, ctl2);
+
+ cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+ cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+ aspm_program_l1ss(child, cctl1, ctl2);
+}
- if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- }
+static void aspm_l1ss_init(struct pcie_link_state *link)
+{
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
+ u32 parent_l1ss_cap, child_l1ss_cap;
+ u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
- /* Program T_POWER_ON times in both ports */
- pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
- pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+ if (!parent->l1ss || !child->l1ss)
+ return;
- /* Program Common_Mode_Restore_Time in upstream device */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+ /* Setup L1 substate */
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
+ &parent_l1ss_cap);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
+ &child_l1ss_cap);
- /* Program LTR_L1.2_THRESHOLD time in both ports */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
-
- if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
- pl1_2_enables);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
- cl1_2_enables);
- }
+ if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ parent_l1ss_cap = 0;
+ if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ child_l1ss_cap = 0;
+
+ /*
+ * If we don't have LTR for the entire path from the Root Complex
+ * to this device, we can't use ASPM L1.2 because it relies on the
+ * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+ */
+ if (!child->ltr_path)
+ child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
+ link->aspm_support |= ASPM_STATE_L1_1;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
+ link->aspm_support |= ASPM_STATE_L1_2;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
+ link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
+ link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+
+ if (parent_l1ss_cap)
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ &parent_l1ss_ctl1);
+ if (child_l1ss_cap)
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ &child_l1ss_ctl1);
+
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
+ link->aspm_enabled |= ASPM_STATE_L1_1;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
+ link->aspm_enabled |= ASPM_STATE_L1_2;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
+ link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
+ link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+
+ if (link->aspm_support & ASPM_STATE_L1SS)
+ aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
}
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
@@ -553,8 +631,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
struct pci_dev *child = link->downstream, *parent = link->pdev;
u32 parent_lnkcap, child_lnkcap;
u16 parent_lnkctl, child_lnkctl;
- u32 parent_l1ss_cap, child_l1ss_cap;
- u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
struct pci_bus *linkbus = parent->subordinate;
if (blacklist) {
@@ -609,52 +685,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
link->aspm_enabled |= ASPM_STATE_L1;
- /* Setup L1 substate */
- pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
- &parent_l1ss_cap);
- pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
- &child_l1ss_cap);
-
- if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
- parent_l1ss_cap = 0;
- if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
- child_l1ss_cap = 0;
-
- /*
- * If we don't have LTR for the entire path from the Root Complex
- * to this device, we can't use ASPM L1.2 because it relies on the
- * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
- */
- if (!child->ltr_path)
- child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
-
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
-
- if (parent_l1ss_cap)
- pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- &parent_l1ss_ctl1);
- if (child_l1ss_cap)
- pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
- &child_l1ss_ctl1);
-
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
-
- if (link->aspm_support & ASPM_STATE_L1SS)
- aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
+ aspm_l1ss_init(link);
/* Save default state */
link->aspm_default = link->aspm_enabled;
@@ -726,6 +757,43 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, val);
}
+void pci_save_aspm_l1ss_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 l1ss = dev->l1ss;
+ u32 *cap;
+
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
+}
+
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap, ctl1, ctl2;
+ u16 l1ss = dev->l1ss;
+
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = (u32 *)&save_state->cap.data[0];
+ ctl2 = *cap++;
+ ctl1 = *cap;
+ aspm_program_l1ss(dev, ctl1, ctl2);
+}
+
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 3e9afee02e8d..f5ffea17c7f8 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -335,11 +335,16 @@ void pci_dpc_init(struct pci_dev *pdev)
return;
pdev->dpc_rp_extensions = true;
- pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
- if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
- pci_err(pdev, "RP PIO log size %u is invalid\n",
- pdev->dpc_rp_log_size);
- pdev->dpc_rp_log_size = 0;
+
+ /* Quirks may set dpc_rp_log_size if device or firmware is buggy */
+ if (!pdev->dpc_rp_log_size) {
+ pdev->dpc_rp_log_size =
+ (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
+ if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ pci_err(pdev, "RP PIO log size %u is invalid\n",
+ pdev->dpc_rp_log_size);
+ pdev->dpc_rp_log_size = 0;
+ }
}
}
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 368a254e3124..b4e5f553467c 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -9,30 +9,38 @@
#include <linux/pci.h>
#include "../pci.h"
-static void pci_ptm_info(struct pci_dev *dev)
+/*
+ * If the next upstream device supports PTM, return it; otherwise return
+ * NULL. PTM Messages are local, so both link partners must support it.
+ */
+static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
{
- char clock_desc[8];
+ struct pci_dev *ups = pci_upstream_bridge(dev);
- switch (dev->ptm_granularity) {
- case 0:
- snprintf(clock_desc, sizeof(clock_desc), "unknown");
- break;
- case 255:
- snprintf(clock_desc, sizeof(clock_desc), ">254ns");
- break;
- default:
- snprintf(clock_desc, sizeof(clock_desc), "%uns",
- dev->ptm_granularity);
- break;
- }
- pci_info(dev, "PTM enabled%s, %s granularity\n",
- dev->ptm_root ? " (root)" : "", clock_desc);
+ /*
+ * Switch Downstream Ports are not permitted to have a PTM
+ * capability; their PTM behavior is controlled by the Upstream
+ * Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
+ * Switch Downstream Port, look up one more level.
+ */
+ if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
+ ups = pci_upstream_bridge(ups);
+
+ if (ups && ups->ptm_cap)
+ return ups;
+
+ return NULL;
}
-void pci_disable_ptm(struct pci_dev *dev)
+/*
+ * Find the PTM Capability (if present) and extract the information we need
+ * to use it.
+ */
+void pci_ptm_init(struct pci_dev *dev)
{
- int ptm;
- u16 ctrl;
+ u16 ptm;
+ u32 cap;
+ struct pci_dev *ups;
if (!pci_is_pcie(dev))
return;
@@ -41,21 +49,47 @@ void pci_disable_ptm(struct pci_dev *dev)
if (!ptm)
return;
- pci_read_config_word(dev, ptm + PCI_PTM_CTRL, &ctrl);
- ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
- pci_write_config_word(dev, ptm + PCI_PTM_CTRL, ctrl);
+ dev->ptm_cap = ptm;
+ pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));
+
+ pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
+ dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
+
+ /*
+ * Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
+ * furthest upstream Time Source as the PTM Root. For Endpoints,
+ * "the Effective Granularity is the maximum Local Clock Granularity
+ * reported by the PTM Root and all intervening PTM Time Sources."
+ */
+ ups = pci_upstream_ptm(dev);
+ if (ups) {
+ if (ups->ptm_granularity == 0)
+ dev->ptm_granularity = 0;
+ else if (ups->ptm_granularity > dev->ptm_granularity)
+ dev->ptm_granularity = ups->ptm_granularity;
+ } else if (cap & PCI_PTM_CAP_ROOT) {
+ dev->ptm_root = 1;
+ } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+
+ /*
+ * Per sec 7.9.15.3, this should be the Local Clock
+ * Granularity of the associated Time Source. But it
+ * doesn't say how to find that Time Source.
+ */
+ dev->ptm_granularity = 0;
+ }
+
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
+ pci_enable_ptm(dev, NULL);
}
void pci_save_ptm_state(struct pci_dev *dev)
{
- int ptm;
+ u16 ptm = dev->ptm_cap;
struct pci_cap_saved_state *save_state;
- u16 *cap;
+ u32 *cap;
- if (!pci_is_pcie(dev))
- return;
-
- ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
if (!ptm)
return;
@@ -63,146 +97,152 @@ void pci_save_ptm_state(struct pci_dev *dev)
if (!save_state)
return;
- cap = (u16 *)&save_state->cap.data[0];
- pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
}
void pci_restore_ptm_state(struct pci_dev *dev)
{
+ u16 ptm = dev->ptm_cap;
struct pci_cap_saved_state *save_state;
- int ptm;
- u16 *cap;
+ u32 *cap;
- if (!pci_is_pcie(dev))
+ if (!ptm)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
- ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!save_state || !ptm)
+ if (!save_state)
return;
- cap = (u16 *)&save_state->cap.data[0];
- pci_write_config_word(dev, ptm + PCI_PTM_CTRL, *cap);
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
}
-void pci_ptm_init(struct pci_dev *dev)
+/* Enable PTM in the Control register if possible */
+static int __pci_enable_ptm(struct pci_dev *dev)
{
- int pos;
- u32 cap, ctrl;
- u8 local_clock;
+ u16 ptm = dev->ptm_cap;
struct pci_dev *ups;
+ u32 ctrl;
- if (!pci_is_pcie(dev))
- return;
-
- /*
- * Enable PTM only on interior devices (root ports, switch ports,
- * etc.) on the assumption that it causes no link traffic until an
- * endpoint enables it.
- */
- if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
- return;
+ if (!ptm)
+ return -EINVAL;
/*
- * Switch Downstream Ports are not permitted to have a PTM
- * capability; their PTM behavior is controlled by the Upstream
- * Port (PCIe r5.0, sec 7.9.16).
+ * A device uses local PTM Messages to request time information
+ * from a PTM Root that's farther upstream. Every device along the
+ * path must support PTM and have it enabled so it can handle the
+ * messages. Therefore, if this device is not a PTM Root, the
+ * upstream link partner must have PTM enabled before we can enable
+ * PTM.
*/
- ups = pci_upstream_bridge(dev);
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
- ups && ups->ptm_enabled) {
- dev->ptm_granularity = ups->ptm_granularity;
- dev->ptm_enabled = 1;
- return;
+ if (!dev->ptm_root) {
+ ups = pci_upstream_ptm(dev);
+ if (!ups || !ups->ptm_enabled)
+ return -EINVAL;
}
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return;
-
- pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u16));
-
- pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
- local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
-
- /*
- * There's no point in enabling PTM unless it's enabled in the
- * upstream device or this device can be a PTM Root itself. Per
- * the spec recommendation (PCIe r3.1, sec 7.32.3), select the
- * furthest upstream Time Source as the PTM Root.
- */
- if (ups && ups->ptm_enabled) {
- ctrl = PCI_PTM_CTRL_ENABLE;
- if (ups->ptm_granularity == 0)
- dev->ptm_granularity = 0;
- else if (ups->ptm_granularity > local_clock)
- dev->ptm_granularity = ups->ptm_granularity;
- } else {
- if (cap & PCI_PTM_CAP_ROOT) {
- ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT;
- dev->ptm_root = 1;
- dev->ptm_granularity = local_clock;
- } else
- return;
- }
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
+ ctrl |= PCI_PTM_CTRL_ENABLE;
+ ctrl &= ~PCI_PTM_GRANULARITY_MASK;
ctrl |= dev->ptm_granularity << 8;
- pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
- dev->ptm_enabled = 1;
+ if (dev->ptm_root)
+ ctrl |= PCI_PTM_CTRL_ROOT;
- pci_ptm_info(dev);
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
+ return 0;
}
+/**
+ * pci_enable_ptm() - Enable Precision Time Measurement
+ * @dev: PCI device
+ * @granularity: pointer to return granularity
+ *
+ * Enable Precision Time Measurement for @dev. If successful and
+ * @granularity is non-NULL, return the Effective Granularity.
+ *
+ * Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
+ * is not a PTM Root and lacks an upstream path of PTM-enabled devices.
+ */
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
- int pos;
- u32 cap, ctrl;
- struct pci_dev *ups;
-
- if (!pci_is_pcie(dev))
- return -EINVAL;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return -EINVAL;
-
- pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
- if (!(cap & PCI_PTM_CAP_REQ))
- return -EINVAL;
-
- /*
- * For a PCIe Endpoint, PTM is only useful if the endpoint can
- * issue PTM requests to upstream devices that have PTM enabled.
- *
- * For Root Complex Integrated Endpoints, there is no upstream
- * device, so there must be some implementation-specific way to
- * associate the endpoint with a time source.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT) {
- ups = pci_upstream_bridge(dev);
- if (!ups || !ups->ptm_enabled)
- return -EINVAL;
+ int rc;
+ char clock_desc[8];
- dev->ptm_granularity = ups->ptm_granularity;
- } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
- dev->ptm_granularity = 0;
- } else
- return -EINVAL;
+ rc = __pci_enable_ptm(dev);
+ if (rc)
+ return rc;
- ctrl = PCI_PTM_CTRL_ENABLE;
- ctrl |= dev->ptm_granularity << 8;
- pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
dev->ptm_enabled = 1;
- pci_ptm_info(dev);
-
if (granularity)
*granularity = dev->ptm_granularity;
+
+ switch (dev->ptm_granularity) {
+ case 0:
+ snprintf(clock_desc, sizeof(clock_desc), "unknown");
+ break;
+ case 255:
+ snprintf(clock_desc, sizeof(clock_desc), ">254ns");
+ break;
+ default:
+ snprintf(clock_desc, sizeof(clock_desc), "%uns",
+ dev->ptm_granularity);
+ break;
+ }
+ pci_info(dev, "PTM enabled%s, %s granularity\n",
+ dev->ptm_root ? " (root)" : "", clock_desc);
+
return 0;
}
EXPORT_SYMBOL(pci_enable_ptm);
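
Endpoint drivers keep using the same exported helper after this rework; what changes is that the enable path now follows the PTM-capable upstream chain via dev->ptm_cap instead of re-reading the capability. A hedged sketch of a hypothetical endpoint probe using it (only pci_enable_ptm() and dev_info() are real APIs; the driver and its policy are illustrative):

/* Hypothetical endpoint probe fragment, assuming <linux/pci.h>. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	u8 granularity;
	int ret;

	ret = pci_enable_ptm(pdev, &granularity);
	if (ret)
		dev_info(&pdev->dev, "PTM unavailable, timestamps uncorrelated\n");
	else
		dev_info(&pdev->dev, "PTM enabled, effective granularity %uns\n",
			 granularity);

	/* PTM is treated as optional here: continue probing either way */
	return 0;
}
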
+static void __pci_disable_ptm(struct pci_dev *dev)
+{
+ u16 ptm = dev->ptm_cap;
+ u32 ctrl;
+
+ if (!ptm)
+ return;
+
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
+ ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
+}
+
+/**
+ * pci_disable_ptm() - Disable Precision Time Measurement
+ * @dev: PCI device
+ *
+ * Disable Precision Time Measurement for @dev.
+ */
+void pci_disable_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled) {
+ __pci_disable_ptm(dev);
+ dev->ptm_enabled = 0;
+ }
+}
+EXPORT_SYMBOL(pci_disable_ptm);
+
+/*
+ * Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on
+ * resume if necessary.
+ */
+void pci_suspend_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled)
+ __pci_disable_ptm(dev);
+}
+
+/* If PTM was enabled before suspend, re-enable it when resuming */
+void pci_resume_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled)
+ __pci_enable_ptm(dev);
+}
+
bool pcie_ptm_enabled(struct pci_dev *dev)
{
if (!dev)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c5286b027f00..b66fa42c4b1f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1297,7 +1297,7 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
- unsigned int cmax;
+ unsigned int cmax, buses;
/*
* Bus already configured by firmware, process it in the
@@ -1322,7 +1322,8 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
child->bridge_ctl = bctl;
}
- cmax = pci_scan_child_bus(child);
+ buses = subordinate - secondary;
+ cmax = pci_scan_child_bus_extend(child, buses);
if (cmax > subordinate)
pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n",
subordinate, cmax);
@@ -2920,8 +2921,8 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* hotplug bridges too much during the second scan below.
*/
used_buses++;
- if (cmax - max > 1)
- used_buses += cmax - max - 1;
+ if (max - cmax > 1)
+ used_buses += max - cmax - 1;
}
/* Scan bridges that need to be reconfigured */
@@ -2929,7 +2930,6 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
unsigned int buses = 0;
if (!hotplug_bridges && normal_bridges == 1) {
-
/*
* There is only one bridge on the bus (upstream
* port) so it gets all available buses which it
@@ -2938,7 +2938,6 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
*/
buses = available_buses;
} else if (dev->is_hotplug_bridge) {
-
/*
* Distribute the extra buses between hotplug
* bridges if any.
@@ -2957,7 +2956,7 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
/*
* Make sure a hotplug bridge has at least the minimum requested
* number of buses but allow it to grow up to the maximum available
- * bus number of there is room.
+ * bus number if there is room.
*/
if (bus->self && bus->self->is_hotplug_bridge) {
used_buses = max_t(unsigned int, available_buses,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4944798e75b5..285acc4aaccc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5956,3 +5956,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b1, aspm_l1_acceptable_latency
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c0, aspm_l1_acceptable_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency);
#endif
+
+#ifdef CONFIG_PCIE_DPC
+/*
+ * Intel Tiger Lake and Alder Lake BIOS has a bug that clears the DPC
+ * RP PIO Log Size of the integrated Thunderbolt PCIe Root Ports.
+ */
+static void dpc_log_size(struct pci_dev *dev)
+{
+ u16 dpc, val;
+
+ dpc = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
+ if (!dpc)
+ return;
+
+ pci_read_config_word(dev, dpc + PCI_EXP_DPC_CAP, &val);
+ if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
+ return;
+
+ if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) {
+ pci_info(dev, "Overriding RP PIO Log Size to 4\n");
+ dev->dpc_rp_log_size = 4;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a29, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+#endif
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8cb68e6f6ef9..b4096598dbcb 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1745,119 +1745,6 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
}
#endif
-/*
- * First try will not touch PCI bridge res.
- * Second and later try will clear small leaf bridge res.
- * Will stop till to the max depth if can not find good one.
- */
-void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
-{
- LIST_HEAD(realloc_head);
- /* List of resources that want additional resources */
- struct list_head *add_list = NULL;
- int tried_times = 0;
- enum release_type rel_type = leaf_only;
- LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
- int pci_try_num = 1;
- enum enable_type enable_local;
-
- /* Don't realloc if asked to do so */
- enable_local = pci_realloc_detect(bus, pci_realloc_enable);
- if (pci_realloc_enabled(enable_local)) {
- int max_depth = pci_bus_get_depth(bus);
-
- pci_try_num = max_depth + 1;
- dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
- max_depth, pci_try_num);
- }
-
-again:
- /*
- * Last try will use add_list, otherwise will try good to have as must
- * have, so can realloc parent bridge resource
- */
- if (tried_times + 1 == pci_try_num)
- add_list = &realloc_head;
- /*
- * Depth first, calculate sizes and alignments of all subordinate buses.
- */
- __pci_bus_size_bridges(bus, add_list);
-
- /* Depth last, allocate resources and update the hardware. */
- __pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
- tried_times++;
-
- /* Any device complain? */
- if (list_empty(&fail_head))
- goto dump;
-
- if (tried_times >= pci_try_num) {
- if (enable_local == undefined)
- dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (enable_local == auto_enabled)
- dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
-
- free_list(&fail_head);
- goto dump;
- }
-
- dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
- tried_times + 1);
-
- /* Third times and later will not check if it is leaf */
- if ((tried_times + 1) > 2)
- rel_type = whole_subtree;
-
- /*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
-
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
-
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
-
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
- }
- free_list(&fail_head);
-
- goto again;
-
-dump:
- /* Dump the resource on buses */
- pci_bus_dump_resources(bus);
-}
-
-void __init pci_assign_unassigned_resources(void)
-{
- struct pci_bus *root_bus;
-
- list_for_each_entry(root_bus, &pci_root_buses, node) {
- pci_assign_unassigned_root_bus_resources(root_bus);
-
- /* Make sure the root bridge has a companion ACPI device */
- if (ACPI_HANDLE(root_bus->bridge))
- acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
- }
-}
-
static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
struct list_head *add_list,
resource_size_t new_size)
@@ -2029,7 +1916,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
- struct list_head *add_list)
+ struct list_head *add_list)
{
struct resource available_io, available_mmio, available_mmio_pref;
@@ -2047,6 +1934,119 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
available_mmio_pref);
}
+/*
+ * First try will not touch PCI bridge res.
+ * Second and later try will clear small leaf bridge res.
+ * Will stop till to the max depth if can not find good one.
+ */
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
+{
+ LIST_HEAD(realloc_head);
+ /* List of resources that want additional resources */
+ struct list_head *add_list = NULL;
+ int tried_times = 0;
+ enum release_type rel_type = leaf_only;
+ LIST_HEAD(fail_head);
+ struct pci_dev_resource *fail_res;
+ int pci_try_num = 1;
+ enum enable_type enable_local;
+
+ /* Don't realloc if asked to do so */
+ enable_local = pci_realloc_detect(bus, pci_realloc_enable);
+ if (pci_realloc_enabled(enable_local)) {
+ int max_depth = pci_bus_get_depth(bus);
+
+ pci_try_num = max_depth + 1;
+ dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
+ }
+
+again:
+ /*
+ * Last try will use add_list, otherwise will try good to have as must
+ * have, so can realloc parent bridge resource
+ */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
+ /*
+ * Depth first, calculate sizes and alignments of all subordinate buses.
+ */
+ __pci_bus_size_bridges(bus, add_list);
+
+ /* Depth last, allocate resources and update the hardware. */
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (add_list)
+ BUG_ON(!list_empty(add_list));
+ tried_times++;
+
+ /* Any device complain? */
+ if (list_empty(&fail_head))
+ goto dump;
+
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined)
+ dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ else if (enable_local == auto_enabled)
+ dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+
+ free_list(&fail_head);
+ goto dump;
+ }
+
+ dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
+ tried_times + 1);
+
+ /* Third times and later will not check if it is leaf */
+ if ((tried_times + 1) > 2)
+ rel_type = whole_subtree;
+
+ /*
+ * Try to release leaf bridge's resources that doesn't fit resource of
+ * child device under that bridge.
+ */
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & PCI_RES_TYPE_MASK,
+ rel_type);
+
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
+ int idx;
+
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
+ }
+ free_list(&fail_head);
+
+ goto again;
+
+dump:
+ /* Dump the resource on buses */
+ pci_bus_dump_resources(bus);
+}
+
+void __init pci_assign_unassigned_resources(void)
+{
+ struct pci_bus *root_bus;
+
+ list_for_each_entry(root_bus, &pci_root_buses, node) {
+ pci_assign_unassigned_root_bus_resources(root_bus);
+
+ /* Make sure the root bridge has a companion ACPI device */
+ if (ACPI_HANDLE(root_bus->bridge))
+ acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
+ }
+}
+
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 439ac5f5907a..b492e67c3d87 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -214,6 +214,17 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
root = pci_find_parent_resource(dev, res);
if (!root) {
+ /*
+ * If dev is behind a bridge, accesses will only reach it
+ * if res is inside the relevant bridge window.
+ */
+ if (pci_upstream_bridge(dev))
+ return -ENXIO;
+
+ /*
+ * On the root bus, assume the host bridge will forward
+ * everything.
+ */
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 689271c4245c..7378e2f3e525 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -521,24 +521,14 @@ static int pcifront_rescan_root(struct pcifront_device *pdev,
int err;
struct pci_bus *b;
-#ifndef CONFIG_PCI_DOMAINS
- if (domain != 0) {
- dev_err(&pdev->xdev->dev,
- "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
- dev_err(&pdev->xdev->dev,
- "Please compile with CONFIG_PCI_DOMAINS\n");
- return -EINVAL;
- }
-#endif
-
- dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
- domain, bus);
-
b = pci_find_bus(domain, bus);
if (!b)
/* If the bus is unknown, create it. */
return pcifront_scan_root(pdev, domain, bus);
+ dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
+ domain, bus);
+
err = pcifront_scan_bus(pdev, domain, bus, b);
/* Claim resources before going "live" with our devices */
@@ -819,76 +809,73 @@ out:
return err;
}
-static int pcifront_try_connect(struct pcifront_device *pdev)
+static void pcifront_connect(struct pcifront_device *pdev)
{
- int err = -EFAULT;
+ int err;
int i, num_roots, len;
char str[64];
unsigned int domain, bus;
-
- /* Only connect once */
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
- XenbusStateInitialised)
- goto out;
-
- err = pcifront_connect_and_init_dma(pdev);
- if (err && err != -EEXIST) {
- xenbus_dev_fatal(pdev->xdev, err,
- "Error setting up PCI Frontend");
- goto out;
- }
-
err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
"root_num", "%d", &num_roots);
if (err == -ENOENT) {
xenbus_dev_error(pdev->xdev, err,
"No PCI Roots found, trying 0000:00");
- err = pcifront_scan_root(pdev, 0, 0);
+ err = pcifront_rescan_root(pdev, 0, 0);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
"Error scanning PCI root 0000:00");
- goto out;
+ return;
}
num_roots = 0;
} else if (err != 1) {
- if (err == 0)
- err = -EINVAL;
- xenbus_dev_fatal(pdev->xdev, err,
+ xenbus_dev_fatal(pdev->xdev, err >= 0 ? -EINVAL : err,
"Error reading number of PCI roots");
- goto out;
+ return;
}
for (i = 0; i < num_roots; i++) {
len = snprintf(str, sizeof(str), "root-%d", i);
- if (unlikely(len >= (sizeof(str) - 1))) {
- err = -ENOMEM;
- goto out;
- }
+ if (unlikely(len >= (sizeof(str) - 1)))
+ return;
err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
"%x:%x", &domain, &bus);
if (err != 2) {
- if (err >= 0)
- err = -EINVAL;
- xenbus_dev_fatal(pdev->xdev, err,
+ xenbus_dev_fatal(pdev->xdev, err >= 0 ? -EINVAL : err,
"Error reading PCI root %d", i);
- goto out;
+ return;
}
- err = pcifront_scan_root(pdev, domain, bus);
+ err = pcifront_rescan_root(pdev, domain, bus);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
"Error scanning PCI root %04x:%02x",
domain, bus);
- goto out;
+ return;
}
}
- err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+ xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+}
-out:
- return err;
+static void pcifront_try_connect(struct pcifront_device *pdev)
+{
+ int err;
+
+ /* Only connect once */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateInitialised)
+ return;
+
+ err = pcifront_connect_and_init_dma(pdev);
+ if (err && err != -EEXIST) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error setting up PCI Frontend");
+ return;
+ }
+
+ pcifront_connect(pdev);
}
static int pcifront_try_disconnect(struct pcifront_device *pdev)
@@ -914,80 +901,37 @@ out:
return err;
}
-static int pcifront_attach_devices(struct pcifront_device *pdev)
+static void pcifront_attach_devices(struct pcifront_device *pdev)
{
- int err = -EFAULT;
- int i, num_roots, len;
- unsigned int domain, bus;
- char str[64];
-
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ if (xenbus_read_driver_state(pdev->xdev->nodename) ==
XenbusStateReconfiguring)
- goto out;
-
- err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
- "root_num", "%d", &num_roots);
- if (err == -ENOENT) {
- xenbus_dev_error(pdev->xdev, err,
- "No PCI Roots found, trying 0000:00");
- err = pcifront_rescan_root(pdev, 0, 0);
- if (err) {
- xenbus_dev_fatal(pdev->xdev, err,
- "Error scanning PCI root 0000:00");
- goto out;
- }
- num_roots = 0;
- } else if (err != 1) {
- if (err == 0)
- err = -EINVAL;
- xenbus_dev_fatal(pdev->xdev, err,
- "Error reading number of PCI roots");
- goto out;
- }
-
- for (i = 0; i < num_roots; i++) {
- len = snprintf(str, sizeof(str), "root-%d", i);
- if (unlikely(len >= (sizeof(str) - 1))) {
- err = -ENOMEM;
- goto out;
- }
-
- err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
- "%x:%x", &domain, &bus);
- if (err != 2) {
- if (err >= 0)
- err = -EINVAL;
- xenbus_dev_fatal(pdev->xdev, err,
- "Error reading PCI root %d", i);
- goto out;
- }
-
- err = pcifront_rescan_root(pdev, domain, bus);
- if (err) {
- xenbus_dev_fatal(pdev->xdev, err,
- "Error scanning PCI root %04x:%02x",
- domain, bus);
- goto out;
- }
- }
-
- xenbus_switch_state(pdev->xdev, XenbusStateConnected);
-
-out:
- return err;
+ pcifront_connect(pdev);
}
static int pcifront_detach_devices(struct pcifront_device *pdev)
{
int err = 0;
int i, num_devs;
+ enum xenbus_state state;
unsigned int domain, bus, slot, func;
struct pci_dev *pci_dev;
char str[64];
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
- XenbusStateConnected)
+ state = xenbus_read_driver_state(pdev->xdev->nodename);
+ if (state == XenbusStateInitialised) {
+ dev_dbg(&pdev->xdev->dev, "Handle skipped connect.\n");
+ /* We missed Connected and need to initialize. */
+ err = pcifront_connect_and_init_dma(pdev);
+ if (err && err != -EEXIST) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error setting up PCI Frontend");
+ goto out;
+ }
+
+ goto out_switch_state;
+ } else if (state != XenbusStateConnected) {
goto out;
+ }
err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
&num_devs);
@@ -1048,6 +992,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
domain, bus, slot, func);
}
+ out_switch_state:
err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);
out:
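
The reworked pcifront_connect() above folds the "treat a short match count as -EINVAL" check into the xenbus_dev_fatal() call. A stand-alone sketch of that normalization, assuming only that xenbus_scanf() returns the number of matched fields on success and a negative errno on failure (the function name is illustrative):

#include <linux/errno.h>
#include <xen/xenbus.h>

/* Illustrative only: read "root_num" and normalize the error the same way. */
static int example_read_root_num(struct xenbus_device *xdev, int *num_roots)
{
	int err;

	err = xenbus_scanf(XBT_NIL, xdev->otherend, "root_num", "%d", num_roots);
	if (err == 1)
		return 0;		/* exactly one field matched */

	/* A non-negative return means fewer fields matched than expected. */
	return err >= 0 ? -EINVAL : err;
}
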
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 44c07ea487f4..341010f20b77 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -185,7 +185,7 @@ config APPLE_M1_CPU_PMU
config ALIBABA_UNCORE_DRW_PMU
tristate "Alibaba T-Head Yitian 710 DDR Sub-system Driveway PMU driver"
- depends on ARM64 || COMPILE_TEST
+ depends on (ARM64 && ACPI) || COMPILE_TEST
help
Support for Driveway PMU events monitoring on Yitian 710 DDR
Sub-system.
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 82729b874f09..a7689fecb49d 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -658,8 +658,8 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drw_pmu->cfg_base = devm_ioremap_resource(&pdev->dev, res);
- if (!drw_pmu->cfg_base)
- return -ENOMEM;
+ if (IS_ERR(drw_pmu->cfg_base))
+ return PTR_ERR(drw_pmu->cfg_base);
name = devm_kasprintf(drw_pmu->dev, GFP_KERNEL, "ali_drw_%llx",
(u64) (res->start >> ALI_DRW_PMU_PA_SHIFT));
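
The fix above matters because devm_ioremap_resource() reports failure with an ERR_PTR-encoded pointer, never NULL, so the old !drw_pmu->cfg_base test could not fire. A minimal probe-side sketch of the idiom, with the driver state elided:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))		/* failure is ERR_PTR(-E...), never NULL */
		return PTR_ERR(base);

	/* a real driver would stash 'base' in its private state here */
	return 0;
}
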
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 6ce05ef4844d..00e3a637f7b6 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -44,7 +44,9 @@
* This allows us to perform the check, i.e, perfmon_capable(),
* in the context of the event owner, once, during the event_init().
*/
-#define SPE_PMU_HW_FLAGS_CX BIT(0)
+#define SPE_PMU_HW_FLAGS_CX 0x00001
+
+static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);
static void set_spe_event_has_cx(struct perf_event *event)
{
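
The static_assert() added above documents at compile time that the driver-private flag stays inside the flag space the core reserves for architecture code. A generic sketch of the same pattern, with invented flag names:

#include <linux/build_bug.h>	/* static_assert() */

/* Invented example: a driver-private flag that must live inside the bits
 * reserved for drivers by some core-defined mask.
 */
#define EXAMPLE_CORE_RESERVED_MASK	0x0000ffffUL
#define EXAMPLE_DRIVER_FLAG		0x00000001UL

static_assert((EXAMPLE_CORE_RESERVED_MASK & EXAMPLE_DRIVER_FLAG) ==
	      EXAMPLE_DRIVER_FLAG,
	      "driver flag escapes the reserved flag space");
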
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 2c20b0de8cb0..ca9e20bfc7ac 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -14,7 +14,6 @@
#define RISCV_PMU_LEGACY_CYCLE 0
#define RISCV_PMU_LEGACY_INSTRET 1
-#define RISCV_PMU_LEGACY_NUM_CTR 2
static bool pmu_init_done;
@@ -83,7 +82,8 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
{
pr_info("Legacy PMU implementation is available\n");
- pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR;
+ pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
+ BIT(RISCV_PMU_LEGACY_INSTRET);
pmu->ctr_start = pmu_legacy_ctr_start;
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
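
Replacing the flat counter count with a counter bitmask means "which counters exist" and "is this index valid" become bit operations rather than a range check. A small sketch of that bookkeeping, independent of the riscv_pmu structures:

#include <linux/bitmap.h>
#include <linux/printk.h>

/* Sketch: walk the counters advertised by a mask such as pmu->cmask. */
static void example_walk_counters(unsigned long cmask)
{
	unsigned int idx;

	for_each_set_bit(idx, &cmask, BITS_PER_LONG)
		pr_info("counter %u is implemented\n", idx);

	/* validity check equivalent to the one in pmu_sbi_ctr_get_idx() */
	if (!test_bit(2, &cmask))
		pr_info("counter 2 is not implemented\n");
}
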
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 8de4ca2fef21..3852c18362f5 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -18,6 +18,7 @@
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>
+#include <linux/sched/clock.h>
#include <asm/sbi.h>
#include <asm/hwcap.h>
@@ -271,7 +272,6 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
struct sbiret ret;
int idx;
uint64_t cbase = 0;
- uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
unsigned long cflags = 0;
if (event->attr.exclude_kernel)
@@ -281,11 +281,12 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
- ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
- cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+ rvpmu->cmask, cflags, hwc->event_base, hwc->config,
+ hwc->config >> 32);
#else
- ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
- cflags, hwc->event_base, hwc->config, 0);
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+ rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
@@ -294,7 +295,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
}
idx = ret.value;
- if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value)
+ if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
return -ENOENT;
/* Additional sanity check for the counter id */
@@ -463,7 +464,7 @@ static int pmu_sbi_find_num_ctrs(void)
return sbi_err_map_linux_errno(ret.error);
}
-static int pmu_sbi_get_ctrinfo(int nctr)
+static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
{
struct sbiret ret;
int i, num_hw_ctr = 0, num_fw_ctr = 0;
@@ -478,6 +479,9 @@ static int pmu_sbi_get_ctrinfo(int nctr)
if (ret.error)
/* The logical counter ids are not expected to be contiguous */
continue;
+
+ *mask |= BIT(i);
+
cinfo.value = ret.value;
if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
num_fw_ctr++;
@@ -498,7 +502,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
* which may include counters that are not enabled yet.
*/
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
- 0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0);
+ 0, pmu->cmask, 0, 0, 0, 0);
}
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -567,6 +571,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
unsigned long overflow;
unsigned long overflowed_ctrs = 0;
struct cpu_hw_events *cpu_hw_evt = dev;
+ u64 start_clock = sched_clock();
if (WARN_ON_ONCE(!cpu_hw_evt))
return IRQ_NONE;
@@ -635,7 +640,9 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
perf_event_overflow(event, &data, regs);
}
}
+
pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
+ perf_sample_event_took(sched_clock() - start_clock);
return IRQ_HANDLED;
}
@@ -645,8 +652,11 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
- /* Enable the access for TIME csr only from the user mode now */
- csr_write(CSR_SCOUNTEREN, 0x2);
+ /*
+ * Enable the access for CYCLE, TIME, and INSTRET CSRs from userspace,
+ * as is necessary to maintain uABI compatibility.
+ */
+ csr_write(CSR_SCOUNTEREN, 0x7);
/* Stop all the counters so that they can be enabled from perf */
pmu_sbi_stop_all(pmu);
@@ -788,8 +798,9 @@ static void riscv_pmu_destroy(struct riscv_pmu *pmu)
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
struct riscv_pmu *pmu = NULL;
- int num_counters;
+ unsigned long cmask = 0;
int ret = -ENODEV;
+ int num_counters;
pr_info("SBI PMU extension is available\n");
pmu = riscv_pmu_alloc();
@@ -803,7 +814,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
}
/* cache all the information about counters now */
- if (pmu_sbi_get_ctrinfo(num_counters))
+ if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
goto out_free;
ret = pmu_sbi_setup_irqs(pmu, pdev);
@@ -812,8 +823,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
}
+
pmu->pmu.attr_groups = riscv_pmu_attr_groups;
- pmu->num_counters = num_counters;
+ pmu->cmask = cmask;
pmu->ctr_start = pmu_sbi_ctr_start;
pmu->ctr_stop = pmu_sbi_ctr_stop;
pmu->event_map = pmu_sbi_event_map;
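
The riscv_pmu_sbi.c hunks also wrap the overflow handler in a sched_clock() measurement and report it via perf_sample_event_took(), so the perf core can throttle the sample rate when the handler runs long. A stripped-down sketch of that pattern (the handler body is a placeholder):

#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/sched/clock.h>

static irqreturn_t example_pmu_irq(int irq, void *dev)
{
	u64 start_clock = sched_clock();

	/* ... read overflow status and push samples to perf here ... */

	/* let the perf core account for the time spent in the handler */
	perf_sample_event_took(sched_clock() - start_clock);

	return IRQ_HANDLED;
}
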
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 300b0f2b5f84..7bd00a11d074 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -91,6 +91,7 @@ source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
+source "drivers/phy/sunplus/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
source "drivers/phy/intel/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 01e9efffc726..54f312c10a40 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -31,6 +31,7 @@ obj-y += allwinner/ \
samsung/ \
socionext/ \
st/ \
+ sunplus/ \
tegra/ \
ti/ \
xilinx/
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index d5f3b42eb8ce..3a3831f6059a 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -768,7 +768,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
if (data->cfg->dedicated_clocks)
snprintf(name, sizeof(name), "usb%d_phy", i);
else
- strlcpy(name, "usb_phy", sizeof(name));
+ strscpy(name, "usb_phy", sizeof(name));
phy->clk = devm_clk_get(dev, name);
if (IS_ERR(phy->clk)) {
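
strscpy() is the preferred replacement for strlcpy(): it always NUL-terminates and returns -E2BIG on truncation instead of the source length, so overflow can be detected directly. A tiny sketch, with an invented helper name:

#include <linux/string.h>
#include <linux/types.h>

/* Sketch: detect truncation via the strscpy() return value. */
static bool example_copy_name(char *dst, size_t dst_size, const char *src)
{
	return strscpy(dst, src, dst_size) >= 0;	/* -E2BIG means truncated */
}
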
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
index 1027ece6ca12..a3e1108b736d 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
@@ -197,7 +197,7 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
struct phy_provider *phy;
struct device *dev = &pdev->dev;
struct phy_axg_mipi_pcie_analog_priv *priv;
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev->of_node, *parent_np;
struct regmap *map;
int ret;
@@ -206,7 +206,9 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
return -ENOMEM;
/* Get the hhi system controller node */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ parent_np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(map)) {
dev_err(dev,
"failed to get HHI regmap\n");
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index 54d65a6f0fcc..d2a1da8d9e58 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -388,7 +388,6 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
struct phy_g12a_usb3_pcie_priv *priv;
struct phy_provider *phy_provider;
void __iomem *base;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -408,43 +407,24 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
if (IS_ERR(priv->regmap_cr))
return PTR_ERR(priv->regmap_cr);
- priv->clk_ref = devm_clk_get(dev, "ref_clk");
+ priv->clk_ref = devm_clk_get_enabled(dev, "ref_clk");
if (IS_ERR(priv->clk_ref))
return PTR_ERR(priv->clk_ref);
- ret = clk_prepare_enable(priv->clk_ref);
- if (ret)
- return ret;
-
priv->reset = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(priv->reset)) {
- ret = PTR_ERR(priv->reset);
- goto err_disable_clk_ref;
- }
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
priv->phy = devm_phy_create(dev, np, &phy_g12a_usb3_pcie_ops);
- if (IS_ERR(priv->phy)) {
- ret = PTR_ERR(priv->phy);
- dev_err_probe(dev, ret, "failed to create PHY\n");
- goto err_disable_clk_ref;
- }
+ if (IS_ERR(priv->phy))
+ return dev_err_probe(dev, PTR_ERR(priv->phy), "failed to create PHY\n");
phy_set_drvdata(priv->phy, priv);
dev_set_drvdata(dev, priv);
phy_provider = devm_of_phy_provider_register(dev,
phy_g12a_usb3_pcie_xlate);
- if (IS_ERR(phy_provider)) {
- ret = PTR_ERR(phy_provider);
- goto err_disable_clk_ref;
- }
-
- return 0;
-
-err_disable_clk_ref:
- clk_disable_unprepare(priv->clk_ref);
-
- return ret;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id phy_g12a_usb3_pcie_of_match[] = {
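
devm_clk_get_enabled() both looks up the clock and prepares/enables it, registering the matching disable/unprepare with devres, which is what lets the conversion above delete the err_disable_clk_ref unwind label. A minimal sketch; "ref_clk" is the name used by the driver above, the rest is illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_ref_clk(struct device *dev)
{
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, "ref_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * No clk_disable_unprepare() on error paths or in .remove():
	 * the devres core undoes the prepare/enable automatically.
	 */
	return 0;
}
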
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index ad7d2edfc414..c93286483b42 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -59,7 +59,7 @@ struct imx8_pcie_phy {
bool clkreq_unused;
};
-static int imx8_pcie_phy_init(struct phy *phy)
+static int imx8_pcie_phy_power_on(struct phy *phy)
{
int ret;
u32 val, pad_mode;
@@ -137,14 +137,14 @@ static int imx8_pcie_phy_init(struct phy *phy)
return ret;
}
-static int imx8_pcie_phy_power_on(struct phy *phy)
+static int imx8_pcie_phy_init(struct phy *phy)
{
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
return clk_prepare_enable(imx8_phy->clk);
}
-static int imx8_pcie_phy_power_off(struct phy *phy)
+static int imx8_pcie_phy_exit(struct phy *phy)
{
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
@@ -155,8 +155,8 @@ static int imx8_pcie_phy_power_off(struct phy *phy)
static const struct phy_ops imx8_pcie_phy_ops = {
.init = imx8_pcie_phy_init,
+ .exit = imx8_pcie_phy_exit,
.power_on = imx8_pcie_phy_power_on,
- .power_off = imx8_pcie_phy_power_off,
.owner = THIS_MODULE,
};
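
The imx8m rename works because the generic PHY core drives consumers through phy_init() then phy_power_on(), and later phy_power_off() then phy_exit(), so clock handling in init/exit brackets the configuration done in power_on. A skeletal sketch with invented callbacks; the drvdata layout is an assumption for the example:

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/phy/phy.h>

static int example_phy_init(struct phy *phy)
{
	struct clk *clk = phy_get_drvdata(phy);	/* assumed for the sketch */

	return clk_prepare_enable(clk);
}

static int example_phy_exit(struct phy *phy)
{
	struct clk *clk = phy_get_drvdata(phy);

	clk_disable_unprepare(clk);
	return 0;
}

static int example_phy_power_on(struct phy *phy)
{
	/* program pads, polarity, reference-clock selection, ... */
	return 0;
}

static const struct phy_ops example_phy_ops = {
	.init		= example_phy_init,
	.exit		= example_phy_exit,
	.power_on	= example_phy_power_on,
	.owner		= THIS_MODULE,
};
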
diff --git a/drivers/phy/intel/phy-intel-lgm-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c
index 157683d10367..6010e246d52e 100644
--- a/drivers/phy/intel/phy-intel-lgm-combo.c
+++ b/drivers/phy/intel/phy-intel-lgm-combo.c
@@ -413,44 +413,29 @@ static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
u32 val;
cbphy->core_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(cbphy->core_clk)) {
- ret = PTR_ERR(cbphy->core_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get clk failed:%d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->core_clk))
+ return dev_err_probe(dev, PTR_ERR(cbphy->core_clk),
+ "Get clk failed!\n");
cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
- if (IS_ERR(cbphy->core_rst)) {
- ret = PTR_ERR(cbphy->core_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get core reset control err: %d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->core_rst))
+ return dev_err_probe(dev, PTR_ERR(cbphy->core_rst),
+ "Get core reset control err!\n");
cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
- if (IS_ERR(cbphy->phy_rst)) {
- ret = PTR_ERR(cbphy->phy_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get PHY reset control err: %d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->phy_rst))
+ return dev_err_probe(dev, PTR_ERR(cbphy->phy_rst),
+ "Get PHY reset control err!\n");
cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
- if (IS_ERR(cbphy->iphy[0].app_rst)) {
- ret = PTR_ERR(cbphy->iphy[0].app_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->iphy[0].app_rst))
+ return dev_err_probe(dev, PTR_ERR(cbphy->iphy[0].app_rst),
+ "Get phy0 reset control err!\n");
cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
- if (IS_ERR(cbphy->iphy[1].app_rst)) {
- ret = PTR_ERR(cbphy->iphy[1].app_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->iphy[1].app_rst))
+ return dev_err_probe(dev, PTR_ERR(cbphy->iphy[1].app_rst),
+ "Get phy1 reset control err!\n");
cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
if (IS_ERR(cbphy->app_base))
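
dev_err_probe() returns the error it is given and suppresses the message for -EPROBE_DEFER (recording the reason in devices_deferred instead), which is why each four-line block above collapses into a single statement. A sketch of the pattern with an invented clock lookup:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev, struct clk **out)
{
	*out = devm_clk_get(dev, NULL);
	if (IS_ERR(*out))
		return dev_err_probe(dev, PTR_ERR(*out),
				     "failed to get core clock\n");

	return 0;
}
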
diff --git a/drivers/phy/mediatek/phy-mtk-dp.c b/drivers/phy/mediatek/phy-mtk-dp.c
index 31266e7ca324..232fd3f1ff1b 100644
--- a/drivers/phy/mediatek/phy-mtk-dp.c
+++ b/drivers/phy/mediatek/phy-mtk-dp.c
@@ -85,7 +85,7 @@ struct mtk_dp_phy {
static int mtk_dp_phy_init(struct phy *phy)
{
struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy);
- u32 driving_params[] = {
+ static const u32 driving_params[] = {
DRIVING_PARAM_3_DEFAULT,
DRIVING_PARAM_4_DEFAULT,
DRIVING_PARAM_5_DEFAULT,
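
Marking the table "static const" moves it into read-only data so it is not rebuilt on the stack every time the function runs. A tiny sketch with made-up values:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: a constant parameter table lives in .rodata, not on the stack. */
static const u32 example_driving_params[] = {
	0x10, 0x20, 0x30, 0x40,
};

static u32 example_lookup(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(example_driving_params))
		return 0;

	return example_driving_params[idx];
}
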
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
index b74c65a1762c..e51b2d13eab4 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
@@ -5,83 +5,66 @@
*/
#include "phy-mtk-hdmi.h"
+#include "phy-mtk-io.h"
#define HDMI_CON0 0x00
-#define RG_HDMITX_DRV_IBIAS 0
-#define RG_HDMITX_DRV_IBIAS_MASK (0x3f << 0)
-#define RG_HDMITX_EN_SER 12
-#define RG_HDMITX_EN_SER_MASK (0x0f << 12)
-#define RG_HDMITX_EN_SLDO 16
-#define RG_HDMITX_EN_SLDO_MASK (0x0f << 16)
-#define RG_HDMITX_EN_PRED 20
-#define RG_HDMITX_EN_PRED_MASK (0x0f << 20)
-#define RG_HDMITX_EN_IMP 24
-#define RG_HDMITX_EN_IMP_MASK (0x0f << 24)
-#define RG_HDMITX_EN_DRV 28
-#define RG_HDMITX_EN_DRV_MASK (0x0f << 28)
+#define RG_HDMITX_DRV_IBIAS_MASK GENMASK(5, 0)
+#define RG_HDMITX_EN_SER_MASK GENMASK(15, 12)
+#define RG_HDMITX_EN_SLDO_MASK GENMASK(19, 16)
+#define RG_HDMITX_EN_PRED_MASK GENMASK(23, 20)
+#define RG_HDMITX_EN_IMP_MASK GENMASK(27, 24)
+#define RG_HDMITX_EN_DRV_MASK GENMASK(31, 28)
#define HDMI_CON1 0x04
-#define RG_HDMITX_PRED_IBIAS 18
-#define RG_HDMITX_PRED_IBIAS_MASK (0x0f << 18)
-#define RG_HDMITX_PRED_IMP (0x01 << 22)
-#define RG_HDMITX_DRV_IMP 26
-#define RG_HDMITX_DRV_IMP_MASK (0x3f << 26)
+#define RG_HDMITX_PRED_IBIAS_MASK GENMASK(21, 18)
+#define RG_HDMITX_PRED_IMP BIT(22)
+#define RG_HDMITX_DRV_IMP_MASK GENMASK(31, 26)
#define HDMI_CON2 0x08
-#define RG_HDMITX_EN_TX_CKLDO (0x01 << 0)
-#define RG_HDMITX_EN_TX_POSDIV (0x01 << 1)
-#define RG_HDMITX_TX_POSDIV 3
-#define RG_HDMITX_TX_POSDIV_MASK (0x03 << 3)
-#define RG_HDMITX_EN_MBIAS (0x01 << 6)
-#define RG_HDMITX_MBIAS_LPF_EN (0x01 << 7)
+#define RG_HDMITX_EN_TX_CKLDO BIT(0)
+#define RG_HDMITX_EN_TX_POSDIV BIT(1)
+#define RG_HDMITX_TX_POSDIV_MASK GENMASK(4, 3)
+#define RG_HDMITX_EN_MBIAS BIT(6)
+#define RG_HDMITX_MBIAS_LPF_EN BIT(7)
#define HDMI_CON4 0x10
-#define RG_HDMITX_RESERVE_MASK (0xffffffff << 0)
+#define RG_HDMITX_RESERVE_MASK GENMASK(31, 0)
#define HDMI_CON6 0x18
-#define RG_HTPLL_BR 0
-#define RG_HTPLL_BR_MASK (0x03 << 0)
-#define RG_HTPLL_BC 2
-#define RG_HTPLL_BC_MASK (0x03 << 2)
-#define RG_HTPLL_BP 4
-#define RG_HTPLL_BP_MASK (0x0f << 4)
-#define RG_HTPLL_IR 8
-#define RG_HTPLL_IR_MASK (0x0f << 8)
-#define RG_HTPLL_IC 12
-#define RG_HTPLL_IC_MASK (0x0f << 12)
-#define RG_HTPLL_POSDIV 16
-#define RG_HTPLL_POSDIV_MASK (0x03 << 16)
-#define RG_HTPLL_PREDIV 18
-#define RG_HTPLL_PREDIV_MASK (0x03 << 18)
-#define RG_HTPLL_FBKSEL 20
-#define RG_HTPLL_FBKSEL_MASK (0x03 << 20)
-#define RG_HTPLL_RLH_EN (0x01 << 22)
-#define RG_HTPLL_FBKDIV 24
-#define RG_HTPLL_FBKDIV_MASK (0x7f << 24)
-#define RG_HTPLL_EN (0x01 << 31)
+#define RG_HTPLL_BR_MASK GENMASK(1, 0)
+#define RG_HTPLL_BC_MASK GENMASK(3, 2)
+#define RG_HTPLL_BP_MASK GENMASK(7, 4)
+#define RG_HTPLL_IR_MASK GENMASK(11, 8)
+#define RG_HTPLL_IC_MASK GENMASK(15, 12)
+#define RG_HTPLL_POSDIV_MASK GENMASK(17, 16)
+#define RG_HTPLL_PREDIV_MASK GENMASK(19, 18)
+#define RG_HTPLL_FBKSEL_MASK GENMASK(21, 20)
+#define RG_HTPLL_RLH_EN BIT(22)
+#define RG_HTPLL_FBKDIV_MASK GENMASK(30, 24)
+#define RG_HTPLL_EN BIT(31)
#define HDMI_CON7 0x1c
-#define RG_HTPLL_AUTOK_EN (0x01 << 23)
-#define RG_HTPLL_DIVEN 28
-#define RG_HTPLL_DIVEN_MASK (0x07 << 28)
+#define RG_HTPLL_AUTOK_EN BIT(23)
+#define RG_HTPLL_DIVEN_MASK GENMASK(30, 28)
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ mtk_phy_set_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
usleep_range(80, 100);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_EN);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
usleep_range(80, 100);
return 0;
}
@@ -89,20 +72,21 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_EN);
usleep_range(80, 100);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_phy_clear_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
usleep_range(80, 100);
}
@@ -116,6 +100,7 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
u32 pos_div;
if (rate <= 64000000)
@@ -125,37 +110,25 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
else
pos_div = 1;
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
- RG_HTPLL_IC_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
- RG_HTPLL_IR_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON2, (pos_div << RG_HDMITX_TX_POSDIV),
- RG_HDMITX_TX_POSDIV_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (1 << RG_HTPLL_FBKSEL),
- RG_HTPLL_FBKSEL_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (19 << RG_HTPLL_FBKDIV),
- RG_HTPLL_FBKDIV_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON7, (0x2 << RG_HTPLL_DIVEN),
- RG_HTPLL_DIVEN_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0xc << RG_HTPLL_BP),
- RG_HTPLL_BP_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x2 << RG_HTPLL_BC),
- RG_HTPLL_BC_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_BR),
- RG_HTPLL_BR_MASK);
-
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PRED_IMP);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x3 << RG_HDMITX_PRED_IBIAS),
- RG_HDMITX_PRED_IBIAS_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x28 << RG_HDMITX_DRV_IMP),
- RG_HDMITX_DRV_IMP_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 0x28, RG_HDMITX_RESERVE_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, (0xa << RG_HDMITX_DRV_IBIAS),
- RG_HDMITX_DRV_IBIAS_MASK);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_PREDIV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_IC_MASK, 0x1);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_IR_MASK, 0x1);
+ mtk_phy_update_field(base + HDMI_CON2, RG_HDMITX_TX_POSDIV_MASK, pos_div);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_FBKSEL_MASK, 1);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_FBKDIV_MASK, 19);
+ mtk_phy_update_field(base + HDMI_CON7, RG_HTPLL_DIVEN_MASK, 0x2);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BP_MASK, 0xc);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BC_MASK, 0x2);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BR_MASK, 0x1);
+
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PRED_IMP);
+ mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PRED_IBIAS_MASK, 0x3);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
+ mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_DRV_IMP_MASK, 0x28);
+ mtk_phy_update_field(base + HDMI_CON4, RG_HDMITX_RESERVE_MASK, 0x28);
+ mtk_phy_update_field(base + HDMI_CON0, RG_HDMITX_DRV_IBIAS_MASK, 0xa);
return 0;
}
@@ -164,9 +137,10 @@ static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
unsigned long out_rate, val;
+ u32 tmp;
- val = (readl(hdmi_phy->regs + HDMI_CON6)
- & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+ tmp = readl(hdmi_phy->regs + HDMI_CON6);
+ val = FIELD_GET(RG_HTPLL_PREDIV_MASK, tmp);
switch (val) {
case 0x00:
out_rate = parent_rate;
@@ -179,14 +153,14 @@ static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
break;
}
- val = (readl(hdmi_phy->regs + HDMI_CON6)
- & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+ val = FIELD_GET(RG_HTPLL_FBKDIV_MASK, tmp);
out_rate *= (val + 1) * 2;
- val = (readl(hdmi_phy->regs + HDMI_CON2)
- & RG_HDMITX_TX_POSDIV_MASK);
- out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
- if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+ tmp = readl(hdmi_phy->regs + HDMI_CON2);
+ val = FIELD_GET(RG_HDMITX_TX_POSDIV_MASK, tmp);
+ out_rate >>= val;
+
+ if (tmp & RG_HDMITX_EN_TX_POSDIV)
out_rate /= 5;
return out_rate;
@@ -202,37 +176,41 @@ static const struct clk_ops mtk_hdmi_phy_pll_ops = {
static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ void __iomem *base = hdmi_phy->regs;
+
+ mtk_phy_set_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
usleep_range(80, 100);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_EN);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
usleep_range(80, 100);
}
static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ void __iomem *base = hdmi_phy->regs;
+
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_EN);
usleep_range(80, 100);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_phy_clear_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
usleep_range(80, 100);
}
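
The mt2701 rewrite above replaces paired shift/mask defines with single GENMASK() masks, letting FIELD_PREP()/FIELD_GET() derive the shift from the mask at compile time. A self-contained sketch with an invented register layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Invented register layout, mirroring the GENMASK() style used above. */
#define EXAMPLE_CON0_FBKDIV	GENMASK(30, 24)	/* 7-bit feedback divider */
#define EXAMPLE_CON0_EN		BIT(31)

static u32 example_compose(u32 old, u32 fbkdiv)
{
	u32 val = old;

	val &= ~EXAMPLE_CON0_FBKDIV;
	val |= FIELD_PREP(EXAMPLE_CON0_FBKDIV, fbkdiv);	/* shift derived from mask */
	val |= EXAMPLE_CON0_EN;

	return val;
}

static u32 example_extract(u32 val)
{
	return FIELD_GET(EXAMPLE_CON0_FBKDIV, val);
}
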
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
index 6cdfdf5a698a..d04758396046 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
@@ -5,121 +5,99 @@
*/
#include "phy-mtk-hdmi.h"
+#include "phy-mtk-io.h"
#define HDMI_CON0 0x00
#define RG_HDMITX_PLL_EN BIT(31)
-#define RG_HDMITX_PLL_FBKDIV (0x7f << 24)
-#define PLL_FBKDIV_SHIFT 24
-#define RG_HDMITX_PLL_FBKSEL (0x3 << 22)
-#define PLL_FBKSEL_SHIFT 22
-#define RG_HDMITX_PLL_PREDIV (0x3 << 20)
-#define PREDIV_SHIFT 20
-#define RG_HDMITX_PLL_POSDIV (0x3 << 18)
-#define POSDIV_SHIFT 18
-#define RG_HDMITX_PLL_RST_DLY (0x3 << 16)
-#define RG_HDMITX_PLL_IR (0xf << 12)
-#define PLL_IR_SHIFT 12
-#define RG_HDMITX_PLL_IC (0xf << 8)
-#define PLL_IC_SHIFT 8
-#define RG_HDMITX_PLL_BP (0xf << 4)
-#define PLL_BP_SHIFT 4
-#define RG_HDMITX_PLL_BR (0x3 << 2)
-#define PLL_BR_SHIFT 2
-#define RG_HDMITX_PLL_BC (0x3 << 0)
-#define PLL_BC_SHIFT 0
+#define RG_HDMITX_PLL_FBKDIV GENMASK(30, 24)
+#define RG_HDMITX_PLL_FBKSEL GENMASK(23, 22)
+#define RG_HDMITX_PLL_PREDIV GENMASK(21, 20)
+#define RG_HDMITX_PLL_POSDIV GENMASK(19, 18)
+#define RG_HDMITX_PLL_RST_DLY GENMASK(17, 16)
+#define RG_HDMITX_PLL_IR GENMASK(15, 12)
+#define RG_HDMITX_PLL_IC GENMASK(11, 8)
+#define RG_HDMITX_PLL_BP GENMASK(7, 4)
+#define RG_HDMITX_PLL_BR GENMASK(3, 2)
+#define RG_HDMITX_PLL_BC GENMASK(1, 0)
#define HDMI_CON1 0x04
-#define RG_HDMITX_PLL_DIVEN (0x7 << 29)
-#define PLL_DIVEN_SHIFT 29
+#define RG_HDMITX_PLL_DIVEN GENMASK(31, 29)
#define RG_HDMITX_PLL_AUTOK_EN BIT(28)
-#define RG_HDMITX_PLL_AUTOK_KF (0x3 << 26)
-#define RG_HDMITX_PLL_AUTOK_KS (0x3 << 24)
+#define RG_HDMITX_PLL_AUTOK_KF GENMASK(27, 26)
+#define RG_HDMITX_PLL_AUTOK_KS GENMASK(25, 24)
#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23)
-#define RG_HDMITX_PLL_BAND (0x3f << 16)
+#define RG_HDMITX_PLL_BAND GENMASK(21, 16)
#define RG_HDMITX_PLL_REF_SEL BIT(15)
#define RG_HDMITX_PLL_BIAS_EN BIT(14)
#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13)
#define RG_HDMITX_PLL_TXDIV_EN BIT(12)
-#define RG_HDMITX_PLL_TXDIV (0x3 << 10)
-#define PLL_TXDIV_SHIFT 10
+#define RG_HDMITX_PLL_TXDIV GENMASK(11, 10)
#define RG_HDMITX_PLL_LVROD_EN BIT(9)
#define RG_HDMITX_PLL_MONVC_EN BIT(8)
#define RG_HDMITX_PLL_MONCK_EN BIT(7)
#define RG_HDMITX_PLL_MONREF_EN BIT(6)
#define RG_HDMITX_PLL_TST_EN BIT(5)
#define RG_HDMITX_PLL_TST_CK_EN BIT(4)
-#define RG_HDMITX_PLL_TST_SEL (0xf << 0)
+#define RG_HDMITX_PLL_TST_SEL GENMASK(3, 0)
#define HDMI_CON2 0x08
-#define RGS_HDMITX_PLL_AUTOK_BAND (0x7f << 8)
+#define RGS_HDMITX_PLL_AUTOK_BAND GENMASK(14, 8)
#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1)
#define RG_HDMITX_EN_TX_CKLDO BIT(0)
#define HDMI_CON3 0x0c
-#define RG_HDMITX_SER_EN (0xf << 28)
-#define RG_HDMITX_PRD_EN (0xf << 24)
-#define RG_HDMITX_PRD_IMP_EN (0xf << 20)
-#define RG_HDMITX_DRV_EN (0xf << 16)
-#define RG_HDMITX_DRV_IMP_EN (0xf << 12)
-#define DRV_IMP_EN_SHIFT 12
+#define RG_HDMITX_SER_EN GENMASK(31, 28)
+#define RG_HDMITX_PRD_EN GENMASK(27, 24)
+#define RG_HDMITX_PRD_IMP_EN GENMASK(23, 20)
+#define RG_HDMITX_DRV_EN GENMASK(19, 16)
+#define RG_HDMITX_DRV_IMP_EN GENMASK(15, 12)
#define RG_HDMITX_MHLCK_FORCE BIT(10)
#define RG_HDMITX_MHLCK_PPIX_EN BIT(9)
#define RG_HDMITX_MHLCK_EN BIT(8)
-#define RG_HDMITX_SER_DIN_SEL (0xf << 4)
+#define RG_HDMITX_SER_DIN_SEL GENMASK(7, 4)
#define RG_HDMITX_SER_5T1_BIST_EN BIT(3)
#define RG_HDMITX_SER_BIST_TOG BIT(2)
#define RG_HDMITX_SER_DIN_TOG BIT(1)
#define RG_HDMITX_SER_CLKDIG_INV BIT(0)
#define HDMI_CON4 0x10
-#define RG_HDMITX_PRD_IBIAS_CLK (0xf << 24)
-#define RG_HDMITX_PRD_IBIAS_D2 (0xf << 16)
-#define RG_HDMITX_PRD_IBIAS_D1 (0xf << 8)
-#define RG_HDMITX_PRD_IBIAS_D0 (0xf << 0)
-#define PRD_IBIAS_CLK_SHIFT 24
-#define PRD_IBIAS_D2_SHIFT 16
-#define PRD_IBIAS_D1_SHIFT 8
-#define PRD_IBIAS_D0_SHIFT 0
+#define RG_HDMITX_PRD_IBIAS_CLK GENMASK(27, 24)
+#define RG_HDMITX_PRD_IBIAS_D2 GENMASK(19, 16)
+#define RG_HDMITX_PRD_IBIAS_D1 GENMASK(11, 8)
+#define RG_HDMITX_PRD_IBIAS_D0 GENMASK(3, 0)
#define HDMI_CON5 0x14
-#define RG_HDMITX_DRV_IBIAS_CLK (0x3f << 24)
-#define RG_HDMITX_DRV_IBIAS_D2 (0x3f << 16)
-#define RG_HDMITX_DRV_IBIAS_D1 (0x3f << 8)
-#define RG_HDMITX_DRV_IBIAS_D0 (0x3f << 0)
-#define DRV_IBIAS_CLK_SHIFT 24
-#define DRV_IBIAS_D2_SHIFT 16
-#define DRV_IBIAS_D1_SHIFT 8
-#define DRV_IBIAS_D0_SHIFT 0
+#define RG_HDMITX_DRV_IBIAS_CLK GENMASK(29, 24)
+#define RG_HDMITX_DRV_IBIAS_D2 GENMASK(21, 16)
+#define RG_HDMITX_DRV_IBIAS_D1 GENMASK(13, 8)
+#define RG_HDMITX_DRV_IBIAS_D0 GENMASK(5, 0)
#define HDMI_CON6 0x18
-#define RG_HDMITX_DRV_IMP_CLK (0x3f << 24)
-#define RG_HDMITX_DRV_IMP_D2 (0x3f << 16)
-#define RG_HDMITX_DRV_IMP_D1 (0x3f << 8)
-#define RG_HDMITX_DRV_IMP_D0 (0x3f << 0)
-#define DRV_IMP_CLK_SHIFT 24
-#define DRV_IMP_D2_SHIFT 16
-#define DRV_IMP_D1_SHIFT 8
-#define DRV_IMP_D0_SHIFT 0
+#define RG_HDMITX_DRV_IMP_CLK GENMASK(29, 24)
+#define RG_HDMITX_DRV_IMP_D2 GENMASK(21, 16)
+#define RG_HDMITX_DRV_IMP_D1 GENMASK(13, 8)
+#define RG_HDMITX_DRV_IMP_D0 GENMASK(5, 0)
#define HDMI_CON7 0x1c
-#define RG_HDMITX_MHLCK_DRV_IBIAS (0x1f << 27)
-#define RG_HDMITX_SER_DIN (0x3ff << 16)
-#define RG_HDMITX_CHLDC_TST (0xf << 12)
-#define RG_HDMITX_CHLCK_TST (0xf << 8)
-#define RG_HDMITX_RESERVE (0xff << 0)
+#define RG_HDMITX_MHLCK_DRV_IBIAS GENMASK(31, 27)
+#define RG_HDMITX_SER_DIN GENMASK(25, 16)
+#define RG_HDMITX_CHLDC_TST GENMASK(15, 12)
+#define RG_HDMITX_CHLCK_TST GENMASK(11, 8)
+#define RG_HDMITX_RESERVE GENMASK(7, 0)
#define HDMI_CON8 0x20
-#define RGS_HDMITX_2T1_LEV (0xf << 16)
-#define RGS_HDMITX_2T1_EDG (0xf << 12)
-#define RGS_HDMITX_5T1_LEV (0xf << 8)
-#define RGS_HDMITX_5T1_EDG (0xf << 4)
+#define RGS_HDMITX_2T1_LEV GENMASK(19, 16)
+#define RGS_HDMITX_2T1_EDG GENMASK(15, 12)
+#define RGS_HDMITX_5T1_LEV GENMASK(11, 8)
+#define RGS_HDMITX_5T1_EDG GENMASK(7, 4)
#define RGS_HDMITX_PLUG_TST BIT(0)
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+ mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_phy_clear_bits(base + HDMI_CON3, RG_HDMITX_MHLCK_EN);
+ mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
usleep_range(100, 150);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_EN);
usleep_range(100, 150);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+ mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+ mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
return 0;
}
@@ -127,15 +105,16 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
usleep_range(100, 150);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_PLL_EN);
usleep_range(100, 150);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
usleep_range(100, 150);
}
@@ -157,6 +136,7 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
unsigned int pre_div;
unsigned int div;
unsigned int pre_ibias;
@@ -177,65 +157,57 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
div = 1;
}
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
- (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
- (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
- RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
- (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
- (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
- RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
- (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
- (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
- (0x1 << PLL_BR_SHIFT),
- RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
- RG_HDMITX_PLL_BR);
+ mtk_phy_update_field(base + HDMI_CON0, RG_HDMITX_PLL_PREDIV, pre_div);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_phy_update_bits(base + HDMI_CON0,
+ RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR,
+ FIELD_PREP(RG_HDMITX_PLL_IC, 0x1) |
+ FIELD_PREP(RG_HDMITX_PLL_IR, 0x1));
+ mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV, div);
+ mtk_phy_update_bits(base + HDMI_CON0,
+ RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV,
+ FIELD_PREP(RG_HDMITX_PLL_FBKSEL, 0x1) |
+ FIELD_PREP(RG_HDMITX_PLL_FBKDIV, 19));
+ mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PLL_DIVEN, 0x2);
+ mtk_phy_update_bits(base + HDMI_CON0,
+ RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
+ RG_HDMITX_PLL_BR,
+ FIELD_PREP(RG_HDMITX_PLL_BP, 0xc) |
+ FIELD_PREP(RG_HDMITX_PLL_BC, 0x2) |
+ FIELD_PREP(RG_HDMITX_PLL_BR, 0x1));
if (rate < 165000000) {
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
- RG_HDMITX_PRD_IMP_EN);
+ mtk_phy_clear_bits(base + HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
pre_ibias = 0x3;
imp_en = 0x0;
hdmi_ibias = hdmi_phy->ibias;
} else {
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
- RG_HDMITX_PRD_IMP_EN);
+ mtk_phy_set_bits(base + HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
pre_ibias = 0x6;
imp_en = 0xf;
hdmi_ibias = hdmi_phy->ibias_up;
}
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
- (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
- (pre_ibias << PRD_IBIAS_D2_SHIFT) |
- (pre_ibias << PRD_IBIAS_D1_SHIFT) |
- (pre_ibias << PRD_IBIAS_D0_SHIFT),
- RG_HDMITX_PRD_IBIAS_CLK |
- RG_HDMITX_PRD_IBIAS_D2 |
- RG_HDMITX_PRD_IBIAS_D1 |
- RG_HDMITX_PRD_IBIAS_D0);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
- (imp_en << DRV_IMP_EN_SHIFT),
- RG_HDMITX_DRV_IMP_EN);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
- (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
- (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
- (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
- (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
- RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
- RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
- (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
- (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
- (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
- (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
- RG_HDMITX_DRV_IBIAS_CLK |
- RG_HDMITX_DRV_IBIAS_D2 |
- RG_HDMITX_DRV_IBIAS_D1 |
- RG_HDMITX_DRV_IBIAS_D0);
+ mtk_phy_update_bits(base + HDMI_CON4,
+ RG_HDMITX_PRD_IBIAS_CLK | RG_HDMITX_PRD_IBIAS_D2 |
+ RG_HDMITX_PRD_IBIAS_D1 | RG_HDMITX_PRD_IBIAS_D0,
+ FIELD_PREP(RG_HDMITX_PRD_IBIAS_CLK, pre_ibias) |
+ FIELD_PREP(RG_HDMITX_PRD_IBIAS_D2, pre_ibias) |
+ FIELD_PREP(RG_HDMITX_PRD_IBIAS_D1, pre_ibias) |
+ FIELD_PREP(RG_HDMITX_PRD_IBIAS_D0, pre_ibias));
+ mtk_phy_update_field(base + HDMI_CON3, RG_HDMITX_DRV_IMP_EN, imp_en);
+ mtk_phy_update_bits(base + HDMI_CON6,
+ RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
+ RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0,
+ FIELD_PREP(RG_HDMITX_DRV_IMP_CLK, hdmi_phy->drv_imp_clk) |
+ FIELD_PREP(RG_HDMITX_DRV_IMP_D2, hdmi_phy->drv_imp_d2) |
+ FIELD_PREP(RG_HDMITX_DRV_IMP_D1, hdmi_phy->drv_imp_d1) |
+ FIELD_PREP(RG_HDMITX_DRV_IMP_D0, hdmi_phy->drv_imp_d0));
+ mtk_phy_update_bits(base + HDMI_CON5,
+ RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
+ RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0,
+ FIELD_PREP(RG_HDMITX_DRV_IBIAS_CLK, hdmi_ibias) |
+ FIELD_PREP(RG_HDMITX_DRV_IBIAS_D2, hdmi_ibias) |
+ FIELD_PREP(RG_HDMITX_DRV_IBIAS_D1, hdmi_ibias) |
+ FIELD_PREP(RG_HDMITX_DRV_IBIAS_D0, hdmi_ibias));
return 0;
}
@@ -257,17 +229,17 @@ static const struct clk_ops mtk_hdmi_phy_pll_ops = {
static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
- RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
- RG_HDMITX_DRV_EN);
+ mtk_phy_set_bits(hdmi_phy->regs + HDMI_CON3,
+ RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
+ RG_HDMITX_DRV_EN);
usleep_range(100, 150);
}
static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
- RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
- RG_HDMITX_SER_EN);
+ mtk_phy_clear_bits(hdmi_phy->regs + HDMI_CON3,
+ RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
+ RG_HDMITX_SER_EN);
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index d4bd419abc3c..b16d437d6721 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -15,39 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
.owner = THIS_MODULE,
};
-void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp &= ~bits;
- writel(tmp, reg);
-}
-
-void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp |= bits;
- writel(tmp, reg);
-}
-
-void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 val, u32 mask)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, reg);
-}
-
inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
{
return container_of(hw, struct mtk_hdmi_phy, pll_hw);
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.h b/drivers/phy/mediatek/phy-mtk-hdmi.h
index dcf9bb13699b..c7fa65cff989 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.h
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -42,12 +41,6 @@ struct mtk_hdmi_phy {
unsigned int ibias_up;
};
-void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits);
-void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits);
-void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 val, u32 mask);
struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
diff --git a/drivers/phy/mediatek/phy-mtk-io.h b/drivers/phy/mediatek/phy-mtk-io.h
index 500fcdab165d..d20ad5e5be81 100644
--- a/drivers/phy/mediatek/phy-mtk-io.h
+++ b/drivers/phy/mediatek/phy-mtk-io.h
@@ -8,6 +8,7 @@
#ifndef __PHY_MTK_H__
#define __PHY_MTK_H__
+#include <linux/bitfield.h>
#include <linux/io.h>
static inline void mtk_phy_clear_bits(void __iomem *reg, u32 bits)
@@ -35,4 +36,11 @@ static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
writel(tmp, reg);
}
+/* field @mask shall be constant and contiguous */
+#define mtk_phy_update_field(reg, mask, val) \
+({ \
+ typeof(mask) mask_ = (mask); \
+ mtk_phy_update_bits(reg, mask_, FIELD_PREP(mask_, val)); \
+})
+
#endif
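
For reference, the new mtk_phy_update_field() helper is a plain read-modify-write that folds in the FIELD_PREP() shift, so callers pass the raw field value instead of a pre-shifted constant. A minimal usage sketch (FOO_REG and FOO_BAR_SEL are hypothetical names, not part of any MediaTek PHY):

#include <linux/bits.h>
#include "phy-mtk-io.h"

#define FOO_REG		0x10		/* hypothetical register offset */
#define FOO_BAR_SEL	GENMASK(6, 4)	/* hypothetical 3-bit field */

static void foo_set_bar_sel(void __iomem *base, u32 sel)
{
	/* equivalent to:
	 * mtk_phy_update_bits(base + FOO_REG, FOO_BAR_SEL,
	 *		       FIELD_PREP(FOO_BAR_SEL, sel));
	 */
	mtk_phy_update_field(base + FOO_REG, FOO_BAR_SEL, sel);
}
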
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c
index 7a847954594f..673cb0f08959 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c
@@ -4,14 +4,15 @@
* Author: jitao.shi <jitao.shi@mediatek.com>
*/
+#include "phy-mtk-io.h"
#include "phy-mtk-mipi-dsi.h"
#define MIPITX_DSI_CON 0x00
#define RG_DSI_LDOCORE_EN BIT(0)
#define RG_DSI_CKG_LDOOUT_EN BIT(1)
-#define RG_DSI_BCLK_SEL (3 << 2)
-#define RG_DSI_LD_IDX_SEL (7 << 4)
-#define RG_DSI_PHYCLK_SEL (2 << 8)
+#define RG_DSI_BCLK_SEL GENMASK(3, 2)
+#define RG_DSI_LD_IDX_SEL GENMASK(6, 4)
+#define RG_DSI_PHYCLK_SEL GENMASK(9, 8)
#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
#define RG_DSI_LPTX_CLMP_EN BIT(11)
@@ -27,41 +28,46 @@
#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
-#define RG_DSI_LNTx_RT_CODE (0xf << 8)
+#define RG_DSI_LNTx_RT_CODE GENMASK(11, 8)
#define MIPITX_DSI_TOP_CON 0x40
#define RG_DSI_LNT_INTR_EN BIT(0)
#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
#define RG_DSI_LNT_TESTMODE_EN BIT(3)
-#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
-#define RG_DSI_LNT_AIO_SEL (7 << 8)
+#define RG_DSI_LNT_IMP_CAL_CODE GENMASK(7, 4)
+#define RG_DSI_LNT_AIO_SEL GENMASK(10, 8)
#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
#define RG_DSI_DEBUG_INPUT_EN BIT(12)
-#define RG_DSI_PRESERVE (7 << 13)
+#define RG_DSI_PRESERVE GENMASK(15, 13)
#define MIPITX_DSI_BG_CON 0x44
#define RG_DSI_BG_CORE_EN BIT(0)
#define RG_DSI_BG_CKEN BIT(1)
-#define RG_DSI_BG_DIV (0x3 << 2)
+#define RG_DSI_BG_DIV GENMASK(3, 2)
#define RG_DSI_BG_FAST_CHARGE BIT(4)
-#define RG_DSI_VOUT_MSK (0x3ffff << 5)
-#define RG_DSI_V12_SEL (7 << 5)
-#define RG_DSI_V10_SEL (7 << 8)
-#define RG_DSI_V072_SEL (7 << 11)
-#define RG_DSI_V04_SEL (7 << 14)
-#define RG_DSI_V032_SEL (7 << 17)
-#define RG_DSI_V02_SEL (7 << 20)
-#define RG_DSI_BG_R1_TRIM (0xf << 24)
-#define RG_DSI_BG_R2_TRIM (0xf << 28)
+
+#define RG_DSI_V12_SEL GENMASK(7, 5)
+#define RG_DSI_V10_SEL GENMASK(10, 8)
+#define RG_DSI_V072_SEL GENMASK(13, 11)
+#define RG_DSI_V04_SEL GENMASK(16, 14)
+#define RG_DSI_V032_SEL GENMASK(19, 17)
+#define RG_DSI_V02_SEL GENMASK(22, 20)
+#define RG_DSI_VOUT_MSK \
+ (RG_DSI_V12_SEL | RG_DSI_V10_SEL | RG_DSI_V072_SEL | \
+ RG_DSI_V04_SEL | RG_DSI_V032_SEL | RG_DSI_V02_SEL)
+#define RG_DSI_BG_R1_TRIM GENMASK(27, 24)
+#define RG_DSI_BG_R2_TRIM GENMASK(31, 28)
#define MIPITX_DSI_PLL_CON0 0x50
#define RG_DSI_MPPLL_PLL_EN BIT(0)
-#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
-#define RG_DSI_MPPLL_PREDIV (3 << 1)
-#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
-#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
-#define RG_DSI_MPPLL_POSDIV (7 << 7)
+#define RG_DSI_MPPLL_PREDIV GENMASK(2, 1)
+#define RG_DSI_MPPLL_TXDIV0 GENMASK(4, 3)
+#define RG_DSI_MPPLL_TXDIV1 GENMASK(6, 5)
+#define RG_DSI_MPPLL_POSDIV GENMASK(9, 7)
+#define RG_DSI_MPPLL_DIV_MSK \
+ (RG_DSI_MPPLL_PREDIV | RG_DSI_MPPLL_TXDIV0 | \
+ RG_DSI_MPPLL_TXDIV1 | RG_DSI_MPPLL_POSDIV)
#define RG_DSI_MPPLL_MONVC_EN BIT(10)
#define RG_DSI_MPPLL_MONREF_EN BIT(11)
#define RG_DSI_MPPLL_VOD_EN BIT(12)
@@ -70,12 +76,12 @@
#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
-#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
+#define RG_DSI_MPPLL_SDM_SSC_PRD GENMASK(31, 16)
#define MIPITX_DSI_PLL_CON2 0x58
#define MIPITX_DSI_PLL_TOP 0x64
-#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+#define RG_DSI_MPPLL_PRESERVE GENMASK(15, 8)
#define MIPITX_DSI_PLL_PWR 0x68
#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
@@ -116,6 +122,7 @@
static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ void __iomem *base = mipi_tx->regs;
u8 txdiv, txdiv0, txdiv1;
u64 pcw;
@@ -145,34 +152,38 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
return -EINVAL;
}
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_VOUT_MSK |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
- (4 << 20) | (4 << 17) | (4 << 14) |
- (4 << 11) | (4 << 8) | (4 << 5) |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+ mtk_phy_update_bits(base + MIPITX_DSI_BG_CON,
+ RG_DSI_VOUT_MSK | RG_DSI_BG_CKEN |
+ RG_DSI_BG_CORE_EN,
+ FIELD_PREP(RG_DSI_V02_SEL, 4) |
+ FIELD_PREP(RG_DSI_V032_SEL, 4) |
+ FIELD_PREP(RG_DSI_V04_SEL, 4) |
+ FIELD_PREP(RG_DSI_V072_SEL, 4) |
+ FIELD_PREP(RG_DSI_V10_SEL, 4) |
+ FIELD_PREP(RG_DSI_V12_SEL, 4) |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
usleep_range(30, 100);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
- (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+ mtk_phy_update_bits(base + MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+ FIELD_PREP(RG_DSI_LNT_IMP_CAL_CODE, 8) |
+ RG_DSI_LNT_HS_BIAS_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+ mtk_phy_set_bits(base + MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_PWR_ON |
- RG_DSI_MPPLL_SDM_ISO_EN,
- RG_DSI_MPPLL_SDM_PWR_ON);
+ mtk_phy_update_bits(base + MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_PWR_ON | RG_DSI_MPPLL_SDM_ISO_EN,
+ RG_DSI_MPPLL_SDM_PWR_ON);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
- RG_DSI_MPPLL_PREDIV,
- (txdiv0 << 3) | (txdiv1 << 5));
+ mtk_phy_update_bits(base + MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+ RG_DSI_MPPLL_PREDIV,
+ FIELD_PREP(RG_DSI_MPPLL_TXDIV0, txdiv0) |
+ FIELD_PREP(RG_DSI_MPPLL_TXDIV1, txdiv1));
/*
* PLL PCW config
@@ -182,23 +193,20 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
* Post DIV =4, so need data_Rate*4
* Ref_clk is 26MHz
*/
- pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
- 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+ pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24, 26000000);
+ writel(pcw, base + MIPITX_DSI_PLL_CON2);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_FRA_EN);
+ mtk_phy_set_bits(base + MIPITX_DSI_PLL_CON1, RG_DSI_MPPLL_SDM_FRA_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+ mtk_phy_set_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
usleep_range(20, 100);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_SSC_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON1, RG_DSI_MPPLL_SDM_SSC_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE,
- mipi_tx->driver_data->mppll_preserve);
+ mtk_phy_update_field(base + MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
return 0;
}
@@ -206,31 +214,27 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ void __iomem *base = mipi_tx->regs;
dev_dbg(mipi_tx->dev, "unprepare\n");
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE, 0);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_TOP, RG_DSI_MPPLL_PRESERVE);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_ISO_EN |
- RG_DSI_MPPLL_SDM_PWR_ON,
- RG_DSI_MPPLL_SDM_ISO_EN);
+ mtk_phy_update_bits(base + MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_ISO_EN | RG_DSI_MPPLL_SDM_PWR_ON,
+ RG_DSI_MPPLL_SDM_ISO_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_HS_BIAS_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_TOP_CON, RG_DSI_LNT_HS_BIAS_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_BG_CON,
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_DIV_MSK);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_DIV_MSK);
}
static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -254,10 +258,10 @@ static void mtk_mipi_tx_power_on_signal(struct phy *phy)
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+ mtk_phy_set_bits(mipi_tx->regs + reg, RG_DSI_LNTx_LDOOUT_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
+ mtk_phy_clear_bits(mipi_tx->regs + MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
}
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
@@ -265,23 +269,23 @@ static void mtk_mipi_tx_power_off_signal(struct phy *phy)
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
u32 reg;
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
+ mtk_phy_set_bits(mipi_tx->regs + MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+ mtk_phy_clear_bits(mipi_tx->regs + reg, RG_DSI_LNTx_LDOOUT_EN);
}
const struct mtk_mipitx_data mt2701_mipitx_data = {
- .mppll_preserve = (3 << 8),
+ .mppll_preserve = 3,
.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
};
const struct mtk_mipitx_data mt8173_mipitx_data = {
- .mppll_preserve = (0 << 8),
+ .mppll_preserve = 0,
.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
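
The conversion in this file relies on the old pre-shifted literals and the new GENMASK()/FIELD_PREP() pairs encoding the same register value. A standalone sanity check of that equivalence, using userspace stand-ins for the kernel macros and the RG_DSI_LNT_IMP_CAL_CODE field (bits 7:4) written above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the helpers in <linux/bits.h> and <linux/bitfield.h> */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define RG_DSI_LNT_IMP_CAL_CODE	GENMASK(7, 4)

int main(void)
{
	/* old code wrote the pre-shifted literal (8 << 4) directly */
	uint32_t old_val = 8 << 4;
	/* new code passes the raw field value and lets FIELD_PREP() shift it */
	uint32_t new_val = FIELD_PREP(RG_DSI_LNT_IMP_CAL_CODE, 8);

	assert(old_val == new_val);
	printf("both encodings: 0x%02x\n", new_val);
	return 0;
}
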
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
index 99108426d57c..f021ec5a70e5 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
@@ -4,6 +4,7 @@
* Author: jitao.shi <jitao.shi@mediatek.com>
*/
+#include "phy-mtk-io.h"
#include "phy-mtk-mipi-dsi.h"
#define MIPITX_LANE_CON 0x000c
@@ -18,7 +19,7 @@
#define RG_DSI_PAD_TIEL_SEL BIT(8)
#define MIPITX_VOLTAGE_SEL 0x0010
-#define RG_DSI_HSTX_LDO_REF_SEL (0xf << 6)
+#define RG_DSI_HSTX_LDO_REF_SEL GENMASK(9, 6)
#define MIPITX_PLL_PWR 0x0028
#define MIPITX_PLL_CON0 0x002c
@@ -26,7 +27,7 @@
#define MIPITX_PLL_CON2 0x0034
#define MIPITX_PLL_CON3 0x0038
#define MIPITX_PLL_CON4 0x003c
-#define RG_DSI_PLL_IBIAS (3 << 10)
+#define RG_DSI_PLL_IBIAS GENMASK(11, 10)
#define MIPITX_D2P_RTCODE 0x0100
#define MIPITX_D2_SW_CTL_EN 0x0144
@@ -41,11 +42,12 @@
#define AD_DSI_PLL_SDM_ISO_EN BIT(1)
#define RG_DSI_PLL_EN BIT(4)
-#define RG_DSI_PLL_POSDIV (0x7 << 8)
+#define RG_DSI_PLL_POSDIV GENMASK(10, 8)
static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ void __iomem *base = mipi_tx->regs;
unsigned int txdiv, txdiv0;
u64 pcw;
@@ -70,17 +72,16 @@ static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
return -EINVAL;
}
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
+ mtk_phy_clear_bits(base + MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ mtk_phy_set_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_phy_clear_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
udelay(1);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_phy_clear_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV,
- txdiv0 << 8);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ writel(pcw, base + MIPITX_PLL_CON0);
+ mtk_phy_update_field(base + MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV, txdiv0);
+ mtk_phy_set_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
return 0;
}
@@ -88,11 +89,12 @@ static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ void __iomem *base = mipi_tx->regs;
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ mtk_phy_clear_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_phy_set_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_phy_clear_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
}
static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -121,7 +123,7 @@ static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
mipi_tx->rt_code[i] |= 0x10 << 5;
for (j = 0; j < 10; j++)
- mtk_mipi_tx_update_bits(mipi_tx,
+ mtk_phy_update_bits(mipi_tx->regs +
MIPITX_D2P_RTCODE * (i + 1) + j * 4,
1, mipi_tx->rt_code[i] >> j & 1);
}
@@ -130,44 +132,42 @@ static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
static void mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ void __iomem *base = mipi_tx->regs;
/* BG_LPF_EN / BG_CORE_EN */
- writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
- mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, base + MIPITX_LANE_CON);
usleep_range(30, 100);
- writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN,
- mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN, base + MIPITX_LANE_CON);
/* Switch OFF each Lane */
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_VOLTAGE_SEL,
- RG_DSI_HSTX_LDO_REF_SEL,
- (mipi_tx->mipitx_drive - 3000) / 200 << 6);
+ mtk_phy_update_field(base + MIPITX_VOLTAGE_SEL, RG_DSI_HSTX_LDO_REF_SEL,
+ (mipi_tx->mipitx_drive - 3000) / 200);
mtk_mipi_tx_config_calibration_data(mipi_tx);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
+ mtk_phy_set_bits(base + MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
}
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ void __iomem *base = mipi_tx->regs;
/* Switch ON each Lane */
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
-
- writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
- mipi_tx->regs + MIPITX_LANE_CON);
- writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON);
+ mtk_phy_set_bits(base + MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_set_bits(base + MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_set_bits(base + MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_set_bits(base + MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_set_bits(base + MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, base + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL, base + MIPITX_LANE_CON);
}
const struct mtk_mipitx_data mt8183_mipitx_data = {
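
The pcw value written to MIPITX_PLL_CON0 above is the PLL ratio relative to the 26 MHz reference, kept as a fixed-point number with 24 fractional bits (the mt8173 variant earlier multiplies by an extra factor of 2 for its divider chain). A standalone example of the arithmetic, assuming a hypothetical 1.456 GHz data rate with txdiv = 1:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical operating point: 1.456 GHz data rate, txdiv = 1 */
	uint64_t data_rate = 1456000000ULL;
	unsigned int txdiv = 1;

	/* same arithmetic as mtk_mipi_tx_pll_enable(): scale by 2^24,
	 * then divide by the 26 MHz reference clock
	 */
	uint64_t pcw = ((data_rate * txdiv) << 24) / 26000000ULL;

	/* integer part is 56 here, i.e. 1456 MHz / 26 MHz */
	printf("pcw = 0x%" PRIx64 " (integer part %" PRIu64 ")\n",
	       pcw, pcw >> 24);
	return 0;
}
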
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 28506932bd91..cf9c386385bb 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -10,30 +10,6 @@ inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
return container_of(hw, struct mtk_mipi_tx, pll_hw);
}
-void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
-{
- u32 temp = readl(mipi_tx->regs + offset);
-
- writel(temp & ~bits, mipi_tx->regs + offset);
-}
-
-void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
-{
- u32 temp = readl(mipi_tx->regs + offset);
-
- writel(temp | bits, mipi_tx->regs + offset);
-}
-
-void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 mask, u32 data)
-{
- u32 temp = readl(mipi_tx->regs + offset);
-
- writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
-}
-
int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.h b/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
index c76f07c3fdeb..47b60b1a7226 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
@@ -10,7 +10,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
@@ -37,10 +36,6 @@ struct mtk_mipi_tx {
};
struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
-void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
-void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
-void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask,
- u32 data);
int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/phy/mediatek/phy-mtk-pcie.c b/drivers/phy/mediatek/phy-mtk-pcie.c
index 7f29d43442bf..25dbd6e35722 100644
--- a/drivers/phy/mediatek/phy-mtk-pcie.c
+++ b/drivers/phy/mediatek/phy-mtk-pcie.c
@@ -89,14 +89,14 @@ static void mtk_pcie_efuse_set_lane(struct mtk_pcie_phy *pcie_phy,
addr = pcie_phy->sif_base + PEXTP_ANA_LN0_TRX_REG +
lane * PEXTP_ANA_LANE_OFFSET;
- mtk_phy_update_bits(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_PMOS_SEL,
- FIELD_PREP(EFUSE_LN_TX_PMOS_SEL, data->tx_pmos));
+ mtk_phy_update_field(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_PMOS_SEL,
+ data->tx_pmos);
- mtk_phy_update_bits(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_NMOS_SEL,
- FIELD_PREP(EFUSE_LN_TX_NMOS_SEL, data->tx_nmos));
+ mtk_phy_update_field(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_NMOS_SEL,
+ data->tx_nmos);
- mtk_phy_update_bits(addr + PEXTP_ANA_RX_REG, EFUSE_LN_RX_SEL,
- FIELD_PREP(EFUSE_LN_RX_SEL, data->rx_data));
+ mtk_phy_update_field(addr + PEXTP_ANA_RX_REG, EFUSE_LN_RX_SEL,
+ data->rx_data);
}
/**
@@ -116,9 +116,8 @@ static int mtk_pcie_phy_init(struct phy *phy)
return 0;
/* Set global data */
- mtk_phy_update_bits(pcie_phy->sif_base + PEXTP_ANA_GLB_00_REG,
- EFUSE_GLB_INTR_SEL,
- FIELD_PREP(EFUSE_GLB_INTR_SEL, pcie_phy->efuse_glb_intr));
+ mtk_phy_update_field(pcie_phy->sif_base + PEXTP_ANA_GLB_00_REG,
+ EFUSE_GLB_INTR_SEL, pcie_phy->efuse_glb_intr);
for (i = 0; i < pcie_phy->data->num_lanes; i++)
mtk_pcie_efuse_set_lane(pcie_phy, i);
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 8ee7682b8e93..e906a82791bd 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -49,35 +49,28 @@
#define U3P_USBPHYACR0 0x000
#define PA0_RG_U2PLL_FORCE_ON BIT(15)
#define PA0_USB20_PLL_PREDIV GENMASK(7, 6)
-#define PA0_USB20_PLL_PREDIV_VAL(x) ((0x3 & (x)) << 6)
#define PA0_RG_USB20_INTR_EN BIT(5)
#define U3P_USBPHYACR1 0x004
#define PA1_RG_INTR_CAL GENMASK(23, 19)
-#define PA1_RG_INTR_CAL_VAL(x) ((0x1f & (x)) << 19)
#define PA1_RG_VRT_SEL GENMASK(14, 12)
-#define PA1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12)
#define PA1_RG_TERM_SEL GENMASK(10, 8)
-#define PA1_RG_TERM_SEL_VAL(x) ((0x7 & (x)) << 8)
#define U3P_USBPHYACR2 0x008
#define PA2_RG_U2PLL_BW GENMASK(21, 19)
-#define PA2_RG_U2PLL_BW_VAL(x) ((0x7 & (x)) << 19)
#define PA2_RG_SIF_U2PLL_FORCE_EN BIT(18)
#define U3P_USBPHYACR5 0x014
#define PA5_RG_U2_HSTX_SRCAL_EN BIT(15)
#define PA5_RG_U2_HSTX_SRCTRL GENMASK(14, 12)
-#define PA5_RG_U2_HSTX_SRCTRL_VAL(x) ((0x7 & (x)) << 12)
#define PA5_RG_U2_HS_100U_U3_EN BIT(11)
#define U3P_USBPHYACR6 0x018
+#define PA6_RG_U2_PRE_EMP GENMASK(31, 30)
#define PA6_RG_U2_BC11_SW_EN BIT(23)
#define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20)
#define PA6_RG_U2_DISCTH GENMASK(7, 4)
-#define PA6_RG_U2_DISCTH_VAL(x) ((0xf & (x)) << 4)
#define PA6_RG_U2_SQTH GENMASK(3, 0)
-#define PA6_RG_U2_SQTH_VAL(x) (0xf & (x))
#define U3P_U2PHYACR4 0x020
#define P2C_RG_USB20_GPIO_CTL BIT(9)
@@ -104,11 +97,9 @@
#define P2C_FORCE_SUSPENDM BIT(18)
#define P2C_FORCE_TERMSEL BIT(17)
#define P2C_RG_DATAIN GENMASK(13, 10)
-#define P2C_RG_DATAIN_VAL(x) ((0xf & (x)) << 10)
#define P2C_RG_DMPULLDOWN BIT(7)
#define P2C_RG_DPPULLDOWN BIT(6)
#define P2C_RG_XCVRSEL GENMASK(5, 4)
-#define P2C_RG_XCVRSEL_VAL(x) ((0x3 & (x)) << 4)
#define P2C_RG_SUSPENDM BIT(3)
#define P2C_RG_TERMSEL BIT(2)
#define P2C_DTM0_PART_MASK \
@@ -139,87 +130,65 @@
#define U3P_U3_PHYA_REG0 0x000
#define P3A_RG_IEXT_INTR GENMASK(15, 10)
-#define P3A_RG_IEXT_INTR_VAL(x) ((0x3f & (x)) << 10)
#define P3A_RG_CLKDRV_OFF GENMASK(3, 2)
-#define P3A_RG_CLKDRV_OFF_VAL(x) ((0x3 & (x)) << 2)
#define U3P_U3_PHYA_REG1 0x004
#define P3A_RG_CLKDRV_AMP GENMASK(31, 29)
-#define P3A_RG_CLKDRV_AMP_VAL(x) ((0x7 & (x)) << 29)
#define U3P_U3_PHYA_REG6 0x018
#define P3A_RG_TX_EIDLE_CM GENMASK(31, 28)
-#define P3A_RG_TX_EIDLE_CM_VAL(x) ((0xf & (x)) << 28)
#define U3P_U3_PHYA_REG9 0x024
#define P3A_RG_RX_DAC_MUX GENMASK(5, 1)
-#define P3A_RG_RX_DAC_MUX_VAL(x) ((0x1f & (x)) << 1)
#define U3P_U3_PHYA_DA_REG0 0x100
#define P3A_RG_XTAL_EXT_PE2H GENMASK(17, 16)
-#define P3A_RG_XTAL_EXT_PE2H_VAL(x) ((0x3 & (x)) << 16)
#define P3A_RG_XTAL_EXT_PE1H GENMASK(13, 12)
-#define P3A_RG_XTAL_EXT_PE1H_VAL(x) ((0x3 & (x)) << 12)
#define P3A_RG_XTAL_EXT_EN_U3 GENMASK(11, 10)
-#define P3A_RG_XTAL_EXT_EN_U3_VAL(x) ((0x3 & (x)) << 10)
#define U3P_U3_PHYA_DA_REG4 0x108
#define P3A_RG_PLL_DIVEN_PE2H GENMASK(21, 19)
#define P3A_RG_PLL_BC_PE2H GENMASK(7, 6)
-#define P3A_RG_PLL_BC_PE2H_VAL(x) ((0x3 & (x)) << 6)
#define U3P_U3_PHYA_DA_REG5 0x10c
#define P3A_RG_PLL_BR_PE2H GENMASK(29, 28)
-#define P3A_RG_PLL_BR_PE2H_VAL(x) ((0x3 & (x)) << 28)
#define P3A_RG_PLL_IC_PE2H GENMASK(15, 12)
-#define P3A_RG_PLL_IC_PE2H_VAL(x) ((0xf & (x)) << 12)
#define U3P_U3_PHYA_DA_REG6 0x110
#define P3A_RG_PLL_IR_PE2H GENMASK(19, 16)
-#define P3A_RG_PLL_IR_PE2H_VAL(x) ((0xf & (x)) << 16)
#define U3P_U3_PHYA_DA_REG7 0x114
#define P3A_RG_PLL_BP_PE2H GENMASK(19, 16)
-#define P3A_RG_PLL_BP_PE2H_VAL(x) ((0xf & (x)) << 16)
#define U3P_U3_PHYA_DA_REG20 0x13c
#define P3A_RG_PLL_DELTA1_PE2H GENMASK(31, 16)
-#define P3A_RG_PLL_DELTA1_PE2H_VAL(x) ((0xffff & (x)) << 16)
#define U3P_U3_PHYA_DA_REG25 0x148
#define P3A_RG_PLL_DELTA_PE2H GENMASK(15, 0)
-#define P3A_RG_PLL_DELTA_PE2H_VAL(x) (0xffff & (x))
#define U3P_U3_PHYD_LFPS1 0x00c
#define P3D_RG_FWAKE_TH GENMASK(21, 16)
-#define P3D_RG_FWAKE_TH_VAL(x) ((0x3f & (x)) << 16)
#define U3P_U3_PHYD_IMPCAL0 0x010
#define P3D_RG_FORCE_TX_IMPEL BIT(31)
#define P3D_RG_TX_IMPEL GENMASK(28, 24)
-#define P3D_RG_TX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
#define U3P_U3_PHYD_IMPCAL1 0x014
#define P3D_RG_FORCE_RX_IMPEL BIT(31)
#define P3D_RG_RX_IMPEL GENMASK(28, 24)
-#define P3D_RG_RX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
#define U3P_U3_PHYD_RSV 0x054
#define P3D_RG_EFUSE_AUTO_LOAD_DIS BIT(12)
#define U3P_U3_PHYD_CDR1 0x05c
#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
-#define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24)
#define P3D_RG_CDR_BIR_LTD0 GENMASK(12, 8)
-#define P3D_RG_CDR_BIR_LTD0_VAL(x) ((0x1f & (x)) << 8)
#define U3P_U3_PHYD_RXDET1 0x128
#define P3D_RG_RXDET_STB2_SET GENMASK(17, 9)
-#define P3D_RG_RXDET_STB2_SET_VAL(x) ((0x1ff & (x)) << 9)
#define U3P_U3_PHYD_RXDET2 0x12c
#define P3D_RG_RXDET_STB2_SET_P3 GENMASK(8, 0)
-#define P3D_RG_RXDET_STB2_SET_P3_VAL(x) (0x1ff & (x))
#define U3P_SPLLC_XTALCTL3 0x018
#define XC3_RG_U3_XTAL_RX_PWD BIT(9)
@@ -227,10 +196,8 @@
#define U3P_U2FREQ_FMCR0 0x00
#define P2F_RG_MONCLK_SEL GENMASK(27, 26)
-#define P2F_RG_MONCLK_SEL_VAL(x) ((0x3 & (x)) << 26)
#define P2F_RG_FREQDET_EN BIT(24)
#define P2F_RG_CYCLECNT GENMASK(23, 0)
-#define P2F_RG_CYCLECNT_VAL(x) ((P2F_RG_CYCLECNT) & (x))
#define U3P_U2FREQ_VALUE 0x0c
@@ -247,60 +214,45 @@
#define PHYD_CTRL_SIGNAL_MODE4 0x1c
/* CDR Charge Pump P-path current adjustment */
#define RG_CDR_BICLTD1_GEN1_MSK GENMASK(23, 20)
-#define RG_CDR_BICLTD1_GEN1_VAL(x) ((0xf & (x)) << 20)
#define RG_CDR_BICLTD0_GEN1_MSK GENMASK(11, 8)
-#define RG_CDR_BICLTD0_GEN1_VAL(x) ((0xf & (x)) << 8)
#define PHYD_DESIGN_OPTION2 0x24
/* Symbol lock count selection */
#define RG_LOCK_CNT_SEL_MSK GENMASK(5, 4)
-#define RG_LOCK_CNT_SEL_VAL(x) ((0x3 & (x)) << 4)
#define PHYD_DESIGN_OPTION9 0x40
/* COMWAK GAP width window */
#define RG_TG_MAX_MSK GENMASK(20, 16)
-#define RG_TG_MAX_VAL(x) ((0x1f & (x)) << 16)
/* COMINIT GAP width window */
#define RG_T2_MAX_MSK GENMASK(13, 8)
-#define RG_T2_MAX_VAL(x) ((0x3f & (x)) << 8)
/* COMWAK GAP width window */
#define RG_TG_MIN_MSK GENMASK(7, 5)
-#define RG_TG_MIN_VAL(x) ((0x7 & (x)) << 5)
/* COMINIT GAP width window */
#define RG_T2_MIN_MSK GENMASK(4, 0)
-#define RG_T2_MIN_VAL(x) (0x1f & (x))
#define ANA_RG_CTRL_SIGNAL1 0x4c
/* TX driver tail current control for 0dB de-emphasis mode for Gen1 speed */
#define RG_IDRV_0DB_GEN1_MSK GENMASK(13, 8)
-#define RG_IDRV_0DB_GEN1_VAL(x) ((0x3f & (x)) << 8)
#define ANA_RG_CTRL_SIGNAL4 0x58
#define RG_CDR_BICLTR_GEN1_MSK GENMASK(23, 20)
-#define RG_CDR_BICLTR_GEN1_VAL(x) ((0xf & (x)) << 20)
/* Loop filter R1 resistance adjustment for Gen1 speed */
#define RG_CDR_BR_GEN2_MSK GENMASK(10, 8)
-#define RG_CDR_BR_GEN2_VAL(x) ((0x7 & (x)) << 8)
#define ANA_RG_CTRL_SIGNAL6 0x60
/* I-path capacitance adjustment for Gen1 */
#define RG_CDR_BC_GEN1_MSK GENMASK(28, 24)
-#define RG_CDR_BC_GEN1_VAL(x) ((0x1f & (x)) << 24)
#define RG_CDR_BIRLTR_GEN1_MSK GENMASK(4, 0)
-#define RG_CDR_BIRLTR_GEN1_VAL(x) (0x1f & (x))
#define ANA_EQ_EYE_CTRL_SIGNAL1 0x6c
/* RX Gen1 LEQ tuning step */
#define RG_EQ_DLEQ_LFI_GEN1_MSK GENMASK(11, 8)
-#define RG_EQ_DLEQ_LFI_GEN1_VAL(x) ((0xf & (x)) << 8)
#define ANA_EQ_EYE_CTRL_SIGNAL4 0xd8
#define RG_CDR_BIRLTD0_GEN1_MSK GENMASK(20, 16)
-#define RG_CDR_BIRLTD0_GEN1_VAL(x) ((0x1f & (x)) << 16)
#define ANA_EQ_EYE_CTRL_SIGNAL5 0xdc
#define RG_CDR_BIRLTD0_GEN3_MSK GENMASK(4, 0)
-#define RG_CDR_BIRLTD0_GEN3_VAL(x) (0x1f & (x))
/* PHY switch between pcie/usb3/sgmii/sata */
#define USB_PHY_SWITCH_CTRL 0x0
@@ -370,6 +322,7 @@ struct mtk_phy_instance {
int eye_term;
int intr;
int discth;
+ int pre_emphasis;
bool bc12_en;
};
@@ -411,9 +364,9 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
/* set cycle count as 1024, and select u2 channel */
tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
tmp &= ~(P2F_RG_CYCLECNT | P2F_RG_MONCLK_SEL);
- tmp |= P2F_RG_CYCLECNT_VAL(U3P_FM_DET_CYCLE_CNT);
+ tmp |= FIELD_PREP(P2F_RG_CYCLECNT, U3P_FM_DET_CYCLE_CNT);
if (tphy->pdata->version == MTK_PHY_V1)
- tmp |= P2F_RG_MONCLK_SEL_VAL(instance->index >> 1);
+ tmp |= FIELD_PREP(P2F_RG_MONCLK_SEL, instance->index >> 1);
writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
@@ -446,8 +399,8 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
tphy->src_ref_clk, tphy->src_coef);
/* set HS slew rate */
- mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
- PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val));
+ mtk_phy_update_field(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
+ calibration_val);
/* disable USB ring oscillator */
mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN);
@@ -457,33 +410,30 @@ static void u3_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
+ void __iomem *phya = u3_banks->phya;
+ void __iomem *phyd = u3_banks->phyd;
/* gating PCIe Analog XTAL clock */
mtk_phy_set_bits(u3_banks->spllc + U3P_SPLLC_XTALCTL3,
XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD);
/* gating XSQ */
- mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_DA_REG0,
- P3A_RG_XTAL_EXT_EN_U3, P3A_RG_XTAL_EXT_EN_U3_VAL(2));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG0, P3A_RG_XTAL_EXT_EN_U3, 2);
- mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG9,
- P3A_RG_RX_DAC_MUX, P3A_RG_RX_DAC_MUX_VAL(4));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_REG9, P3A_RG_RX_DAC_MUX, 4);
- mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG6,
- P3A_RG_TX_EIDLE_CM, P3A_RG_TX_EIDLE_CM_VAL(0xe));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_REG6, P3A_RG_TX_EIDLE_CM, 0xe);
mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_CDR1,
P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1,
- P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3));
+ FIELD_PREP(P3D_RG_CDR_BIR_LTD0, 0xc) |
+ FIELD_PREP(P3D_RG_CDR_BIR_LTD1, 0x3));
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_LFPS1,
- P3D_RG_FWAKE_TH, P3D_RG_FWAKE_TH_VAL(0x34));
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_LFPS1, P3D_RG_FWAKE_TH, 0x34);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
- P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10));
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_RXDET1, P3D_RG_RXDET_STB2_SET, 0x10);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
- P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10));
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_RXDET2, P3D_RG_RXDET_STB2_SET_P3, 0x10);
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
@@ -497,11 +447,9 @@ static void u2_phy_pll_26m_set(struct mtk_tphy *tphy,
if (!tphy->pdata->sw_pll_48m_to_26m)
return;
- mtk_phy_update_bits(com + U3P_USBPHYACR0, PA0_USB20_PLL_PREDIV,
- PA0_USB20_PLL_PREDIV_VAL(0));
+ mtk_phy_update_field(com + U3P_USBPHYACR0, PA0_USB20_PLL_PREDIV, 0);
- mtk_phy_update_bits(com + U3P_USBPHYACR2, PA2_RG_U2PLL_BW,
- PA2_RG_U2PLL_BW_VAL(3));
+ mtk_phy_update_field(com + U3P_USBPHYACR2, PA2_RG_U2PLL_BW, 3);
writel(P2R_RG_U2PLL_FBDIV_26M, com + U3P_U2PHYA_RESV);
@@ -519,8 +467,8 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
/* switch to USB function, and enable usb pll */
mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM);
- mtk_phy_update_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN,
- P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0));
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0,
+ P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_UART_EN);
@@ -529,8 +477,7 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
/* disable switch 100uA current to SSUSB */
mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HS_100U_U3_EN);
- if (!index)
- mtk_phy_clear_bits(com + U3P_U2PHYACR4, P2C_U2_GPIO_CTR_MSK);
+ mtk_phy_clear_bits(com + U3P_U2PHYACR4, P2C_U2_GPIO_CTR_MSK);
if (tphy->pdata->avoid_rx_sen_degradation) {
if (!index) {
@@ -548,7 +495,7 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
/* DP/DM BC1.1 path Disable */
mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_BC11_SW_EN);
- mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_SQTH, PA6_RG_U2_SQTH_VAL(2));
+ mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_SQTH, 2);
/* Workaround only for mt8195, HW fix it for others (V3) */
u2_phy_pll_26m_set(tphy, instance);
@@ -563,9 +510,6 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy,
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- mtk_phy_clear_bits(com + U3P_U2PHYDTM0,
- P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
-
/* OTG Enable */
mtk_phy_set_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
@@ -588,8 +532,6 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy,
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN);
-
/* OTG Disable */
mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
@@ -656,43 +598,39 @@ static void pcie_phy_instance_init(struct mtk_tphy *tphy,
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG0,
P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H,
- P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2));
+ FIELD_PREP(P3A_RG_XTAL_EXT_PE1H, 0x2) |
+ FIELD_PREP(P3A_RG_XTAL_EXT_PE2H, 0x2));
/* ref clk drive */
- mtk_phy_update_bits(phya + U3P_U3_PHYA_REG1, P3A_RG_CLKDRV_AMP,
- P3A_RG_CLKDRV_AMP_VAL(0x4));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_REG1, P3A_RG_CLKDRV_AMP, 0x4);
- mtk_phy_update_bits(phya + U3P_U3_PHYA_REG0, P3A_RG_CLKDRV_OFF,
- P3A_RG_CLKDRV_OFF_VAL(0x1));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_REG0, P3A_RG_CLKDRV_OFF, 0x1);
/* SSC delta -5000ppm */
- mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG20, P3A_RG_PLL_DELTA1_PE2H,
- P3A_RG_PLL_DELTA1_PE2H_VAL(0x3c));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG20, P3A_RG_PLL_DELTA1_PE2H, 0x3c);
- mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG25, P3A_RG_PLL_DELTA_PE2H,
- P3A_RG_PLL_DELTA_PE2H_VAL(0x36));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG25, P3A_RG_PLL_DELTA_PE2H, 0x36);
/* change pll BW 0.6M */
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG5,
P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H,
- P3A_RG_PLL_BR_PE2H_VAL(0x1) | P3A_RG_PLL_IC_PE2H_VAL(0x1));
+ FIELD_PREP(P3A_RG_PLL_BR_PE2H, 0x1) |
+ FIELD_PREP(P3A_RG_PLL_IC_PE2H, 0x1));
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG4,
P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H,
- P3A_RG_PLL_BC_PE2H_VAL(0x3));
+ FIELD_PREP(P3A_RG_PLL_BC_PE2H, 0x3));
- mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG6, P3A_RG_PLL_IR_PE2H,
- P3A_RG_PLL_IR_PE2H_VAL(0x2));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG6, P3A_RG_PLL_IR_PE2H, 0x2);
- mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG7, P3A_RG_PLL_BP_PE2H,
- P3A_RG_PLL_BP_PE2H_VAL(0xa));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG7, P3A_RG_PLL_BP_PE2H, 0xa);
/* Tx Detect Rx Timing: 10us -> 5us */
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
- P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10));
+ mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
+ P3D_RG_RXDET_STB2_SET, 0x10);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
- P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10));
+ mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
+ P3D_RG_RXDET_STB2_SET_P3, 0x10);
/* wait for PCIe subsys register to active */
usleep_range(2500, 3000);
@@ -733,38 +671,38 @@ static void sata_phy_instance_init(struct mtk_tphy *tphy,
/* charge current adjustment */
mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL6,
RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK,
- RG_CDR_BIRLTR_GEN1_VAL(0x6) | RG_CDR_BC_GEN1_VAL(0x1a));
+ FIELD_PREP(RG_CDR_BIRLTR_GEN1_MSK, 0x6) |
+ FIELD_PREP(RG_CDR_BC_GEN1_MSK, 0x1a));
- mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL4, RG_CDR_BIRLTD0_GEN1_MSK,
- RG_CDR_BIRLTD0_GEN1_VAL(0x18));
+ mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL4, RG_CDR_BIRLTD0_GEN1_MSK, 0x18);
- mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL5, RG_CDR_BIRLTD0_GEN3_MSK,
- RG_CDR_BIRLTD0_GEN3_VAL(0x06));
+ mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL5, RG_CDR_BIRLTD0_GEN3_MSK, 0x06);
mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL4,
RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK,
- RG_CDR_BICLTR_GEN1_VAL(0x0c) | RG_CDR_BR_GEN2_VAL(0x07));
+ FIELD_PREP(RG_CDR_BICLTR_GEN1_MSK, 0x0c) |
+ FIELD_PREP(RG_CDR_BR_GEN2_MSK, 0x07));
mtk_phy_update_bits(phyd + PHYD_CTRL_SIGNAL_MODE4,
RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK,
- RG_CDR_BICLTD0_GEN1_VAL(0x08) | RG_CDR_BICLTD1_GEN1_VAL(0x02));
+ FIELD_PREP(RG_CDR_BICLTD0_GEN1_MSK, 0x08) |
+ FIELD_PREP(RG_CDR_BICLTD1_GEN1_MSK, 0x02));
- mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION2, RG_LOCK_CNT_SEL_MSK,
- RG_LOCK_CNT_SEL_VAL(0x02));
+ mtk_phy_update_field(phyd + PHYD_DESIGN_OPTION2, RG_LOCK_CNT_SEL_MSK, 0x02);
mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
RG_T2_MIN_MSK | RG_TG_MIN_MSK,
- RG_T2_MIN_VAL(0x12) | RG_TG_MIN_VAL(0x04));
+ FIELD_PREP(RG_T2_MIN_MSK, 0x12) |
+ FIELD_PREP(RG_TG_MIN_MSK, 0x04));
mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
RG_T2_MAX_MSK | RG_TG_MAX_MSK,
- RG_T2_MAX_VAL(0x31) | RG_TG_MAX_VAL(0x0e));
+ FIELD_PREP(RG_T2_MAX_MSK, 0x31) |
+ FIELD_PREP(RG_TG_MAX_MSK, 0x0e));
- mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL1, RG_IDRV_0DB_GEN1_MSK,
- RG_IDRV_0DB_GEN1_VAL(0x20));
+ mtk_phy_update_field(phyd + ANA_RG_CTRL_SIGNAL1, RG_IDRV_0DB_GEN1_MSK, 0x20);
- mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL1, RG_EQ_DLEQ_LFI_GEN1_MSK,
- RG_EQ_DLEQ_LFI_GEN1_VAL(0x03));
+ mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL1, RG_EQ_DLEQ_LFI_GEN1_MSK, 0x03);
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
@@ -841,10 +779,13 @@ static void phy_parse_property(struct mtk_tphy *tphy,
&instance->intr);
device_property_read_u32(dev, "mediatek,discth",
&instance->discth);
+ device_property_read_u32(dev, "mediatek,pre-emphasis",
+ &instance->pre_emphasis);
dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d, intr:%d, disc:%d\n",
instance->bc12_en, instance->eye_src,
instance->eye_vrt, instance->eye_term,
instance->intr, instance->discth);
+ dev_dbg(dev, "pre-emp:%d\n", instance->pre_emphasis);
}
static void u2_phy_props_set(struct mtk_tphy *tphy,
@@ -857,24 +798,33 @@ static void u2_phy_props_set(struct mtk_tphy *tphy,
mtk_phy_set_bits(com + U3P_U2PHYBC12C, P2C_RG_CHGDT_EN);
if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src)
- mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
- PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src));
+ mtk_phy_update_field(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
+ instance->eye_src);
if (instance->eye_vrt)
- mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL,
- PA1_RG_VRT_SEL_VAL(instance->eye_vrt));
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL,
+ instance->eye_vrt);
if (instance->eye_term)
- mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL,
- PA1_RG_TERM_SEL_VAL(instance->eye_term));
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL,
+ instance->eye_term);
- if (instance->intr)
- mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
- PA1_RG_INTR_CAL_VAL(instance->intr));
+ if (instance->intr) {
+ if (u2_banks->misc)
+ mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1,
+ MR1_EFUSE_AUTO_LOAD_DIS);
+
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
+ instance->intr);
+ }
if (instance->discth)
- mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH,
- PA6_RG_U2_DISCTH_VAL(instance->discth));
+ mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH,
+ instance->discth);
+
+ if (instance->pre_emphasis)
+ mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_PRE_EMP,
+ instance->pre_emphasis);
}
/* type switch for usb3/pcie/sgmii/sata */
@@ -906,7 +856,7 @@ static int phy_type_syscon_get(struct mtk_phy_instance *instance,
static int phy_type_set(struct mtk_phy_instance *instance)
{
int type;
- u32 mask;
+ u32 offset;
if (!instance->type_sw)
return 0;
@@ -929,8 +879,9 @@ static int phy_type_set(struct mtk_phy_instance *instance)
return 0;
}
- mask = RG_PHY_SW_TYPE << (instance->type_sw_index * BITS_PER_BYTE);
- regmap_update_bits(instance->type_sw, instance->type_sw_reg, mask, type);
+ offset = instance->type_sw_index * BITS_PER_BYTE;
+ regmap_update_bits(instance->type_sw, instance->type_sw_reg,
+ RG_PHY_SW_TYPE << offset, type << offset);
return 0;
}
@@ -1022,23 +973,23 @@ static void phy_efuse_set(struct mtk_phy_instance *instance)
case PHY_TYPE_USB2:
mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1, MR1_EFUSE_AUTO_LOAD_DIS);
- mtk_phy_update_bits(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
- PA1_RG_INTR_CAL_VAL(instance->efuse_intr));
+ mtk_phy_update_field(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
+ instance->efuse_intr);
break;
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_RSV, P3D_RG_EFUSE_AUTO_LOAD_DIS);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL,
- P3D_RG_TX_IMPEL_VAL(instance->efuse_tx_imp));
+ mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL,
+ instance->efuse_tx_imp);
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_FORCE_TX_IMPEL);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL,
- P3D_RG_RX_IMPEL_VAL(instance->efuse_rx_imp));
+ mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL,
+ instance->efuse_rx_imp);
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_FORCE_RX_IMPEL);
- mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG0, P3A_RG_IEXT_INTR,
- P3A_RG_IEXT_INTR_VAL(instance->efuse_intr));
+ mtk_phy_update_field(u3_banks->phya + U3P_U3_PHYA_REG0, P3A_RG_IEXT_INTR,
+ instance->efuse_intr);
break;
default:
dev_warn(dev, "no sw efuse for type %d\n", instance->type);
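
One behavioural fix folded into the tphy changes above is phy_type_set(): the per-instance type field occupies one byte per instance in the type-switch syscon register, so the value handed to regmap_update_bits() must be shifted by the same per-instance offset as the mask (the old code shifted only the mask, so the type was masked away for any instance other than the first). A standalone sketch of the before/after encoding, with RG_PHY_SW_TYPE replaced by a one-byte stand-in:

#include <assert.h>
#include <stdint.h>

#define RG_PHY_SW_TYPE	0xffu	/* stand-in; the real field may be narrower */
#define BITS_PER_BYTE	8

/* what regmap_update_bits() writes back, given the current register value */
static uint32_t rmw(uint32_t orig, uint32_t mask, uint32_t val)
{
	return (orig & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int index = 1;		/* second instance */
	uint32_t type = 0x02;		/* hypothetical type code */
	uint32_t offset = index * BITS_PER_BYTE;

	/* old: value left unshifted, so it is masked away for index > 0 */
	uint32_t before = rmw(0, RG_PHY_SW_TYPE << offset, type);
	/* new: both mask and value shifted by the instance offset */
	uint32_t after = rmw(0, RG_PHY_SW_TYPE << offset, type << offset);

	assert(before == 0x0000);	/* type silently lost */
	assert(after == 0x0200);	/* lands in the instance's byte */
	return 0;
}
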
diff --git a/drivers/phy/mediatek/phy-mtk-ufs.c b/drivers/phy/mediatek/phy-mtk-ufs.c
index a6af06941203..fc19e0fa8ed5 100644
--- a/drivers/phy/mediatek/phy-mtk-ufs.c
+++ b/drivers/phy/mediatek/phy-mtk-ufs.c
@@ -11,6 +11,8 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include "phy-mtk-io.h"
+
/* mphy register and offsets */
#define MP_GLB_DIG_8C 0x008C
#define FRC_PLL_ISO_EN BIT(8)
@@ -39,34 +41,6 @@ struct ufs_mtk_phy {
struct clk_bulk_data clks[UFSPHY_CLKS_CNT];
};
-static inline u32 mphy_readl(struct ufs_mtk_phy *phy, u32 reg)
-{
- return readl(phy->mmio + reg);
-}
-
-static inline void mphy_writel(struct ufs_mtk_phy *phy, u32 val, u32 reg)
-{
- writel(val, phy->mmio + reg);
-}
-
-static void mphy_set_bit(struct ufs_mtk_phy *phy, u32 reg, u32 bit)
-{
- u32 val;
-
- val = mphy_readl(phy, reg);
- val |= bit;
- mphy_writel(phy, val, reg);
-}
-
-static void mphy_clr_bit(struct ufs_mtk_phy *phy, u32 reg, u32 bit)
-{
- u32 val;
-
- val = mphy_readl(phy, reg);
- val &= ~bit;
- mphy_writel(phy, val, reg);
-}
-
static struct ufs_mtk_phy *get_ufs_mtk_phy(struct phy *generic_phy)
{
return (struct ufs_mtk_phy *)phy_get_drvdata(generic_phy);
@@ -84,57 +58,61 @@ static int ufs_mtk_phy_clk_init(struct ufs_mtk_phy *phy)
static void ufs_mtk_phy_set_active(struct ufs_mtk_phy *phy)
{
+ void __iomem *mmio = phy->mmio;
+
/* release DA_MP_PLL_PWR_ON */
- mphy_set_bit(phy, MP_GLB_DIG_8C, PLL_PWR_ON);
- mphy_clr_bit(phy, MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
+ mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, PLL_PWR_ON);
+ mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
/* release DA_MP_PLL_ISO_EN */
- mphy_clr_bit(phy, MP_GLB_DIG_8C, PLL_ISO_EN);
- mphy_clr_bit(phy, MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
+ mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, PLL_ISO_EN);
+ mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
/* release DA_MP_CDR_PWR_ON */
- mphy_set_bit(phy, MP_LN_RX_44, CDR_PWR_ON);
- mphy_clr_bit(phy, MP_LN_RX_44, FRC_CDR_PWR_ON);
+ mtk_phy_set_bits(mmio + MP_LN_RX_44, CDR_PWR_ON);
+ mtk_phy_clear_bits(mmio + MP_LN_RX_44, FRC_CDR_PWR_ON);
/* release DA_MP_CDR_ISO_EN */
- mphy_clr_bit(phy, MP_LN_RX_44, CDR_ISO_EN);
- mphy_clr_bit(phy, MP_LN_RX_44, FRC_CDR_ISO_EN);
+ mtk_phy_clear_bits(mmio + MP_LN_RX_44, CDR_ISO_EN);
+ mtk_phy_clear_bits(mmio + MP_LN_RX_44, FRC_CDR_ISO_EN);
/* release DA_MP_RX0_SQ_EN */
- mphy_set_bit(phy, MP_LN_DIG_RX_AC, RX_SQ_EN);
- mphy_clr_bit(phy, MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
+ mtk_phy_set_bits(mmio + MP_LN_DIG_RX_AC, RX_SQ_EN);
+ mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
/* delay 1us to wait DIFZ stable */
udelay(1);
/* release DIFZ */
- mphy_clr_bit(phy, MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
+ mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
}
static void ufs_mtk_phy_set_deep_hibern(struct ufs_mtk_phy *phy)
{
+ void __iomem *mmio = phy->mmio;
+
/* force DIFZ */
- mphy_set_bit(phy, MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
+ mtk_phy_set_bits(mmio + MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
/* force DA_MP_RX0_SQ_EN */
- mphy_set_bit(phy, MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
- mphy_clr_bit(phy, MP_LN_DIG_RX_AC, RX_SQ_EN);
+ mtk_phy_set_bits(mmio + MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
+ mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_AC, RX_SQ_EN);
/* force DA_MP_CDR_ISO_EN */
- mphy_set_bit(phy, MP_LN_RX_44, FRC_CDR_ISO_EN);
- mphy_set_bit(phy, MP_LN_RX_44, CDR_ISO_EN);
+ mtk_phy_set_bits(mmio + MP_LN_RX_44, FRC_CDR_ISO_EN);
+ mtk_phy_set_bits(mmio + MP_LN_RX_44, CDR_ISO_EN);
/* force DA_MP_CDR_PWR_ON */
- mphy_set_bit(phy, MP_LN_RX_44, FRC_CDR_PWR_ON);
- mphy_clr_bit(phy, MP_LN_RX_44, CDR_PWR_ON);
+ mtk_phy_set_bits(mmio + MP_LN_RX_44, FRC_CDR_PWR_ON);
+ mtk_phy_clear_bits(mmio + MP_LN_RX_44, CDR_PWR_ON);
/* force DA_MP_PLL_ISO_EN */
- mphy_set_bit(phy, MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
- mphy_set_bit(phy, MP_GLB_DIG_8C, PLL_ISO_EN);
+ mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
+ mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, PLL_ISO_EN);
/* force DA_MP_PLL_PWR_ON */
- mphy_set_bit(phy, MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
- mphy_clr_bit(phy, MP_GLB_DIG_8C, PLL_PWR_ON);
+ mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
+ mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, PLL_PWR_ON);
}
static int ufs_mtk_phy_power_on(struct phy *generic_phy)
diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c
index c0cdb78f77fa..b222fbbd71d1 100644
--- a/drivers/phy/mediatek/phy-mtk-xsphy.c
+++ b/drivers/phy/mediatek/phy-mtk-xsphy.c
@@ -37,7 +37,6 @@
#define XSP_U2FREQ_FMCR0 ((SSUSB_SIFSLV_U2FREQ) + 0x00)
#define P2F_RG_FREQDET_EN BIT(24)
#define P2F_RG_CYCLECNT GENMASK(23, 0)
-#define P2F_RG_CYCLECNT_VAL(x) ((P2F_RG_CYCLECNT) & (x))
#define XSP_U2FREQ_MMONR0 ((SSUSB_SIFSLV_U2FREQ) + 0x0c)
@@ -50,16 +49,12 @@
#define XSP_USBPHYACR1 ((SSUSB_SIFSLV_U2PHY_COM) + 0x04)
#define P2A1_RG_INTR_CAL GENMASK(23, 19)
-#define P2A1_RG_INTR_CAL_VAL(x) ((0x1f & (x)) << 19)
#define P2A1_RG_VRT_SEL GENMASK(14, 12)
-#define P2A1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12)
#define P2A1_RG_TERM_SEL GENMASK(10, 8)
-#define P2A1_RG_TERM_SEL_VAL(x) ((0x7 & (x)) << 8)
#define XSP_USBPHYACR5 ((SSUSB_SIFSLV_U2PHY_COM) + 0x014)
#define P2A5_RG_HSTX_SRCAL_EN BIT(15)
#define P2A5_RG_HSTX_SRCTRL GENMASK(14, 12)
-#define P2A5_RG_HSTX_SRCTRL_VAL(x) ((0x7 & (x)) << 12)
#define XSP_USBPHYACR6 ((SSUSB_SIFSLV_U2PHY_COM) + 0x018)
#define P2A6_RG_BC11_SW_EN BIT(23)
@@ -74,15 +69,12 @@
#define SSPXTP_PHYA_GLB_00 ((SSPXTP_SIFSLV_PHYA_GLB) + 0x00)
#define RG_XTP_GLB_BIAS_INTR_CTRL GENMASK(21, 16)
-#define RG_XTP_GLB_BIAS_INTR_CTRL_VAL(x) ((0x3f & (x)) << 16)
#define SSPXTP_PHYA_LN_04 ((SSPXTP_SIFSLV_PHYA_LN) + 0x04)
#define RG_XTP_LN0_TX_IMPSEL GENMASK(4, 0)
-#define RG_XTP_LN0_TX_IMPSEL_VAL(x) (0x1f & (x))
#define SSPXTP_PHYA_LN_14 ((SSPXTP_SIFSLV_PHYA_LN) + 0x014)
#define RG_XTP_LN0_RX_IMPSEL GENMASK(4, 0)
-#define RG_XTP_LN0_RX_IMPSEL_VAL(x) (0x1f & (x))
#define XSP_REF_CLK 26 /* MHZ */
#define XSP_SLEW_RATE_COEF 17
@@ -134,8 +126,8 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
mtk_phy_set_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
/* set cycle count as 1024 */
- mtk_phy_update_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_CYCLECNT,
- P2F_RG_CYCLECNT_VAL(XSP_FM_DET_CYCLE_CNT));
+ mtk_phy_update_field(pbase + XSP_U2FREQ_FMCR0, P2F_RG_CYCLECNT,
+ XSP_FM_DET_CYCLE_CNT);
/* enable frequency meter */
mtk_phy_set_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
@@ -166,8 +158,7 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
xsphy->src_ref_clk, xsphy->src_coef);
/* set HS slew rate */
- mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
- P2A5_RG_HSTX_SRCTRL_VAL(calib_val));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL, calib_val);
/* disable USB ring oscillator */
mtk_phy_clear_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN);
@@ -280,20 +271,20 @@ static void u2_phy_props_set(struct mtk_xsphy *xsphy,
void __iomem *pbase = inst->port_base;
if (inst->efuse_intr)
- mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_INTR_CAL,
- P2A1_RG_INTR_CAL_VAL(inst->efuse_intr));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_INTR_CAL,
+ inst->efuse_intr);
if (inst->eye_src)
- mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
- P2A5_RG_HSTX_SRCTRL_VAL(inst->eye_src));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
+ inst->eye_src);
if (inst->eye_vrt)
- mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_VRT_SEL,
- P2A1_RG_VRT_SEL_VAL(inst->eye_vrt));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_VRT_SEL,
+ inst->eye_vrt);
if (inst->eye_term)
- mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_TERM_SEL,
- P2A1_RG_TERM_SEL_VAL(inst->eye_term));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_TERM_SEL,
+ inst->eye_term);
}
static void u3_phy_props_set(struct mtk_xsphy *xsphy,
@@ -302,19 +293,16 @@ static void u3_phy_props_set(struct mtk_xsphy *xsphy,
void __iomem *pbase = inst->port_base;
if (inst->efuse_intr)
- mtk_phy_update_bits(xsphy->glb_base + SSPXTP_PHYA_GLB_00,
- RG_XTP_GLB_BIAS_INTR_CTRL,
- RG_XTP_GLB_BIAS_INTR_CTRL_VAL(inst->efuse_intr));
+ mtk_phy_update_field(xsphy->glb_base + SSPXTP_PHYA_GLB_00,
+ RG_XTP_GLB_BIAS_INTR_CTRL, inst->efuse_intr);
if (inst->efuse_tx_imp)
- mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_04,
- RG_XTP_LN0_TX_IMPSEL,
- RG_XTP_LN0_TX_IMPSEL_VAL(inst->efuse_tx_imp));
+ mtk_phy_update_field(pbase + SSPXTP_PHYA_LN_04,
+ RG_XTP_LN0_TX_IMPSEL, inst->efuse_tx_imp);
if (inst->efuse_rx_imp)
- mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_14,
- RG_XTP_LN0_RX_IMPSEL,
- RG_XTP_LN0_RX_IMPSEL_VAL(inst->efuse_rx_imp));
+ mtk_phy_update_field(pbase + SSPXTP_PHYA_LN_14,
+ RG_XTP_LN0_RX_IMPSEL, inst->efuse_rx_imp);
}
static int mtk_phy_init(struct phy *phy)
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
index d1a50fa81130..c1a41b6cd29b 100644
--- a/drivers/phy/microchip/lan966x_serdes.c
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -42,7 +42,10 @@
#define SERDES_MUX_QSGMII(i, p, m, c) \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c)
#define SERDES_MUX_RGMII(i, p, m, c) \
- SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII, m, c)
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII, m, c), \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_TXID, m, c), \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_RXID, m, c), \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_ID, m, c)
static void lan_rmw_(u32 val, u32 mask, void __iomem *mem, u32 offset)
{
@@ -94,21 +97,29 @@ static const struct serdes_mux lan966x_serdes_muxes[] = {
HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
- HSIO_HW_CFG_RGMII_ENA,
- HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
- HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))),
+ HSIO_HW_CFG_RGMII_ENA |
+ HSIO_HW_CFG_GMII_ENA,
+ HSIO_HW_CFG_RGMII_0_CFG_SET(0) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(0)) |
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(2))),
SERDES_MUX_RGMII(RGMII(1), 3, HSIO_HW_CFG_RGMII_1_CFG |
- HSIO_HW_CFG_RGMII_ENA,
- HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
- HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))),
+ HSIO_HW_CFG_RGMII_ENA |
+ HSIO_HW_CFG_GMII_ENA,
+ HSIO_HW_CFG_RGMII_1_CFG_SET(0) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(1)) |
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(3))),
SERDES_MUX_RGMII(RGMII(0), 5, HSIO_HW_CFG_RGMII_0_CFG |
- HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_ENA |
+ HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
- HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))),
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(0)) |
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(5))),
SERDES_MUX_RGMII(RGMII(1), 6, HSIO_HW_CFG_RGMII_1_CFG |
- HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_ENA |
+ HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
- HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))),
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(1)) |
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(6))),
};
struct serdes_ctrl {
@@ -382,6 +393,67 @@ static int lan966x_sd6g40_setup(struct serdes_macro *macro, u32 idx, int mode)
return lan966x_sd6g40_setup_lane(macro, conf, idx);
}
+static int lan966x_rgmii_setup(struct serdes_macro *macro, u32 idx, int mode)
+{
+ bool tx_delay = false;
+ bool rx_delay = false;
+
+ /* Configure RGMII */
+ lan_rmw(HSIO_RGMII_CFG_RGMII_RX_RST_SET(0) |
+ HSIO_RGMII_CFG_RGMII_TX_RST_SET(0) |
+ HSIO_RGMII_CFG_TX_CLK_CFG_SET(macro->speed == SPEED_1000 ? 1 :
+ macro->speed == SPEED_100 ? 2 :
+ macro->speed == SPEED_10 ? 3 : 0),
+ HSIO_RGMII_CFG_RGMII_RX_RST |
+ HSIO_RGMII_CFG_RGMII_TX_RST |
+ HSIO_RGMII_CFG_TX_CLK_CFG,
+ macro->ctrl->regs, HSIO_RGMII_CFG(idx));
+
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ rx_delay = true;
+
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ tx_delay = true;
+
+ /* Setup DLL configuration */
+ lan_rmw(HSIO_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_DLL_CFG_DLL_ENA_SET(rx_delay),
+ HSIO_DLL_CFG_DLL_RST |
+ HSIO_DLL_CFG_DLL_ENA,
+ macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x0 : 0x2));
+
+ lan_rmw(HSIO_DLL_CFG_DELAY_ENA_SET(rx_delay),
+ HSIO_DLL_CFG_DELAY_ENA,
+ macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x0 : 0x2));
+
+ lan_rmw(HSIO_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_DLL_CFG_DLL_ENA_SET(tx_delay),
+ HSIO_DLL_CFG_DLL_RST |
+ HSIO_DLL_CFG_DLL_ENA,
+ macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x1 : 0x3));
+
+ lan_rmw(HSIO_DLL_CFG_DELAY_ENA_SET(tx_delay),
+ HSIO_DLL_CFG_DELAY_ENA,
+ macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x1 : 0x3));
+
+ return 0;
+}
+
+static int serdes_set_speed(struct phy *phy, int speed)
+{
+ struct serdes_macro *macro = phy_get_drvdata(phy);
+
+ if (!phy_interface_mode_is_rgmii(macro->mode))
+ return 0;
+
+ macro->speed = speed;
+ lan966x_rgmii_setup(macro, macro->idx - (SERDES6G_MAX + 1), macro->mode);
+
+ return 0;
+}
+
static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct serdes_macro *macro = phy_get_drvdata(phy);
@@ -427,7 +499,9 @@ static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
macro->mode);
if (macro->idx < RGMII_MAX)
- return 0;
+ return lan966x_rgmii_setup(macro,
+ macro->idx - (SERDES6G_MAX + 1),
+ macro->mode);
return -EOPNOTSUPP;
}
@@ -437,6 +511,7 @@ static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
static const struct phy_ops serdes_ops = {
.set_mode = serdes_set_mode,
+ .set_speed = serdes_set_speed,
.owner = THIS_MODULE,
};
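
The new lan966x_rgmii_setup() above enables the MAC-side DLL delay only on the paths the PHY is not already delaying, following the usual RGMII submode convention. A standalone sketch of that decision table (the enum values are local stand-ins for the kernel's phy_interface_t constants):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for PHY_INTERFACE_MODE_RGMII{,_ID,_RXID,_TXID} */
enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

int main(void)
{
	static const char * const names[] = {
		"rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid"
	};

	for (int m = RGMII; m <= RGMII_TXID; m++) {
		/* same predicates as lan966x_rgmii_setup() */
		bool mac_rx_delay = (m == RGMII || m == RGMII_TXID);
		bool mac_tx_delay = (m == RGMII || m == RGMII_RXID);

		printf("%-10s MAC adds delay: rx=%d tx=%d\n",
		       names[m], mac_rx_delay, mac_tx_delay);
	}
	return 0;
}
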
diff --git a/drivers/phy/microchip/lan966x_serdes_regs.h b/drivers/phy/microchip/lan966x_serdes_regs.h
index ea30f64ffd5c..ac54cd01fea6 100644
--- a/drivers/phy/microchip/lan966x_serdes_regs.h
+++ b/drivers/phy/microchip/lan966x_serdes_regs.h
@@ -206,4 +206,46 @@ enum lan966x_target {
#define HSIO_HW_CFG_QSGMII_ENA_GET(x)\
FIELD_GET(HSIO_HW_CFG_QSGMII_ENA, x)
+/* HSIO:HW_CFGSTAT:RGMII_CFG */
+#define HSIO_RGMII_CFG(r) __REG(TARGET_HSIO, 0, 1, 104, 0, 1, 52, 20, r, 2, 4)
+
+#define HSIO_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2)
+#define HSIO_RGMII_CFG_TX_CLK_CFG_SET(x)\
+ FIELD_PREP(HSIO_RGMII_CFG_TX_CLK_CFG, x)
+#define HSIO_RGMII_CFG_TX_CLK_CFG_GET(x)\
+ FIELD_GET(HSIO_RGMII_CFG_TX_CLK_CFG, x)
+
+#define HSIO_RGMII_CFG_RGMII_TX_RST BIT(1)
+#define HSIO_RGMII_CFG_RGMII_TX_RST_SET(x)\
+ FIELD_PREP(HSIO_RGMII_CFG_RGMII_TX_RST, x)
+#define HSIO_RGMII_CFG_RGMII_TX_RST_GET(x)\
+ FIELD_GET(HSIO_RGMII_CFG_RGMII_TX_RST, x)
+
+#define HSIO_RGMII_CFG_RGMII_RX_RST BIT(0)
+#define HSIO_RGMII_CFG_RGMII_RX_RST_SET(x)\
+ FIELD_PREP(HSIO_RGMII_CFG_RGMII_RX_RST, x)
+#define HSIO_RGMII_CFG_RGMII_RX_RST_GET(x)\
+ FIELD_GET(HSIO_RGMII_CFG_RGMII_RX_RST, x)
+
+/* HSIO:HW_CFGSTAT:DLL_CFG */
+#define HSIO_DLL_CFG(r) __REG(TARGET_HSIO, 0, 1, 104, 0, 1, 52, 36, r, 4, 4)
+
+#define HSIO_DLL_CFG_DELAY_ENA BIT(2)
+#define HSIO_DLL_CFG_DELAY_ENA_SET(x)\
+ FIELD_PREP(HSIO_DLL_CFG_DELAY_ENA, x)
+#define HSIO_DLL_CFG_DELAY_ENA_GET(x)\
+ FIELD_GET(HSIO_DLL_CFG_DELAY_ENA, x)
+
+#define HSIO_DLL_CFG_DLL_ENA BIT(1)
+#define HSIO_DLL_CFG_DLL_ENA_SET(x)\
+ FIELD_PREP(HSIO_DLL_CFG_DLL_ENA, x)
+#define HSIO_DLL_CFG_DLL_ENA_GET(x)\
+ FIELD_GET(HSIO_DLL_CFG_DLL_ENA, x)
+
+#define HSIO_DLL_CFG_DLL_RST BIT(0)
+#define HSIO_DLL_CFG_DLL_RST_SET(x)\
+ FIELD_PREP(HSIO_DLL_CFG_DLL_RST, x)
+#define HSIO_DLL_CFG_DLL_RST_GET(x)\
+ FIELD_GET(HSIO_DLL_CFG_DLL_RST, x)
+
#endif /* _LAN966X_HSIO_REGS_H_ */
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
index 7e3570789845..fc8ca0f3018d 100644
--- a/drivers/phy/qualcomm/phy-qcom-edp.c
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -70,8 +70,19 @@
#define TXn_TRAN_DRVR_EMP_EN 0x0078
+struct qcom_edp_cfg {
+ bool is_dp;
+
+ /* DP PHY swing and pre_emphasis tables */
+ const u8 (*swing_hbr_rbr)[4][4];
+ const u8 (*swing_hbr3_hbr2)[4][4];
+ const u8 (*pre_emphasis_hbr_rbr)[4][4];
+ const u8 (*pre_emphasis_hbr3_hbr2)[4][4];
+};
+
struct qcom_edp {
struct device *dev;
+ const struct qcom_edp_cfg *cfg;
struct phy *phy;
@@ -89,10 +100,84 @@ struct qcom_edp {
struct regulator_bulk_data supplies[2];
};
+static const u8 dp_swing_hbr_rbr[4][4] = {
+ { 0x08, 0x0f, 0x16, 0x1f },
+ { 0x11, 0x1e, 0x1f, 0xff },
+ { 0x16, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 dp_pre_emp_hbr_rbr[4][4] = {
+ { 0x00, 0x0d, 0x14, 0x1a },
+ { 0x00, 0x0e, 0x15, 0xff },
+ { 0x00, 0x0e, 0xff, 0xff },
+ { 0x03, 0xff, 0xff, 0xff }
+};
+
+static const u8 dp_swing_hbr2_hbr3[4][4] = {
+ { 0x02, 0x12, 0x16, 0x1a },
+ { 0x09, 0x19, 0x1f, 0xff },
+ { 0x10, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 dp_pre_emp_hbr2_hbr3[4][4] = {
+ { 0x00, 0x0c, 0x15, 0x1b },
+ { 0x02, 0x0e, 0x16, 0xff },
+ { 0x02, 0x11, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const struct qcom_edp_cfg dp_phy_cfg = {
+ .is_dp = true,
+ .swing_hbr_rbr = &dp_swing_hbr_rbr,
+ .swing_hbr3_hbr2 = &dp_swing_hbr2_hbr3,
+ .pre_emphasis_hbr_rbr = &dp_pre_emp_hbr_rbr,
+ .pre_emphasis_hbr3_hbr2 = &dp_pre_emp_hbr2_hbr3,
+};
+
+static const u8 edp_swing_hbr_rbr[4][4] = {
+ { 0x07, 0x0f, 0x16, 0x1f },
+ { 0x0d, 0x16, 0x1e, 0xff },
+ { 0x11, 0x1b, 0xff, 0xff },
+ { 0x16, 0xff, 0xff, 0xff }
+};
+
+static const u8 edp_pre_emp_hbr_rbr[4][4] = {
+ { 0x05, 0x12, 0x17, 0x1d },
+ { 0x05, 0x11, 0x18, 0xff },
+ { 0x06, 0x11, 0xff, 0xff },
+ { 0x00, 0xff, 0xff, 0xff }
+};
+
+static const u8 edp_swing_hbr2_hbr3[4][4] = {
+ { 0x0b, 0x11, 0x17, 0x1c },
+ { 0x10, 0x19, 0x1f, 0xff },
+ { 0x19, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 edp_pre_emp_hbr2_hbr3[4][4] = {
+ { 0x08, 0x11, 0x17, 0x1b },
+ { 0x00, 0x0c, 0x13, 0xff },
+ { 0x05, 0x10, 0xff, 0xff },
+ { 0x00, 0xff, 0xff, 0xff }
+};
+
+static const struct qcom_edp_cfg edp_phy_cfg = {
+ .is_dp = false,
+ .swing_hbr_rbr = &edp_swing_hbr_rbr,
+ .swing_hbr3_hbr2 = &edp_swing_hbr2_hbr3,
+ .pre_emphasis_hbr_rbr = &edp_pre_emp_hbr_rbr,
+ .pre_emphasis_hbr3_hbr2 = &edp_pre_emp_hbr2_hbr3,
+};
+
static int qcom_edp_phy_init(struct phy *phy)
{
struct qcom_edp *edp = phy_get_drvdata(phy);
+ const struct qcom_edp_cfg *cfg = edp->cfg;
int ret;
+ u8 cfg8;
ret = regulator_bulk_enable(ARRAY_SIZE(edp->supplies), edp->supplies);
if (ret)
@@ -117,6 +202,13 @@ static int qcom_edp_phy_init(struct phy *phy)
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
edp->edp + DP_PHY_PD_CTL);
+ if (cfg && cfg->is_dp)
+ cfg8 = 0xb7;
+ else
+ cfg8 = 0x37;
+
+ writel(0xfc, edp->edp + DP_PHY_MODE);
+
writel(0x00, edp->edp + DP_PHY_AUX_CFG0);
writel(0x13, edp->edp + DP_PHY_AUX_CFG1);
writel(0x24, edp->edp + DP_PHY_AUX_CFG2);
@@ -125,7 +217,7 @@ static int qcom_edp_phy_init(struct phy *phy)
writel(0x26, edp->edp + DP_PHY_AUX_CFG5);
writel(0x0a, edp->edp + DP_PHY_AUX_CFG6);
writel(0x03, edp->edp + DP_PHY_AUX_CFG7);
- writel(0x37, edp->edp + DP_PHY_AUX_CFG8);
+ writel(cfg8, edp->edp + DP_PHY_AUX_CFG8);
writel(0x03, edp->edp + DP_PHY_AUX_CFG9);
writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
@@ -142,14 +234,60 @@ out_disable_supplies:
return ret;
}
+static int qcom_edp_set_voltages(struct qcom_edp *edp, const struct phy_configure_opts_dp *dp_opts)
+{
+ const struct qcom_edp_cfg *cfg = edp->cfg;
+ unsigned int v_level = 0;
+ unsigned int p_level = 0;
+ u8 ldo_config;
+ u8 swing;
+ u8 emph;
+ int i;
+
+ if (!cfg)
+ return 0;
+
+ for (i = 0; i < dp_opts->lanes; i++) {
+ v_level = max(v_level, dp_opts->voltage[i]);
+ p_level = max(p_level, dp_opts->pre[i]);
+ }
+
+ if (dp_opts->link_rate <= 2700) {
+ swing = (*cfg->swing_hbr_rbr)[v_level][p_level];
+ emph = (*cfg->pre_emphasis_hbr_rbr)[v_level][p_level];
+ } else {
+ swing = (*cfg->swing_hbr3_hbr2)[v_level][p_level];
+ emph = (*cfg->pre_emphasis_hbr3_hbr2)[v_level][p_level];
+ }
+
+ if (swing == 0xff || emph == 0xff)
+ return -EINVAL;
+
+ ldo_config = (cfg && cfg->is_dp) ? 0x1 : 0x0;
+
+ writel(ldo_config, edp->tx0 + TXn_LDO_CONFIG);
+ writel(swing, edp->tx0 + TXn_TX_DRV_LVL);
+ writel(emph, edp->tx0 + TXn_TX_EMP_POST1_LVL);
+
+ writel(ldo_config, edp->tx1 + TXn_LDO_CONFIG);
+ writel(swing, edp->tx1 + TXn_TX_DRV_LVL);
+ writel(emph, edp->tx1 + TXn_TX_EMP_POST1_LVL);
+
+ return 0;
+}
+
static int qcom_edp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
const struct phy_configure_opts_dp *dp_opts = &opts->dp;
struct qcom_edp *edp = phy_get_drvdata(phy);
+ int ret = 0;
memcpy(&edp->dp_opts, dp_opts, sizeof(*dp_opts));
- return 0;
+ if (dp_opts->set_voltages)
+ ret = qcom_edp_set_voltages(edp, dp_opts);
+
+ return ret;
}
static int qcom_edp_configure_ssc(const struct qcom_edp *edp)
@@ -272,31 +410,30 @@ static int qcom_edp_configure_pll(const struct qcom_edp *edp)
return 0;
}
-static int qcom_edp_set_vco_div(const struct qcom_edp *edp)
+static int qcom_edp_set_vco_div(const struct qcom_edp *edp, unsigned long *pixel_freq)
{
const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
- unsigned long pixel_freq;
u32 vco_div;
switch (dp_opts->link_rate) {
case 1620:
vco_div = 0x1;
- pixel_freq = 1620000000UL / 2;
+ *pixel_freq = 1620000000UL / 2;
break;
case 2700:
vco_div = 0x1;
- pixel_freq = 2700000000UL / 2;
+ *pixel_freq = 2700000000UL / 2;
break;
case 5400:
vco_div = 0x2;
- pixel_freq = 5400000000UL / 4;
+ *pixel_freq = 5400000000UL / 4;
break;
case 8100:
vco_div = 0x0;
- pixel_freq = 8100000000UL / 6;
+ *pixel_freq = 8100000000UL / 6;
break;
default:
@@ -306,18 +443,20 @@ static int qcom_edp_set_vco_div(const struct qcom_edp *edp)
writel(vco_div, edp->edp + DP_PHY_VCO_DIV);
- clk_set_rate(edp->dp_link_hw.clk, dp_opts->link_rate * 100000);
- clk_set_rate(edp->dp_pixel_hw.clk, pixel_freq);
-
return 0;
}
static int qcom_edp_phy_power_on(struct phy *phy)
{
const struct qcom_edp *edp = phy_get_drvdata(phy);
+ const struct qcom_edp_cfg *cfg = edp->cfg;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ unsigned long pixel_freq;
+ u8 ldo_config;
int timeout;
int ret;
u32 val;
+ u8 cfg1;
writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
@@ -330,8 +469,11 @@ static int qcom_edp_phy_power_on(struct phy *phy)
if (timeout)
return timeout;
- writel(0x01, edp->tx0 + TXn_LDO_CONFIG);
- writel(0x01, edp->tx1 + TXn_LDO_CONFIG);
+
+ ldo_config = (cfg && cfg->is_dp) ? 0x1 : 0x0;
+
+ writel(ldo_config, edp->tx0 + TXn_LDO_CONFIG);
+ writel(ldo_config, edp->tx1 + TXn_LDO_CONFIG);
writel(0x00, edp->tx0 + TXn_LANE_MODE_1);
writel(0x00, edp->tx1 + TXn_LANE_MODE_1);
@@ -363,7 +505,7 @@ static int qcom_edp_phy_power_on(struct phy *phy)
writel(0x01, edp->tx1 + TXn_TRAN_DRVR_EMP_EN);
writel(0x04, edp->tx1 + TXn_TX_BAND);
- ret = qcom_edp_set_vco_div(edp);
+ ret = qcom_edp_set_vco_div(edp, &pixel_freq);
if (ret)
return ret;
@@ -398,19 +540,46 @@ static int qcom_edp_phy_power_on(struct phy *phy)
writel(0x1f, edp->tx0 + TXn_TX_DRV_LVL);
writel(0x1f, edp->tx1 + TXn_TX_DRV_LVL);
- writel(0x4, edp->tx0 + TXn_HIGHZ_DRVR_EN);
- writel(0x3, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN);
- writel(0x4, edp->tx1 + TXn_HIGHZ_DRVR_EN);
- writel(0x0, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN);
- writel(0x3, edp->edp + DP_PHY_CFG_1);
+ if (edp->dp_opts.lanes == 1) {
+ bias0_en = 0x01;
+ bias1_en = 0x00;
+ drvr0_en = 0x06;
+ drvr1_en = 0x07;
+ cfg1 = 0x1;
+ } else if (edp->dp_opts.lanes == 2) {
+ bias0_en = 0x03;
+ bias1_en = 0x00;
+ drvr0_en = 0x04;
+ drvr1_en = 0x07;
+ cfg1 = 0x3;
+ } else {
+ bias0_en = 0x03;
+ bias1_en = 0x03;
+ drvr0_en = 0x04;
+ drvr1_en = 0x04;
+ cfg1 = 0xf;
+ }
+
+ writel(drvr0_en, edp->tx0 + TXn_HIGHZ_DRVR_EN);
+ writel(bias0_en, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(drvr1_en, edp->tx1 + TXn_HIGHZ_DRVR_EN);
+ writel(bias1_en, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(cfg1, edp->edp + DP_PHY_CFG_1);
writel(0x18, edp->edp + DP_PHY_CFG);
usleep_range(100, 1000);
writel(0x19, edp->edp + DP_PHY_CFG);
- return readl_poll_timeout(edp->edp + DP_PHY_STATUS,
- val, val & BIT(1), 500, 10000);
+ ret = readl_poll_timeout(edp->edp + DP_PHY_STATUS,
+ val, val & BIT(1), 500, 10000);
+ if (ret)
+ return ret;
+
+ clk_set_rate(edp->dp_link_hw.clk, edp->dp_opts.link_rate * 100000);
+ clk_set_rate(edp->dp_pixel_hw.clk, pixel_freq);
+
+ return 0;
}
static int qcom_edp_phy_power_off(struct phy *phy)
@@ -571,21 +740,24 @@ static int qcom_edp_clks_register(struct qcom_edp *edp, struct device_node *np)
{
struct clk_hw_onecell_data *data;
struct clk_init_data init = { };
+ char name[64];
int ret;
data = devm_kzalloc(edp->dev, struct_size(data, hws, 2), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ snprintf(name, sizeof(name), "%s::link_clk", dev_name(edp->dev));
init.ops = &qcom_edp_dp_link_clk_ops;
- init.name = "edp_phy_pll_link_clk";
+ init.name = name;
edp->dp_link_hw.init = &init;
ret = devm_clk_hw_register(edp->dev, &edp->dp_link_hw);
if (ret)
return ret;
+ snprintf(name, sizeof(name), "%s::vco_div_clk", dev_name(edp->dev));
init.ops = &qcom_edp_dp_pixel_clk_ops;
- init.name = "edp_phy_pll_vco_div_clk";
+ init.name = name;
edp->dp_pixel_hw.init = &init;
ret = devm_clk_hw_register(edp->dev, &edp->dp_pixel_hw);
if (ret)
@@ -610,6 +782,7 @@ static int qcom_edp_phy_probe(struct platform_device *pdev)
return -ENOMEM;
edp->dev = dev;
+ edp->cfg = of_device_get_match_data(&pdev->dev);
edp->edp = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(edp->edp))
@@ -670,6 +843,8 @@ static int qcom_edp_phy_probe(struct platform_device *pdev)
static const struct of_device_id qcom_edp_phy_match_table[] = {
{ .compatible = "qcom,sc7280-edp-phy" },
{ .compatible = "qcom,sc8180x-edp-phy" },
+ { .compatible = "qcom,sc8280xp-dp-phy", .data = &dp_phy_cfg },
+ { .compatible = "qcom,sc8280xp-edp-phy", .data = &edp_phy_cfg },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_edp_phy_match_table);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 4b1828976104..9807c4d935cd 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -28,16 +28,11 @@
#define SW_RESET BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
/* QPHY_START_CONTROL bits */
#define SERDES_START BIT(0)
#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
-#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
-#define PCS_READY BIT(0)
/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
/* DP PHY soft reset */
@@ -71,11 +66,6 @@
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
-
struct qmp_phy_init_tbl {
unsigned int offset;
unsigned int val;
@@ -115,22 +105,14 @@ struct qmp_phy_init_tbl {
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
- /* Common block control registers */
- QPHY_COM_SW_RESET,
- QPHY_COM_POWER_DOWN_CONTROL,
- QPHY_COM_START_CONTROL,
- QPHY_COM_PCS_READY_STATUS,
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
QPHY_PCS_STATUS,
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -606,6 +588,160 @@ static const struct qmp_phy_init_tbl qmp_v4_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_EMP_POST1_LVL, 0x20),
};
+static const struct qmp_phy_init_tbl qmp_v5_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORE_CLK_EN, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl qmp_v5_5nm_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_3, 0x51),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_PRE_STALL_LDO_BOOST_EN, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_INTERFACE_SELECT, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_TRAN_DRVR_EMP_EN, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_TX_BAND, 0x01),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xfd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0xfd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MSB_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MSB_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x76),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE0, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE1, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAXVAL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SVS_MODE_CLK_SEL, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb43dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_2, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_3, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb43dp_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B0, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B1, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B2, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B3, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B5, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B6, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B7, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B0, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B1, 0x63),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B2, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B3, 0x23),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B4, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B5, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B6, 0x8e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B7, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_IVCM_CAL_CODE_OVERRIDE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_IVCM_CAL_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_SUMMER_CAL_SPD_MODE, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_UCDR_PI_CONTROLS, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_IVCM_POSTCAL_OFFSET, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_VGA_CAL_CNTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_VGA_CAL_MAN_VAL, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_DFE_3, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_GM_CAL, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE2, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x3f),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb43dp_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_CONFIG, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
/* list of regulators */
struct qmp_regulator_data {
@@ -618,14 +754,69 @@ static struct qmp_regulator_data qmp_phy_vreg_l[] = {
{ .name = "vdda-pll", .enable_load = 36000 },
};
+static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = {
+ { 0x00, 0x0c, 0x15, 0x1a },
+ { 0x02, 0x0e, 0x16, 0xff },
+ { 0x02, 0x11, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = {
+ { 0x02, 0x12, 0x16, 0x1a },
+ { 0x09, 0x19, 0x1f, 0xff },
+ { 0x10, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
+ { 0x00, 0x0c, 0x14, 0x19 },
+ { 0x00, 0x0b, 0x12, 0xff },
+ { 0x00, 0x0b, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
+ { 0x08, 0x0f, 0x16, 0x1f },
+ { 0x11, 0x1e, 0x1f, 0xff },
+ { 0x19, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v5_pre_emphasis_hbr3_hbr2[4][4] = {
+ { 0x20, 0x2c, 0x35, 0x3b },
+ { 0x22, 0x2e, 0x36, 0xff },
+ { 0x22, 0x31, 0xff, 0xff },
+ { 0x24, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v5_voltage_swing_hbr3_hbr2[4][4] = {
+ { 0x22, 0x32, 0x36, 0x3a },
+ { 0x29, 0x39, 0x3f, 0xff },
+ { 0x30, 0x3f, 0xff, 0xff },
+ { 0x3f, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v5_pre_emphasis_hbr_rbr[4][4] = {
+ { 0x20, 0x2d, 0x34, 0x3a },
+ { 0x20, 0x2e, 0x35, 0xff },
+ { 0x20, 0x2e, 0xff, 0xff },
+ { 0x24, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v5_voltage_swing_hbr_rbr[4][4] = {
+ { 0x28, 0x2f, 0x36, 0x3f },
+ { 0x31, 0x3e, 0x3f, 0xff },
+ { 0x36, 0x3f, 0xff, 0xff },
+ { 0x3f, 0xff, 0xff, 0xff }
+};
+
struct qmp_phy;
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
/* phy-type - PCIE/UFS/USB */
unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ int lanes;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
@@ -649,6 +840,12 @@ struct qmp_phy_cfg {
const struct qmp_phy_init_tbl *serdes_tbl_hbr3;
int serdes_tbl_hbr3_num;
+ /* DP PHY swing and pre_emphasis tables */
+ const u8 (*swing_hbr_rbr)[4][4];
+ const u8 (*swing_hbr3_hbr2)[4][4];
+ const u8 (*pre_emphasis_hbr_rbr)[4][4];
+ const u8 (*pre_emphasis_hbr3_hbr2)[4][4];
+
/* DP PHY callbacks */
int (*configure_dp_phy)(struct qmp_phy *qphy);
void (*configure_dp_tx)(struct qmp_phy *qphy);
@@ -679,11 +876,6 @@ struct qmp_phy_cfg {
int pwrdn_delay_min;
int pwrdn_delay_max;
- /* true, if PHY has a separate DP_COM control block */
- bool has_phy_dp_com_ctrl;
- /* true, if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
-
/* Offset from PCS to PCS_USB region */
unsigned int pcs_usb_offset;
@@ -708,9 +900,7 @@ struct qmp_phy_combo_cfg {
* @pcs_misc: iomapped memory space for lane's pcs_misc
* @pcs_usb: iomapped memory space for lane's pcs_usb
* @pipe_clk: pipe clock
- * @index: lane index
* @qmp: QMP phy to which this lane belongs
- * @lane_rst: lane's reset controller
* @mode: current PHY mode
* @dp_aux_cfg: Display port aux config
* @dp_opts: Display port optional config
@@ -728,9 +918,7 @@ struct qmp_phy {
void __iomem *pcs_misc;
void __iomem *pcs_usb;
struct clk *pipe_clk;
- unsigned int index;
struct qcom_qmp *qmp;
- struct reset_control *lane_rst;
enum phy_mode mode;
unsigned int dp_aux_cfg;
struct phy_configure_opts_dp dp_opts;
@@ -784,6 +972,8 @@ static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy);
static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy);
static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy);
+static int qcom_qmp_v5_phy_configure_dp_phy(struct qmp_phy *qphy);
+
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
@@ -833,7 +1023,7 @@ static const char * const sc7180_usb3phy_reset_l[] = {
static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
@@ -858,14 +1048,11 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
.type = PHY_TYPE_DP,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v3_dp_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl),
@@ -881,6 +1068,11 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
.serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3,
.serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3),
+ .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
+
.clk_list = qmp_v3_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = sc7180_usb3phy_reset_l,
@@ -889,9 +1081,6 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
.dp_aux_init = qcom_qmp_v3_phy_dp_aux_init,
.configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx,
.configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy,
@@ -903,9 +1092,43 @@ static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = {
.dp_cfg = &sc7180_dpphy_cfg,
};
+static const struct qmp_phy_cfg sdm845_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .lanes = 2,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_combo_cfg sdm845_usb3dpphy_cfg = {
+ .usb_cfg = &sdm845_usb3phy_cfg,
+ .dp_cfg = &sc7180_dpphy_cfg,
+};
+
static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -934,14 +1157,11 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
.type = PHY_TYPE_DP,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v4_dp_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
@@ -957,6 +1177,11 @@ static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
.serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
.serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+ .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
+
.clk_list = qmp_v3_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = sc7180_usb3phy_reset_l,
@@ -965,9 +1190,6 @@ static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
.dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
.configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
.configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
@@ -979,9 +1201,81 @@ static const struct qmp_phy_combo_cfg sc8180x_usb3dpphy_cfg = {
.dp_cfg = &sc8180x_dpphy_cfg,
};
+static const struct qmp_phy_cfg sc8280xp_usb43dp_usb_cfg = {
+ .type = PHY_TYPE_USB3,
+ .lanes = 2,
+
+ .serdes_tbl = sc8280xp_usb43dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_serdes_tbl),
+ .tx_tbl = sc8280xp_usb43dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_tx_tbl),
+ .rx_tbl = sc8280xp_usb43dp_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_rx_tbl),
+ .pcs_tbl = sc8280xp_usb43dp_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sc8280xp_usb43dp_dp_cfg = {
+ .type = PHY_TYPE_DP,
+ .lanes = 2,
+
+ .serdes_tbl = qmp_v5_dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v5_dp_serdes_tbl),
+ .tx_tbl = qmp_v5_5nm_dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v5_5nm_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
+ .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
+ .configure_dp_phy = qcom_qmp_v5_phy_configure_dp_phy,
+ .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate,
+};
+
+static const struct qmp_phy_combo_cfg sc8280xp_usb43dpphy_combo_cfg = {
+ .usb_cfg = &sc8280xp_usb43dp_usb_cfg,
+ .dp_cfg = &sc8280xp_usb43dp_dp_cfg,
+};
+
static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1009,14 +1303,11 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
.type = PHY_TYPE_DP,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v4_dp_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
@@ -1032,6 +1323,11 @@ static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
.serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
.serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+ .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
+
.clk_list = qmp_v4_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
@@ -1040,9 +1336,6 @@ static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v4_usb3phy_regs_layout,
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
.dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
.configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
.configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
@@ -1054,7 +1347,7 @@ static const struct qmp_phy_combo_cfg sm8250_usb3dpphy_cfg = {
.dp_cfg = &sm8250_dpphy_cfg,
};
-static void qcom_qmp_phy_combo_configure_lane(void __iomem *base,
+static void qmp_combo_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -1077,15 +1370,15 @@ static void qcom_qmp_phy_combo_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_combo_configure(void __iomem *base,
+static void qmp_combo_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num)
{
- qcom_qmp_phy_combo_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_combo_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_combo_serdes_init(struct qmp_phy *qphy)
+static int qmp_combo_serdes_init(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *serdes = qphy->serdes;
@@ -1093,27 +1386,27 @@ static int qcom_qmp_phy_combo_serdes_init(struct qmp_phy *qphy)
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qcom_qmp_phy_combo_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ qmp_combo_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
if (cfg->type == PHY_TYPE_DP) {
switch (dp_opts->link_rate) {
case 1620:
- qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ qmp_combo_configure(serdes, cfg->regs,
cfg->serdes_tbl_rbr,
cfg->serdes_tbl_rbr_num);
break;
case 2700:
- qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ qmp_combo_configure(serdes, cfg->regs,
cfg->serdes_tbl_hbr,
cfg->serdes_tbl_hbr_num);
break;
case 5400:
- qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ qmp_combo_configure(serdes, cfg->regs,
cfg->serdes_tbl_hbr2,
cfg->serdes_tbl_hbr2_num);
break;
case 8100:
- qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ qmp_combo_configure(serdes, cfg->regs,
cfg->serdes_tbl_hbr3,
cfg->serdes_tbl_hbr3_num);
break;
@@ -1169,38 +1462,11 @@ static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy)
qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
}
-static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = {
- { 0x00, 0x0c, 0x15, 0x1a },
- { 0x02, 0x0e, 0x16, 0xff },
- { 0x02, 0x11, 0xff, 0xff },
- { 0x04, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = {
- { 0x02, 0x12, 0x16, 0x1a },
- { 0x09, 0x19, 0x1f, 0xff },
- { 0x10, 0x1f, 0xff, 0xff },
- { 0x1f, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
- { 0x00, 0x0c, 0x14, 0x19 },
- { 0x00, 0x0b, 0x12, 0xff },
- { 0x00, 0x0b, 0xff, 0xff },
- { 0x04, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
- { 0x08, 0x0f, 0x16, 0x1f },
- { 0x11, 0x1e, 0x1f, 0xff },
- { 0x19, 0x1f, 0xff, 0xff },
- { 0x1f, 0xff, 0xff, 0xff }
-};
-
-static int qcom_qmp_phy_combo_configure_dp_swing(struct qmp_phy *qphy,
+static int qmp_combo_configure_dp_swing(struct qmp_phy *qphy,
unsigned int drv_lvl_reg, unsigned int emp_post_reg)
{
const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
unsigned int v_level = 0, p_level = 0;
u8 voltage_swing_cfg, pre_emphasis_cfg;
int i;
@@ -1211,11 +1477,11 @@ static int qcom_qmp_phy_combo_configure_dp_swing(struct qmp_phy *qphy,
}
if (dp_opts->link_rate <= 2700) {
- voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
- pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
+ voltage_swing_cfg = (*cfg->swing_hbr_rbr)[v_level][p_level];
+ pre_emphasis_cfg = (*cfg->pre_emphasis_hbr_rbr)[v_level][p_level];
} else {
- voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr3_hbr2[v_level][p_level];
- pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr3_hbr2[v_level][p_level];
+ voltage_swing_cfg = (*cfg->swing_hbr3_hbr2)[v_level][p_level];
+ pre_emphasis_cfg = (*cfg->pre_emphasis_hbr3_hbr2)[v_level][p_level];
}
/* TODO: Move check to config check */
@@ -1239,8 +1505,7 @@ static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy)
const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
u32 bias_en, drvr_en;
- if (qcom_qmp_phy_combo_configure_dp_swing(qphy,
- QSERDES_V3_TX_TX_DRV_LVL,
+ if (qmp_combo_configure_dp_swing(qphy, QSERDES_V3_TX_TX_DRV_LVL,
QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0)
return;
@@ -1258,7 +1523,7 @@ static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy)
writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
}
-static bool qcom_qmp_phy_combo_configure_dp_mode(struct qmp_phy *qphy)
+static bool qmp_combo_configure_dp_mode(struct qmp_phy *qphy)
{
u32 val;
bool reverse = false;
@@ -1295,7 +1560,7 @@ static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy)
u32 phy_vco_div, status;
unsigned long pixel_freq;
- qcom_qmp_phy_combo_configure_dp_mode(qphy);
+ qmp_combo_configure_dp_mode(qphy);
writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
@@ -1415,23 +1680,20 @@ static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy)
writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- qcom_qmp_phy_combo_configure_dp_swing(qphy,
- QSERDES_V4_TX_TX_DRV_LVL,
+ qmp_combo_configure_dp_swing(qphy, QSERDES_V4_TX_TX_DRV_LVL,
QSERDES_V4_TX_TX_EMP_POST1_LVL);
}
-static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
+static int qcom_qmp_v45_phy_configure_dp_phy(struct qmp_phy *qphy)
{
const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
u32 phy_vco_div, status;
unsigned long pixel_freq;
- u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- bool reverse;
writel(0x0f, qphy->pcs + QSERDES_V4_DP_PHY_CFG_1);
- reverse = qcom_qmp_phy_combo_configure_dp_mode(qphy);
+ qmp_combo_configure_dp_mode(qphy);
writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
@@ -1509,6 +1771,21 @@ static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
10000))
return -ETIMEDOUT;
+ return 0;
+}
+
+static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ bool reverse = false;
+ u32 status;
+ int ret;
+
+ ret = qcom_qmp_v45_phy_configure_dp_phy(qphy);
+ if (ret < 0)
+ return ret;
+
/*
* At least for 7nm DP PHY this has to be done after enabling link
* clock.
@@ -1559,6 +1836,63 @@ static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
return 0;
}
+static int qcom_qmp_v5_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ bool reverse = false;
+ u32 status;
+ int ret;
+
+ ret = qcom_qmp_v45_phy_configure_dp_phy(qphy);
+ if (ret < 0)
+ return ret;
+
+ if (dp_opts->lanes == 1) {
+ bias0_en = reverse ? 0x3e : 0x1a;
+ drvr0_en = reverse ? 0x13 : 0x10;
+ bias1_en = reverse ? 0x15 : 0x3e;
+ drvr1_en = reverse ? 0x10 : 0x13;
+ } else if (dp_opts->lanes == 2) {
+ bias0_en = reverse ? 0x3f : 0x15;
+ drvr0_en = 0x10;
+ bias1_en = reverse ? 0x15 : 0x3f;
+ drvr1_en = 0x10;
+ } else {
+ bias0_en = 0x3f;
+ bias1_en = 0x3f;
+ drvr0_en = 0x10;
+ drvr1_en = 0x10;
+ }
+
+ writel(drvr0_en, qphy->tx + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN);
+ writel(bias0_en, qphy->tx + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr1_en, qphy->tx2 + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN);
+ writel(bias1_en, qphy->tx2 + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN);
+
+ writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
+ udelay(2000);
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x0a, qphy->tx + QSERDES_V5_5NM_TX_TX_POL_INV);
+ writel(0x0a, qphy->tx2 + QSERDES_V5_5NM_TX_TX_POL_INV);
+
+ writel(0x27, qphy->tx + QSERDES_V5_5NM_TX_TX_DRV_LVL);
+ writel(0x27, qphy->tx2 + QSERDES_V5_5NM_TX_TX_DRV_LVL);
+
+ writel(0x20, qphy->tx + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qphy->tx2 + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL);
+
+ return 0;
+}
+
/*
* We need to calibrate the aux setting here as many times
* as the caller tries
@@ -1603,7 +1937,7 @@ static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_combo_com_init(struct qmp_phy *qphy)
+static int qmp_combo_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1640,28 +1974,25 @@ static int qcom_qmp_phy_combo_com_init(struct qmp_phy *qphy)
if (ret)
goto err_assert_reset;
- if (cfg->has_phy_dp_com_ctrl) {
- qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
- SW_PWRDN);
- /* override hardware control for reset of qmp phy */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL, SW_PWRDN);
- /* Default type-c orientation, i.e CC1 */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
- qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
- USB3_MODE | DP_MODE);
+ /* Default type-c orientation, i.e CC1 */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
- /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL, USB3_MODE | DP_MODE);
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
- }
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
qphy_setbits(pcs,
@@ -1685,7 +2016,7 @@ err_unlock:
return ret;
}
-static int qcom_qmp_phy_combo_com_exit(struct qmp_phy *qphy)
+static int qmp_combo_com_exit(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1709,7 +2040,7 @@ static int qcom_qmp_phy_combo_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_combo_init(struct phy *phy)
+static int qmp_combo_init(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -1717,7 +2048,7 @@ static int qcom_qmp_phy_combo_init(struct phy *phy)
int ret;
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
- ret = qcom_qmp_phy_combo_com_init(qphy);
+ ret = qmp_combo_com_init(qphy);
if (ret)
return ret;
@@ -1727,7 +2058,7 @@ static int qcom_qmp_phy_combo_init(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_combo_power_on(struct phy *phy)
+static int qmp_combo_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -1739,7 +2070,7 @@ static int qcom_qmp_phy_combo_power_on(struct phy *phy)
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_combo_serdes_init(qphy);
+ qmp_combo_serdes_init(qphy);
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
@@ -1748,33 +2079,29 @@ static int qcom_qmp_phy_combo_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_combo_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_combo_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_combo_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_combo_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl,
+ cfg->tx_tbl_num, 2);
}
/* Configure special DP tx tunings */
if (cfg->type == PHY_TYPE_DP)
cfg->configure_dp_tx(qphy);
- qcom_qmp_phy_combo_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_combo_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_combo_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_combo_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl,
+ cfg->rx_tbl_num, 2);
}
/* Configure link rate, swing, etc. */
- if (cfg->type == PHY_TYPE_DP) {
+ if (cfg->type == PHY_TYPE_DP)
cfg->configure_dp_phy(qphy);
- } else {
- qcom_qmp_phy_combo_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
- }
+ else
+ qmp_combo_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
@@ -1808,7 +2135,7 @@ err_disable_pipe_clk:
return ret;
}
-static int qcom_qmp_phy_combo_power_off(struct phy *phy)
+static int qmp_combo_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1838,42 +2165,41 @@ static int qcom_qmp_phy_combo_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_combo_exit(struct phy *phy)
+static int qmp_combo_exit(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
- qcom_qmp_phy_combo_com_exit(qphy);
+ qmp_combo_com_exit(qphy);
return 0;
}
-static int qcom_qmp_phy_combo_enable(struct phy *phy)
+static int qmp_combo_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_combo_init(phy);
+ ret = qmp_combo_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_combo_power_on(phy);
+ ret = qmp_combo_power_on(phy);
if (ret)
- qcom_qmp_phy_combo_exit(phy);
+ qmp_combo_exit(phy);
return ret;
}
-static int qcom_qmp_phy_combo_disable(struct phy *phy)
+static int qmp_combo_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_combo_power_off(phy);
+ ret = qmp_combo_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_combo_exit(phy);
+ return qmp_combo_exit(phy);
}
-static int qcom_qmp_phy_combo_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
+static int qmp_combo_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
@@ -1882,7 +2208,7 @@ static int qcom_qmp_phy_combo_set_mode(struct phy *phy,
return 0;
}
-static void qcom_qmp_phy_combo_enable_autonomous_mode(struct qmp_phy *qphy)
+static void qmp_combo_enable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
@@ -1911,7 +2237,7 @@ static void qcom_qmp_phy_combo_enable_autonomous_mode(struct qmp_phy *qphy)
qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
}
-static void qcom_qmp_phy_combo_disable_autonomous_mode(struct qmp_phy *qphy)
+static void qmp_combo_disable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
@@ -1929,7 +2255,7 @@ static void qcom_qmp_phy_combo_disable_autonomous_mode(struct qmp_phy *qphy)
qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
}
-static int __maybe_unused qcom_qmp_phy_combo_runtime_suspend(struct device *dev)
+static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
@@ -1946,7 +2272,7 @@ static int __maybe_unused qcom_qmp_phy_combo_runtime_suspend(struct device *dev)
return 0;
}
- qcom_qmp_phy_combo_enable_autonomous_mode(qphy);
+ qmp_combo_enable_autonomous_mode(qphy);
clk_disable_unprepare(qphy->pipe_clk);
clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
@@ -1954,7 +2280,7 @@ static int __maybe_unused qcom_qmp_phy_combo_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused qcom_qmp_phy_combo_runtime_resume(struct device *dev)
+static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
@@ -1983,12 +2309,12 @@ static int __maybe_unused qcom_qmp_phy_combo_runtime_resume(struct device *dev)
return ret;
}
- qcom_qmp_phy_combo_disable_autonomous_mode(qphy);
+ qmp_combo_disable_autonomous_mode(qphy);
return 0;
}
-static int qcom_qmp_phy_combo_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_combo_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -2020,7 +2346,7 @@ static int qcom_qmp_phy_combo_vreg_init(struct device *dev, const struct qmp_phy
return 0;
}
-static int qcom_qmp_phy_combo_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_combo_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
@@ -2041,7 +2367,7 @@ static int qcom_qmp_phy_combo_reset_init(struct device *dev, const struct qmp_ph
return 0;
}
-static int qcom_qmp_phy_combo_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_combo_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -2309,33 +2635,31 @@ static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy,
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static const struct phy_ops qcom_qmp_phy_combo_usb_ops = {
- .init = qcom_qmp_phy_combo_enable,
- .exit = qcom_qmp_phy_combo_disable,
- .set_mode = qcom_qmp_phy_combo_set_mode,
+static const struct phy_ops qmp_combo_usb_ops = {
+ .init = qmp_combo_enable,
+ .exit = qmp_combo_disable,
+ .set_mode = qmp_combo_set_mode,
.owner = THIS_MODULE,
};
-static const struct phy_ops qcom_qmp_phy_combo_dp_ops = {
- .init = qcom_qmp_phy_combo_init,
+static const struct phy_ops qmp_combo_dp_ops = {
+ .init = qmp_combo_init,
.configure = qcom_qmp_dp_phy_configure,
- .power_on = qcom_qmp_phy_combo_power_on,
+ .power_on = qmp_combo_power_on,
.calibrate = qcom_qmp_dp_phy_calibrate,
- .power_off = qcom_qmp_phy_combo_power_off,
- .exit = qcom_qmp_phy_combo_exit,
- .set_mode = qcom_qmp_phy_combo_set_mode,
+ .power_off = qmp_combo_power_off,
+ .exit = qmp_combo_exit,
+ .set_mode = qmp_combo_set_mode,
.owner = THIS_MODULE,
};
-static
-int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id,
+static int qmp_combo_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
const struct phy_ops *ops;
- char prop_name[MAX_PROP_NAME];
int ret;
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
@@ -2350,49 +2674,39 @@ int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id
* For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
if (cfg->pcs_usb_offset)
qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset;
- /*
- * If this is a dual-lane PHY, then there should be registers for the
- * second lane. Some old device trees did not specify this, so fall
- * back to old legacy behavior of assuming they can be reached at an
- * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
+ if (cfg->lanes >= 2) {
+ qphy->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qphy->tx2))
+ return PTR_ERR(qphy->tx2);
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
+ qphy->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qphy->rx2))
+ return PTR_ERR(qphy->rx2);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
} else {
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
}
- if (!qphy->pcs_misc)
+ if (IS_ERR(qphy->pcs_misc)) {
dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qphy->pcs_misc = NULL;
+ }
/*
* Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3
@@ -2401,24 +2715,19 @@ int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id
* Otherwise, we initialize pipe clock to NULL for
* all phys that don't need this.
*/
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(qphy->pipe_clk)) {
- if (cfg->type == PHY_TYPE_USB3) {
- ret = PTR_ERR(qphy->pipe_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev,
- "failed to get lane%d pipe_clk, %d\n",
- id, ret);
- return ret;
- }
+ if (cfg->type == PHY_TYPE_USB3)
+ return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
+ "failed to get lane%d pipe_clk\n",
+ id);
qphy->pipe_clk = NULL;
}
if (cfg->type == PHY_TYPE_DP)
- ops = &qcom_qmp_phy_combo_dp_ops;
+ ops = &qmp_combo_dp_ops;
else
- ops = &qcom_qmp_phy_combo_usb_ops;
+ ops = &qmp_combo_usb_ops;
generic_phy = devm_phy_create(dev, np, ops);
if (IS_ERR(generic_phy)) {
@@ -2428,7 +2737,6 @@ int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id
}
qphy->phy = generic_phy;
- qphy->index = id;
qphy->qmp = qmp;
qmp->phys[id] = qphy;
phy_set_drvdata(generic_phy, qphy);
@@ -2436,12 +2744,16 @@ int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id
return 0;
}
-static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
+static const struct of_device_id qmp_combo_of_match_table[] = {
{
.compatible = "qcom,sc7180-qmp-usb3-dp-phy",
.data = &sc7180_usb3dpphy_cfg,
},
{
+ .compatible = "qcom,sdm845-qmp-usb3-dp-phy",
+ .data = &sdm845_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sm8250-qmp-usb3-dp-phy",
.data = &sm8250_usb3dpphy_cfg,
},
@@ -2449,16 +2761,20 @@ static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
.compatible = "qcom,sc8180x-qmp-usb3-dp-phy",
.data = &sc8180x_usb3dpphy_cfg,
},
+ {
+ .compatible = "qcom,sc8280xp-qmp-usb43dp-phy",
+ .data = &sc8280xp_usb43dpphy_combo_cfg,
+ },
{ }
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_combo_phy_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_combo_of_match_table);
-static const struct dev_pm_ops qcom_qmp_phy_combo_pm_ops = {
- SET_RUNTIME_PM_OPS(qcom_qmp_phy_combo_runtime_suspend,
- qcom_qmp_phy_combo_runtime_resume, NULL)
+static const struct dev_pm_ops qmp_combo_pm_ops = {
+ SET_RUNTIME_PM_OPS(qmp_combo_runtime_suspend,
+ qmp_combo_runtime_resume, NULL)
};
-static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
+static int qmp_combo_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -2494,12 +2810,9 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
if (IS_ERR(serdes))
return PTR_ERR(serdes);
- /* per PHY dp_com; if PHY has dp_com control block */
- if (cfg->has_phy_dp_com_ctrl) {
- qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(qmp->dp_com))
- return PTR_ERR(qmp->dp_com);
- }
+ qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(qmp->dp_com))
+ return PTR_ERR(qmp->dp_com);
/* Only two serdes for combo PHY */
dp_serdes = devm_platform_ioremap_resource(pdev, 2);
@@ -2511,21 +2824,18 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
mutex_init(&qmp->phy_mutex);
- ret = qcom_qmp_phy_combo_clk_init(dev, cfg);
+ ret = qmp_combo_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_combo_reset_init(dev, cfg);
+ ret = qmp_combo_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_combo_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_combo_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -2537,7 +2847,9 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
return -ENOMEM;
pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
/*
* Prevent runtime pm from being ON by default. Users can enable
* it using power/control in sysfs.
@@ -2551,7 +2863,7 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
serdes = dp_serdes;
/* Create per-lane phy */
- ret = qcom_qmp_phy_combo_create(dev, child, id, serdes, cfg);
+ ret = qmp_combo_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -2569,7 +2881,7 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
serdes = usb_serdes;
/* Create per-lane phy */
- ret = qcom_qmp_phy_combo_create(dev, child, id, serdes, cfg);
+ ret = qmp_combo_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -2592,29 +2904,24 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_combo_driver = {
- .probe = qcom_qmp_phy_combo_probe,
+static struct platform_driver qmp_combo_driver = {
+ .probe = qmp_combo_probe,
.driver = {
.name = "qcom-qmp-combo-phy",
- .pm = &qcom_qmp_phy_combo_pm_ops,
- .of_match_table = qcom_qmp_combo_phy_of_match_table,
+ .pm = &qmp_combo_pm_ops,
+ .of_match_table = qmp_combo_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_combo_driver);
+module_platform_driver(qmp_combo_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP USB+DP combo PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index be6a94439b6c..461f0b5d464a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -36,46 +36,13 @@
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+/* QPHY_COM_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
-/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
-/* DP PHY soft reset */
-#define SW_DPPHY_RESET BIT(0)
-/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
-#define SW_DPPHY_RESET_MUX BIT(1)
-/* USB3 PHY soft reset */
-#define SW_USB3PHY_RESET BIT(2)
-/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
-#define SW_USB3PHY_RESET_MUX BIT(3)
-
-/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
-#define USB3_MODE BIT(0) /* enables USB3 mode */
-#define DP_MODE BIT(1) /* enables DP mode */
-
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-
#define PHY_INIT_COMPLETE_TIMEOUT 10000
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
-
struct qmp_phy_init_tbl {
unsigned int offset;
unsigned int val;
@@ -123,14 +90,8 @@ enum qphy_reg_layout {
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
QPHY_PCS_STATUS,
- QPHY_PCS_AUTONOMOUS_MODE_CTRL,
- QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -223,36 +184,20 @@ static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
};
-struct qmp_phy;
-
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- /* phy-type - PCIE/UFS/USB */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ /* number of PHYs provided by this block */
+ int num_phys;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
int serdes_tbl_num;
- const struct qmp_phy_init_tbl *serdes_tbl_sec;
- int serdes_tbl_num_sec;
const struct qmp_phy_init_tbl *tx_tbl;
int tx_tbl_num;
- const struct qmp_phy_init_tbl *tx_tbl_sec;
- int tx_tbl_num_sec;
const struct qmp_phy_init_tbl *rx_tbl;
int rx_tbl_num;
- const struct qmp_phy_init_tbl *rx_tbl_sec;
- int rx_tbl_num_sec;
const struct qmp_phy_init_tbl *pcs_tbl;
int pcs_tbl_num;
- const struct qmp_phy_init_tbl *pcs_tbl_sec;
- int pcs_tbl_num_sec;
- const struct qmp_phy_init_tbl *pcs_misc_tbl;
- int pcs_misc_tbl_num;
- const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
- int pcs_misc_tbl_num_sec;
/* clock ids to be requested */
const char * const *clk_list;
@@ -289,12 +234,10 @@ struct qmp_phy_cfg {
* @tx: iomapped memory space for lane's tx
* @rx: iomapped memory space for lane's rx
* @pcs: iomapped memory space for lane's pcs
- * @pcs_misc: iomapped memory space for lane's pcs_misc
* @pipe_clk: pipe clock
* @index: lane index
* @qmp: QMP phy to which this lane belongs
* @lane_rst: lane's reset controller
- * @mode: current PHY mode
*/
struct qmp_phy {
struct phy *phy;
@@ -303,12 +246,10 @@ struct qmp_phy {
void __iomem *tx;
void __iomem *rx;
void __iomem *pcs;
- void __iomem *pcs_misc;
struct clk *pipe_clk;
unsigned int index;
struct qcom_qmp *qmp;
struct reset_control *lane_rst;
- enum phy_mode mode;
};
/**
@@ -377,8 +318,7 @@ static const char * const qmp_phy_vreg_l[] = {
};
static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 3,
+ .num_phys = 3,
.serdes_tbl = msm8996_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
@@ -406,7 +346,7 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
-static void qcom_qmp_phy_pcie_msm8996_configure_lane(void __iomem *base,
+static void qmp_pcie_msm8996_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -429,15 +369,15 @@ static void qcom_qmp_phy_pcie_msm8996_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_pcie_msm8996_configure(void __iomem *base,
+static void qmp_pcie_msm8996_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num)
{
- qcom_qmp_phy_pcie_msm8996_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_pcie_msm8996_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
+static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -448,11 +388,7 @@ static int qcom_qmp_phy_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
unsigned int mask, val;
int ret;
- qcom_qmp_phy_pcie_msm8996_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
- if (cfg->serdes_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
- cfg->serdes_tbl_num_sec);
-
+ qmp_pcie_msm8996_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
@@ -472,7 +408,7 @@ static int qcom_qmp_phy_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_com_init(struct qmp_phy *qphy)
+static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -525,7 +461,7 @@ err_unlock:
return ret;
}
-static int qcom_qmp_phy_pcie_msm8996_com_exit(struct qmp_phy *qphy)
+static int qmp_pcie_msm8996_com_exit(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -555,21 +491,21 @@ static int qcom_qmp_phy_pcie_msm8996_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_init(struct phy *phy)
+static int qmp_pcie_msm8996_init(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
int ret;
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
- ret = qcom_qmp_phy_pcie_msm8996_com_init(qphy);
+ ret = qmp_pcie_msm8996_com_init(qphy);
if (ret)
return ret;
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_power_on(struct phy *phy)
+static int qmp_pcie_msm8996_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -577,12 +513,11 @@ static int qcom_qmp_phy_pcie_msm8996_power_on(struct phy *phy)
void __iomem *tx = qphy->tx;
void __iomem *rx = qphy->rx;
void __iomem *pcs = qphy->pcs;
- void __iomem *pcs_misc = qphy->pcs_misc;
void __iomem *status;
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_pcie_msm8996_serdes_init(qphy);
+ qmp_pcie_msm8996_serdes_init(qphy);
ret = reset_control_deassert(qphy->lane_rst);
if (ret) {
@@ -598,28 +533,13 @@ static int qcom_qmp_phy_pcie_msm8996_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_pcie_msm8996_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
- if (cfg->tx_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
- cfg->tx_tbl_num_sec, 1);
-
- qcom_qmp_phy_pcie_msm8996_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->rx_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure_lane(rx, cfg->regs,
- cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
-
- qcom_qmp_phy_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
- if (cfg->pcs_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
- cfg->pcs_tbl_num_sec);
-
- qcom_qmp_phy_pcie_msm8996_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
- cfg->pcs_misc_tbl_num);
- if (cfg->pcs_misc_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
- cfg->pcs_misc_tbl_num_sec);
+ qmp_pcie_msm8996_configure_lane(tx, cfg->regs, cfg->tx_tbl,
+ cfg->tx_tbl_num, 1);
+
+ qmp_pcie_msm8996_configure_lane(rx, cfg->regs, cfg->rx_tbl,
+ cfg->rx_tbl_num, 1);
+
+ qmp_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/*
* Pull out PHY from POWER DOWN state.
@@ -657,7 +577,7 @@ err_reset_lane:
return ret;
}
-static int qcom_qmp_phy_pcie_msm8996_power_off(struct phy *phy)
+static int qmp_pcie_msm8996_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -682,53 +602,43 @@ static int qcom_qmp_phy_pcie_msm8996_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_exit(struct phy *phy)
+static int qmp_pcie_msm8996_exit(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
reset_control_assert(qphy->lane_rst);
- qcom_qmp_phy_pcie_msm8996_com_exit(qphy);
+ qmp_pcie_msm8996_com_exit(qphy);
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_enable(struct phy *phy)
+static int qmp_pcie_msm8996_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_pcie_msm8996_init(phy);
+ ret = qmp_pcie_msm8996_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_msm8996_power_on(phy);
+ ret = qmp_pcie_msm8996_power_on(phy);
if (ret)
- qcom_qmp_phy_pcie_msm8996_exit(phy);
+ qmp_pcie_msm8996_exit(phy);
return ret;
}
-static int qcom_qmp_phy_pcie_msm8996_disable(struct phy *phy)
+static int qmp_pcie_msm8996_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_pcie_msm8996_power_off(phy);
+ ret = qmp_pcie_msm8996_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_pcie_msm8996_exit(phy);
+ return qmp_pcie_msm8996_exit(phy);
}
-static int qcom_qmp_phy_pcie_msm8996_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qphy->mode = mode;
-
- return 0;
-}
-
-static int qcom_qmp_phy_pcie_msm8996_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_msm8996_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -744,7 +654,7 @@ static int qcom_qmp_phy_pcie_msm8996_vreg_init(struct device *dev, const struct
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_pcie_msm8996_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_msm8996_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
@@ -765,7 +675,7 @@ static int qcom_qmp_phy_pcie_msm8996_reset_init(struct device *dev, const struct
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_msm8996_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -841,10 +751,9 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static const struct phy_ops qcom_qmp_phy_pcie_msm8996_ops = {
- .power_on = qcom_qmp_phy_pcie_msm8996_enable,
- .power_off = qcom_qmp_phy_pcie_msm8996_disable,
- .set_mode = qcom_qmp_phy_pcie_msm8996_set_mode,
+static const struct phy_ops qmp_pcie_msm8996_ops = {
+ .power_on = qmp_pcie_msm8996_enable,
+ .power_off = qmp_pcie_msm8996_disable,
.owner = THIS_MODULE,
};
@@ -853,14 +762,12 @@ static void qcom_qmp_reset_control_put(void *data)
reset_control_put(data);
}
-static
-int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np, int id,
+static int qmp_pcie_msm8996_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
- char prop_name[MAX_PROP_NAME];
int ret;
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
@@ -872,36 +779,26 @@ int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np,
/*
* Get memory resources for each phy lane:
* Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
- * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
- * For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
-
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
-
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- if (!qphy->pcs_misc)
- dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(qphy->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
"failed to get lane%d pipe clock\n", id);
}
- /* Get lane reset, if any */
- snprintf(prop_name, sizeof(prop_name), "lane%d", id);
- qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name);
+ qphy->lane_rst = of_reset_control_get_exclusive_by_index(np, 0);
if (IS_ERR(qphy->lane_rst)) {
dev_err(dev, "failed to get lane%d reset\n", id);
return PTR_ERR(qphy->lane_rst);
@@ -911,7 +808,7 @@ int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np,
if (ret)
return ret;
- generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_pcie_msm8996_ops);
+ generic_phy = devm_phy_create(dev, np, &qmp_pcie_msm8996_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create qphy %d\n", ret);
@@ -927,16 +824,16 @@ int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np,
return 0;
}
-static const struct of_device_id qcom_qmp_phy_pcie_msm8996_of_match_table[] = {
+static const struct of_device_id qmp_pcie_msm8996_of_match_table[] = {
{
.compatible = "qcom,msm8996-qmp-pcie-phy",
.data = &msm8996_pciephy_cfg,
},
{ },
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_pcie_msm8996_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_pcie_msm8996_of_match_table);
-static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
+static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -964,25 +861,22 @@ static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
if (IS_ERR(serdes))
return PTR_ERR(serdes);
- expected_phys = cfg->nlanes;
+ expected_phys = cfg->num_phys;
mutex_init(&qmp->phy_mutex);
- ret = qcom_qmp_phy_pcie_msm8996_clk_init(dev, cfg);
+ ret = qmp_pcie_msm8996_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_msm8996_reset_init(dev, cfg);
+ ret = qmp_pcie_msm8996_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_msm8996_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_pcie_msm8996_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -993,18 +887,10 @@ static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
if (!qmp->phys)
return -ENOMEM;
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- /*
- * Prevent runtime pm from being ON by default. Users can enable
- * it using power/control in sysfs.
- */
- pm_runtime_forbid(dev);
-
id = 0;
for_each_available_child_of_node(dev->of_node, child) {
/* Create per-lane phy */
- ret = qcom_qmp_phy_pcie_msm8996_create(dev, child, id, serdes, cfg);
+ ret = qmp_pcie_msm8996_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -1026,28 +912,23 @@ static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_pcie_msm8996_driver = {
- .probe = qcom_qmp_phy_pcie_msm8996_probe,
+static struct platform_driver qmp_pcie_msm8996_driver = {
+ .probe = qmp_pcie_msm8996_probe,
.driver = {
.name = "qcom-qmp-msm8996-pcie-phy",
- .of_match_table = qcom_qmp_phy_pcie_msm8996_of_match_table,
+ .of_match_table = qmp_pcie_msm8996_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_pcie_msm8996_driver);
+module_platform_driver(qmp_pcie_msm8996_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP MSM8996 PCIe PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 2d65e1f56bfc..5be5348fbb26 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -32,49 +32,11 @@
/* QPHY_START_CONTROL bits */
#define SERDES_START BIT(0)
#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
-#define PCS_READY BIT(0)
-
-/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
-/* DP PHY soft reset */
-#define SW_DPPHY_RESET BIT(0)
-/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
-#define SW_DPPHY_RESET_MUX BIT(1)
-/* USB3 PHY soft reset */
-#define SW_USB3PHY_RESET BIT(2)
-/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
-#define SW_USB3PHY_RESET_MUX BIT(3)
-
-/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
-#define USB3_MODE BIT(0) /* enables USB3 mode */
-#define DP_MODE BIT(1) /* enables DP mode */
-
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
#define PHY_INIT_COMPLETE_TIMEOUT 10000
-#define POWER_DOWN_DELAY_US_MIN 10
-#define POWER_DOWN_DELAY_US_MAX 11
-
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
struct qmp_phy_init_tbl {
unsigned int offset;
@@ -123,14 +85,8 @@ enum qphy_reg_layout {
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
QPHY_PCS_STATUS,
- QPHY_PCS_AUTONOMOUS_MODE_CTRL,
- QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -1344,14 +1300,9 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
};
-struct qmp_phy;
-
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- /* phy-type - PCIE/UFS/USB */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ int lanes;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
@@ -1390,7 +1341,6 @@ struct qmp_phy_cfg {
unsigned int start_ctrl;
unsigned int pwrdn_ctrl;
- unsigned int mask_com_pcs_ready;
/* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
unsigned int phy_status;
@@ -1400,9 +1350,6 @@ struct qmp_phy_cfg {
int pwrdn_delay_min;
int pwrdn_delay_max;
- /* true, if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
-
/* QMP PHY pipe clock interface rate */
unsigned long pipe_clock_rate;
};
@@ -1420,9 +1367,7 @@ struct qmp_phy_cfg {
* @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
* @pcs_misc: iomapped memory space for lane's pcs_misc
* @pipe_clk: pipe clock
- * @index: lane index
* @qmp: QMP phy to which this lane belongs
- * @mode: current PHY mode
*/
struct qmp_phy {
struct phy *phy;
@@ -1435,9 +1380,7 @@ struct qmp_phy {
void __iomem *rx2;
void __iomem *pcs_misc;
struct clk *pipe_clk;
- unsigned int index;
struct qcom_qmp *qmp;
- enum phy_mode mode;
};
/**
@@ -1514,8 +1457,7 @@ static const char * const sdm845_pciephy_reset_l[] = {
};
static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = ipq8074_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
@@ -1543,8 +1485,7 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
};
static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = ipq8074_pcie_gen3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
@@ -1573,8 +1514,7 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
};
static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = ipq6018_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
@@ -1603,8 +1543,7 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
};
static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sdm845_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
@@ -1634,8 +1573,7 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
};
static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sdm845_qhp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
@@ -1663,8 +1601,7 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
};
static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
@@ -1702,8 +1639,7 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
};
static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
@@ -1735,15 +1671,13 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
- .is_dual_lane_phy = true,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
.pwrdn_delay_max = 1005, /* us */
};
static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = msm8998_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
@@ -1767,8 +1701,7 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
};
static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sc8180x_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
@@ -1797,8 +1730,7 @@ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
};
static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sdx55_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
@@ -1822,15 +1754,13 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS_4_20,
- .is_dual_lane_phy = true,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
.pwrdn_delay_max = 1005, /* us */
};
static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
@@ -1860,8 +1790,7 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
};
static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
@@ -1885,13 +1814,12 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
- .is_dual_lane_phy = true,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
.pwrdn_delay_max = 1005, /* us */
};
-static void qcom_qmp_phy_pcie_configure_lane(void __iomem *base,
+static void qmp_pcie_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -1914,31 +1842,30 @@ static void qcom_qmp_phy_pcie_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_pcie_configure(void __iomem *base,
- const unsigned int *regs,
- const struct qmp_phy_init_tbl tbl[],
- int num)
+static void qmp_pcie_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
{
- qcom_qmp_phy_pcie_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_pcie_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_pcie_serdes_init(struct qmp_phy *qphy)
+static int qmp_pcie_serdes_init(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *serdes = qphy->serdes;
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qcom_qmp_phy_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
- if (cfg->serdes_tbl_sec)
- qcom_qmp_phy_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
- cfg->serdes_tbl_num_sec);
+ qmp_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ qmp_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec, cfg->serdes_tbl_num_sec);
return 0;
}
-static int qcom_qmp_phy_pcie_com_init(struct qmp_phy *qphy)
+static int qmp_pcie_init(struct phy *phy)
{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs = qphy->pcs;
@@ -1985,8 +1912,9 @@ err_disable_regulators:
return ret;
}
-static int qcom_qmp_phy_pcie_com_exit(struct qmp_phy *qphy)
+static int qmp_pcie_exit(struct phy *phy)
{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1999,21 +1927,7 @@ static int qcom_qmp_phy_pcie_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_pcie_init(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- int ret;
- dev_vdbg(qmp->dev, "Initializing QMP phy\n");
-
- ret = qcom_qmp_phy_pcie_com_init(qphy);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int qcom_qmp_phy_pcie_power_on(struct phy *phy)
+static int qmp_pcie_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -2026,7 +1940,7 @@ static int qcom_qmp_phy_pcie_power_on(struct phy *phy)
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_pcie_serdes_init(qphy);
+ qmp_pcie_serdes_init(qphy);
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
@@ -2035,47 +1949,31 @@ static int qcom_qmp_phy_pcie_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_pcie_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
- if (cfg->tx_tbl_sec)
- qcom_qmp_phy_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
- cfg->tx_tbl_num_sec, 1);
-
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_pcie_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
- if (cfg->tx_tbl_sec)
- qcom_qmp_phy_pcie_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl_sec,
- cfg->tx_tbl_num_sec, 2);
+ qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec, cfg->tx_tbl_num_sec, 1);
+
+ if (cfg->lanes >= 2) {
+ qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl,
+ cfg->tx_tbl_num, 2);
+ qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl_sec,
+ cfg->tx_tbl_num_sec, 2);
}
- qcom_qmp_phy_pcie_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->rx_tbl_sec)
- qcom_qmp_phy_pcie_configure_lane(rx, cfg->regs,
- cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
-
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_pcie_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
- if (cfg->rx_tbl_sec)
- qcom_qmp_phy_pcie_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl_sec,
- cfg->rx_tbl_num_sec, 2);
+ qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
+
+ if (cfg->lanes >= 2) {
+ qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl,
+ cfg->rx_tbl_num, 2);
+ qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl_sec,
+ cfg->rx_tbl_num_sec, 2);
}
- qcom_qmp_phy_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
- if (cfg->pcs_tbl_sec)
- qcom_qmp_phy_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
- cfg->pcs_tbl_num_sec);
+ qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec, cfg->pcs_tbl_num_sec);
- qcom_qmp_phy_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
- cfg->pcs_misc_tbl_num);
- if (cfg->pcs_misc_tbl_sec)
- qcom_qmp_phy_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
- cfg->pcs_misc_tbl_num_sec);
+ qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, cfg->pcs_misc_tbl_num);
+ qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec, cfg->pcs_misc_tbl_num_sec);
/*
* Pull out PHY from POWER DOWN state.
@@ -2111,7 +2009,7 @@ err_disable_pipe_clk:
return ret;
}
-static int qcom_qmp_phy_pcie_power_off(struct phy *phy)
+static int qmp_pcie_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -2136,51 +2034,33 @@ static int qcom_qmp_phy_pcie_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_pcie_exit(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qcom_qmp_phy_pcie_com_exit(qphy);
-
- return 0;
-}
-
-static int qcom_qmp_phy_pcie_enable(struct phy *phy)
+static int qmp_pcie_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_pcie_init(phy);
+ ret = qmp_pcie_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_power_on(phy);
+ ret = qmp_pcie_power_on(phy);
if (ret)
- qcom_qmp_phy_pcie_exit(phy);
+ qmp_pcie_exit(phy);
return ret;
}
-static int qcom_qmp_phy_pcie_disable(struct phy *phy)
+static int qmp_pcie_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_pcie_power_off(phy);
+ ret = qmp_pcie_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_pcie_exit(phy);
-}
-
-static int qcom_qmp_phy_pcie_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- qphy->mode = mode;
-
- return 0;
+ return qmp_pcie_exit(phy);
}
-static int qcom_qmp_phy_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -2196,7 +2076,7 @@ static int qcom_qmp_phy_pcie_vreg_init(struct device *dev, const struct qmp_phy_
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_pcie_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
@@ -2217,7 +2097,7 @@ static int qcom_qmp_phy_pcie_reset_init(struct device *dev, const struct qmp_phy
return 0;
}
-static int qcom_qmp_phy_pcie_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -2300,21 +2180,18 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static const struct phy_ops qcom_qmp_phy_pcie_ops = {
- .power_on = qcom_qmp_phy_pcie_enable,
- .power_off = qcom_qmp_phy_pcie_disable,
- .set_mode = qcom_qmp_phy_pcie_set_mode,
+static const struct phy_ops qmp_pcie_ops = {
+ .power_on = qmp_pcie_enable,
+ .power_off = qmp_pcie_disable,
.owner = THIS_MODULE,
};
-static
-int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
+static int qmp_pcie_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
- char prop_name[MAX_PROP_NAME];
int ret;
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
@@ -2329,59 +2206,51 @@ int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
* For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
+ if (of_device_is_compatible(dev->of_node, "qcom,sdm845-qhp-pcie-phy"))
+ qphy->rx = qphy->tx;
+ else
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
- /*
- * If this is a dual-lane PHY, then there should be registers for the
- * second lane. Some old device trees did not specify this, so fall
- * back to old legacy behavior of assuming they can be reached at an
- * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
-
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
+ if (cfg->lanes >= 2) {
+ qphy->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qphy->tx2))
+ return PTR_ERR(qphy->tx2);
+ qphy->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qphy->rx2))
+ return PTR_ERR(qphy->rx2);
+
+ qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
} else {
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
}
- if (!qphy->pcs_misc &&
+ if (IS_ERR(qphy->pcs_misc) &&
of_device_is_compatible(dev->of_node, "qcom,ipq6018-qmp-pcie-phy"))
qphy->pcs_misc = qphy->pcs + 0x400;
- if (!qphy->pcs_misc)
- dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ if (IS_ERR(qphy->pcs_misc)) {
+ if (cfg->pcs_misc_tbl || cfg->pcs_misc_tbl_sec)
+ return PTR_ERR(qphy->pcs_misc);
+ }
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(qphy->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
"failed to get lane%d pipe clock\n", id);
}
- generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_pcie_ops);
+ generic_phy = devm_phy_create(dev, np, &qmp_pcie_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create qphy %d\n", ret);
@@ -2389,7 +2258,6 @@ int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
}
qphy->phy = generic_phy;
- qphy->index = id;
qphy->qmp = qmp;
qmp->phys[id] = qphy;
phy_set_drvdata(generic_phy, qphy);
@@ -2397,7 +2265,7 @@ int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
return 0;
}
-static const struct of_device_id qcom_qmp_phy_pcie_of_match_table[] = {
+static const struct of_device_id qmp_pcie_of_match_table[] = {
{
.compatible = "qcom,msm8998-qmp-pcie-phy",
.data = &msm8998_pciephy_cfg,
@@ -2440,9 +2308,9 @@ static const struct of_device_id qcom_qmp_phy_pcie_of_match_table[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_pcie_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_pcie_of_match_table);
-static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
+static int qmp_pcie_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -2470,21 +2338,18 @@ static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
if (IS_ERR(serdes))
return PTR_ERR(serdes);
- ret = qcom_qmp_phy_pcie_clk_init(dev, cfg);
+ ret = qmp_pcie_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_reset_init(dev, cfg);
+ ret = qmp_pcie_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_pcie_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -2495,18 +2360,10 @@ static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
if (!qmp->phys)
return -ENOMEM;
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- /*
- * Prevent runtime pm from being ON by default. Users can enable
- * it using power/control in sysfs.
- */
- pm_runtime_forbid(dev);
-
id = 0;
for_each_available_child_of_node(dev->of_node, child) {
/* Create per-lane phy */
- ret = qcom_qmp_phy_pcie_create(dev, child, id, serdes, cfg);
+ ret = qmp_pcie_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -2528,28 +2385,23 @@ static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_pcie_driver = {
- .probe = qcom_qmp_phy_pcie_probe,
+static struct platform_driver qmp_pcie_driver = {
+ .probe = qmp_pcie_probe,
.driver = {
.name = "qcom-qmp-pcie-phy",
- .of_match_table = qcom_qmp_phy_pcie_of_match_table,
+ .of_match_table = qmp_pcie_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_pcie_driver);
+module_platform_driver(qmp_pcie_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP PCIe PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
index 61a44519f969..04f260711ea1 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
@@ -7,11 +7,24 @@
#define QCOM_PHY_QMP_PCS_V5_H_
/* Only for QMP V5 PHY - USB/PCIe PCS registers */
+#define QPHY_V5_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V5_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V5_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V5_PCS_LOCK_DETECT_CONFIG6 0x0d8
#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1 0x0dc
#define QPHY_V5_PCS_G3S2_PRE_GAIN 0x170
#define QPHY_V5_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
#define QPHY_V5_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V5_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_V5_PCS_RX_CONFIG 0x1b0
+#define QPHY_V5_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V5_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V5_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V5_PCS_EQ_CONFIG1 0x1dc
#define QPHY_V5_PCS_EQ_CONFIG2 0x1e0
#define QPHY_V5_PCS_EQ_CONFIG3 0x1e4
+#define QPHY_V5_PCS_EQ_CONFIG5 0x1ec
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h
new file mode 100644
index 000000000000..a1c088bd5158
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V5_5NM_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V5_5NM_H_
+
+/* Only for QMP V5 5NM PHY - TX registers */
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V5_5NM_TX_LANE_MODE_1 0x78
+#define QSERDES_V5_5NM_TX_LANE_MODE_2 0x7c
+#define QSERDES_V5_5NM_TX_LANE_MODE_3 0x80
+#define QSERDES_V5_5NM_TX_BIST_MODE_LANENO 0x00
+#define QSERDES_V5_5NM_TX_BIST_INVERT 0x04
+#define QSERDES_V5_5NM_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V5_5NM_TX_TX_IDLE_LVL_LARGE_AMP 0x10
+#define QSERDES_V5_5NM_TX_TX_DRV_LVL 0x14
+#define QSERDES_V5_5NM_TX_TX_DRV_LVL_OFFSET 0x18
+#define QSERDES_V5_5NM_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V5_5NM_TX_PRE_STALL_LDO_BOOST_EN 0x20
+#define QSERDES_V5_5NM_TX_LPB_EN 0x24
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_TX 0x28
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_RX 0x2c
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V5_5NM_TX_PERL_LENGTH1 0x38
+#define QSERDES_V5_5NM_TX_PERL_LENGTH2 0x3c
+#define QSERDES_V5_5NM_TX_SERDES_BYP_EN_OUT 0x40
+#define QSERDES_V5_5NM_TX_DEBUG_BUS_SEL 0x44
+#define QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN 0x48
+#define QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN 0x4c
+#define QSERDES_V5_5NM_TX_TX_POL_INV 0x50
+#define QSERDES_V5_5NM_TX_PARRATE_REC_DETECT_IDLE_EN 0x54
+#define QSERDES_V5_5NM_TX_BIST_PATTERN1 0x58
+#define QSERDES_V5_5NM_TX_BIST_PATTERN2 0x5c
+#define QSERDES_V5_5NM_TX_BIST_PATTERN3 0x60
+#define QSERDES_V5_5NM_TX_BIST_PATTERN4 0x64
+#define QSERDES_V5_5NM_TX_BIST_PATTERN5 0x68
+#define QSERDES_V5_5NM_TX_BIST_PATTERN6 0x6c
+#define QSERDES_V5_5NM_TX_BIST_PATTERN7 0x70
+#define QSERDES_V5_5NM_TX_BIST_PATTERN8 0x74
+#define QSERDES_V5_5NM_TX_LANE_MODE_1 0x78
+#define QSERDES_V5_5NM_TX_LANE_MODE_2 0x7c
+#define QSERDES_V5_5NM_TX_LANE_MODE_3 0x80
+#define QSERDES_V5_5NM_TX_ATB_SEL1 0x84
+#define QSERDES_V5_5NM_TX_ATB_SEL2 0x88
+#define QSERDES_V5_5NM_TX_RCV_DETECT_LVL 0x8c
+#define QSERDES_V5_5NM_TX_RCV_DETECT_LVL_2 0x90
+#define QSERDES_V5_5NM_TX_PRBS_SEED1 0x94
+#define QSERDES_V5_5NM_TX_PRBS_SEED2 0x98
+#define QSERDES_V5_5NM_TX_PRBS_SEED3 0x9c
+#define QSERDES_V5_5NM_TX_PRBS_SEED4 0xa0
+#define QSERDES_V5_5NM_TX_RESET_GEN 0xa4
+#define QSERDES_V5_5NM_TX_RESET_GEN_MUXES 0xa8
+#define QSERDES_V5_5NM_TX_TRAN_DRVR_EMP_EN 0xac
+#define QSERDES_V5_5NM_TX_VMODE_CTRL1 0xb0
+#define QSERDES_V5_5NM_TX_ALOG_OBSV_BUS_CTRL_1 0xb4
+#define QSERDES_V5_5NM_TX_BIST_STATUS 0xb8
+#define QSERDES_V5_5NM_TX_BIST_ERROR_COUNT1 0xbc
+#define QSERDES_V5_5NM_TX_BIST_ERROR_COUNT2 0xc0
+#define QSERDES_V5_5NM_TX_ALOG_OBSV_BUS_STATUS_1 0xc4
+#define QSERDES_V5_5NM_TX_LANE_DIG_CONFIG 0xc8
+#define QSERDES_V5_5NM_TX_PI_QEC_CTRL 0xcc
+#define QSERDES_V5_5NM_TX_PRE_EMPH 0xd0
+#define QSERDES_V5_5NM_TX_SW_RESET 0xd4
+#define QSERDES_V5_5NM_TX_TX_BAND 0xd8
+#define QSERDES_V5_5NM_TX_SLEW_CNTL0 0xdc
+#define QSERDES_V5_5NM_TX_SLEW_CNTL1 0xe0
+#define QSERDES_V5_5NM_TX_INTERFACE_SELECT 0xe4
+#define QSERDES_V5_5NM_TX_DIG_BKUP_CTRL 0xe8
+#define QSERDES_V5_5NM_TX_DEBUG_BUS0 0xec
+#define QSERDES_V5_5NM_TX_DEBUG_BUS1 0xf0
+#define QSERDES_V5_5NM_TX_DEBUG_BUS2 0xf4
+#define QSERDES_V5_5NM_TX_DEBUG_BUS3 0xf8
+#define QSERDES_V5_5NM_TX_TX_BKUP_RO_BUS 0xfc
+
+/* Only for QMP V5 5NM PHY - RX registers */
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_FO_GAIN_RATE0 0x000
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_FO_GAIN_RATE1 0x004
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_FO_GAIN_RATE2 0x008
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_FO_GAIN_RATE3 0x00c
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_SO_GAIN_RATE0 0x010
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_SO_GAIN_RATE1 0x014
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_SO_GAIN_RATE2 0x018
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_SO_GAIN_RATE3 0x01c
+#define QSERDES_V5_5NM_RX_UCDR_SO_SATURATION 0x020
+#define QSERDES_V5_5NM_RX_UCDR_FO_TO_SO_DELAY 0x024
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_LOW_RATE0 0x028
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE0 0x02c
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_LOW_RATE1 0x030
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE1 0x034
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_LOW_RATE2 0x038
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE2 0x03c
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_LOW_RATE3 0x040
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE3 0x044
+#define QSERDES_V5_5NM_RX_UCDR_PI_CTRL1 0x048
+#define QSERDES_V5_5NM_RX_UCDR_PI_CTRL2 0x04c
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH1_RATE0 0x050
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH1_RATE1 0x054
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH1_RATE2 0x058
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH1_RATE3 0x05c
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH2_RATE0 0x060
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH2_RATE1 0x064
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH2_RATE2 0x068
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH2_RATE3 0x06c
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN1_RATE0 0x070
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN1_RATE1 0x074
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN1_RATE2 0x078
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN1_RATE3 0x07c
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE0 0x080
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE1 0x084
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE2 0x088
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE3 0x08c
+#define QSERDES_V5_5NM_RX_RXCLK_DIV2_CTRL 0x090
+#define QSERDES_V5_5NM_RX_RX_BAND 0x094
+#define QSERDES_V5_5NM_RX_RX_TERM_BW 0x098
+#define QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE0 0x09c
+#define QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE1 0x0a0
+#define QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE2 0x0a4
+#define QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE3 0x0a8
+#define QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE0 0x0ac
+#define QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE1 0x0b0
+#define QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE2 0x0b4
+#define QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE3 0x0b8
+#define QSERDES_V5_5NM_RX_UCDR_PI_CONTROLS 0x0bc
+#define QSERDES_V5_5NM_RX_UCDR_PD_DATA_FILTER_ENABLES 0x0c0
+#define QSERDES_V5_5NM_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE0 0x0c4
+#define QSERDES_V5_5NM_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE1 0x0c8
+#define QSERDES_V5_5NM_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE2 0x0cc
+#define QSERDES_V5_5NM_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3 0x0d0
+#define QSERDES_V5_5NM_RX_AUX_CONTROL 0x0d4
+#define QSERDES_V5_5NM_RX_AUXDATA_TB 0x0d8
+#define QSERDES_V5_5NM_RX_RCLK_AUXDATA_SEL 0x0dc
+#define QSERDES_V5_5NM_RX_EOM_CTRL 0x0e0
+#define QSERDES_V5_5NM_RX_AC_JTAG_ENABLE 0x0e4
+#define QSERDES_V5_5NM_RX_AC_JTAG_INITP 0x0e8
+#define QSERDES_V5_5NM_RX_AC_JTAG_INITN 0x0ec
+#define QSERDES_V5_5NM_RX_AC_JTAG_LVL 0x0f0
+#define QSERDES_V5_5NM_RX_AC_JTAG_MODE 0x0f4
+#define QSERDES_V5_5NM_RX_AC_JTAG_RESET 0x0f8
+#define QSERDES_V5_5NM_RX_RX_RCVR_IQ_EN 0x0fc
+#define QSERDES_V5_5NM_RX_RX_Q_EN_RATES 0x100
+#define QSERDES_V5_5NM_RX_RX_IDAC_I0_DC_OFFSETS 0x104
+#define QSERDES_V5_5NM_RX_RX_IDAC_I0BAR_DC_OFFSETS 0x108
+#define QSERDES_V5_5NM_RX_RX_IDAC_I1_DC_OFFSETS 0x10c
+#define QSERDES_V5_5NM_RX_RX_IDAC_I1BAR_DC_OFFSETS 0x110
+#define QSERDES_V5_5NM_RX_RX_IDAC_Q_DC_OFFSETS 0x114
+#define QSERDES_V5_5NM_RX_RX_IDAC_QBAR_DC_OFFSETS 0x118
+#define QSERDES_V5_5NM_RX_RX_IDAC_A_DC_OFFSETS 0x11c
+#define QSERDES_V5_5NM_RX_RX_IDAC_ABAR_DC_OFFSETS 0x120
+#define QSERDES_V5_5NM_RX_RX_IDAC_EN 0x124
+#define QSERDES_V5_5NM_RX_RX_IDAC_ENABLES 0x128
+#define QSERDES_V5_5NM_RX_RX_IDAC_SIGN 0x12c
+#define QSERDES_V5_5NM_RX_RX_IVCM_CAL_CODE_OVERRIDE 0x130
+#define QSERDES_V5_5NM_RX_RX_IVCM_CAL_CTRL1 0x134
+#define QSERDES_V5_5NM_RX_RX_IVCM_CAL_CTRL2 0x138
+#define QSERDES_V5_5NM_RX_RX_IVCM_POSTCAL_OFFSET 0x13c
+#define QSERDES_V5_5NM_RX_RX_SUMMER_CAL_SPD_MODE 0x140
+#define QSERDES_V5_5NM_RX_RX_HIGHZ_PARRATE 0x144
+#define QSERDES_V5_5NM_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x148
+#define QSERDES_V5_5NM_RX_DFE_1 0x14c
+#define QSERDES_V5_5NM_RX_DFE_2 0x150
+#define QSERDES_V5_5NM_RX_DFE_3 0x154
+#define QSERDES_V5_5NM_RX_DFE_4 0x158
+#define QSERDES_V5_5NM_RX_DFE_TAP3_CTRL 0x15c
+#define QSERDES_V5_5NM_RX_DFE_TAP3_MANVAL_KTAP 0x160
+#define QSERDES_V5_5NM_RX_DFE_TAP4_CTRL 0x164
+#define QSERDES_V5_5NM_RX_DFE_TAP4_MANVAL_KTAP 0x168
+#define QSERDES_V5_5NM_RX_DFE_TAP5_CTRL 0x16c
+#define QSERDES_V5_5NM_RX_DFE_TAP5_MANVAL_KTAP 0x170
+#define QSERDES_V5_5NM_RX_TX_ADPT_CTRL 0x174
+#define QSERDES_V5_5NM_RX_DFE_DAC_ENABLE1 0x178
+#define QSERDES_V5_5NM_RX_DFE_DAC_ENABLE2 0x17c
+#define QSERDES_V5_5NM_RX_TX_ADAPT_PRE_THRESH1 0x180
+#define QSERDES_V5_5NM_RX_TX_ADAPT_PRE_THRESH2 0x184
+#define QSERDES_V5_5NM_RX_TX_ADAPT_POST_THRESH1 0x188
+#define QSERDES_V5_5NM_RX_TX_ADAPT_POST_THRESH2 0x18c
+#define QSERDES_V5_5NM_RX_TX_ADAPT_MAIN_THRESH1 0x190
+#define QSERDES_V5_5NM_RX_TX_ADAPT_MAIN_THRESH2 0x194
+#define QSERDES_V5_5NM_RX_VGA_CAL_CNTRL1 0x198
+#define QSERDES_V5_5NM_RX_VGA_CAL_CNTRL2 0x19c
+#define QSERDES_V5_5NM_RX_VGA_CAL_MAN_VAL 0x1a0
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_CNTRL1 0x1a4
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_CNTRL2 0x1a8
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_MAN_VAL_RATE0 0x1ac
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_MAN_VAL_RATE1 0x1b0
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_MAN_VAL_RATE2 0x1b4
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_MAN_VAL_RATE3 0x1b8
+#define QSERDES_V5_5NM_RX_GM_CAL 0x1bc
+#define QSERDES_V5_5NM_RX_RX_VGA_GAIN2_BLK1 0x1c0
+#define QSERDES_V5_5NM_RX_RX_VGA_GAIN2_BLK2 0x1c4
+#define QSERDES_V5_5NM_RX_RX_EQU_ADAPTOR_CNTRL2 0x1c8
+#define QSERDES_V5_5NM_RX_RX_EQU_ADAPTOR_CNTRL3 0x1cc
+#define QSERDES_V5_5NM_RX_RX_EQU_ADAPTOR_CNTRL4 0x1d0
+#define QSERDES_V5_5NM_RX_RX_IDAC_TSETTLE_LOW 0x1d4
+#define QSERDES_V5_5NM_RX_RX_EQ_OFFSET_LSB 0x1d8
+#define QSERDES_V5_5NM_RX_RX_EQ_OFFSET_MSB 0x1dc
+#define QSERDES_V5_5NM_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1e0
+#define QSERDES_V5_5NM_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x1e4
+#define QSERDES_V5_5NM_RX_SIGDET_ENABLES 0x1e8
+#define QSERDES_V5_5NM_RX_SIGDET_CNTRL 0x1ec
+#define QSERDES_V5_5NM_RX_SIGDET_LVL 0x1f0
+#define QSERDES_V5_5NM_RX_SIGDET_DEGLITCH_CNTRL 0x1f4
+#define QSERDES_V5_5NM_RX_CDR_FREEZE_UP_DN 0x1f8
+#define QSERDES_V5_5NM_RX_CDR_RESET_OVERRIDE 0x1fc
+#define QSERDES_V5_5NM_RX_RX_INTERFACE_MODE 0x200
+#define QSERDES_V5_5NM_RX_JITTER_GEN_MODE 0x204
+#define QSERDES_V5_5NM_RX_SJ_AMP1 0x208
+#define QSERDES_V5_5NM_RX_SJ_AMP2 0x20c
+#define QSERDES_V5_5NM_RX_SJ_PER1 0x210
+#define QSERDES_V5_5NM_RX_SJ_PER2 0x214
+#define QSERDES_V5_5NM_RX_PPM_OFFSET1 0x218
+#define QSERDES_V5_5NM_RX_PPM_OFFSET2 0x21c
+#define QSERDES_V5_5NM_RX_SIGN_PPM_PERIOD1 0x220
+#define QSERDES_V5_5NM_RX_SIGN_PPM_PERIOD2 0x224
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B0 0x228
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B1 0x22c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B2 0x230
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B3 0x234
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B4 0x238
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B5 0x23c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B6 0x240
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B7 0x244
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B0 0x248
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B1 0x24c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B2 0x250
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B3 0x254
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B4 0x258
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B5 0x25c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B6 0x260
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B7 0x264
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B0 0x268
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B1 0x26c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B2 0x270
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B3 0x274
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B4 0x278
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B5 0x27c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B6 0x280
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B7 0x284
+#define QSERDES_V5_5NM_RX_PHPRE_CTRL 0x288
+#define QSERDES_V5_5NM_RX_PHPRE_INITVAL 0x28c
+#define QSERDES_V5_5NM_RX_DFE_EN_TIMER 0x290
+#define QSERDES_V5_5NM_RX_DFE_CTLE_POST_CAL_OFFSET 0x294
+#define QSERDES_V5_5NM_RX_DCC_CTRL1 0x298
+#define QSERDES_V5_5NM_RX_DCC_CTRL2 0x29c
+#define QSERDES_V5_5NM_RX_DCC_OFFSET 0x2a0
+#define QSERDES_V5_5NM_RX_DCC_CMUX_POSTCAL_OFFSET 0x2a4
+#define QSERDES_V5_5NM_RX_DCC_CMUX_CAL_CTRL1 0x2a8
+#define QSERDES_V5_5NM_RX_DCC_CMUX_CAL_CTRL2 0x2ac
+#define QSERDES_V5_5NM_RX_ALOG_OBSV_BUS_CTRL_1 0x2b0
+#define QSERDES_V5_5NM_RX_RX_MARG_CTRL1 0x2b4
+#define QSERDES_V5_5NM_RX_RX_MARG_CTRL2 0x2b8
+#define QSERDES_V5_5NM_RX_RX_MARG_CTRL3 0x2bc
+#define QSERDES_V5_5NM_RX_RX_MARG_CTRL_4 0x2c0
+#define QSERDES_V5_5NM_RX_RX_MARG_CFG_RATE_0_1 0x2c4
+#define QSERDES_V5_5NM_RX_RX_MARG_CFG_RATE_2_3 0x2c8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_CTRL1 0x2cc
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_CTRL2 0x2d0
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH1_RATE210 0x2d4
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH1_RATE3 0x2d8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH2_RATE210 0x2dc
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH2_RATE3 0x2e0
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH3_RATE210 0x2e4
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH3_RATE3 0x2e8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH4_RATE210 0x2ec
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH4_RATE3 0x2f0
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH5_RATE210 0x2f4
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH5_RATE3 0x2f8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH6_RATE210 0x2fc
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH6_RATE3 0x300
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH7_RATE210 0x304
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH7_RATE3 0x308
+#define QSERDES_V5_5NM_RX_Q_PI_INTRINSIC_BIAS_RATE10 0x30c
+#define QSERDES_V5_5NM_RX_Q_PI_INTRINSIC_BIAS_RATE32 0x310
+#define QSERDES_V5_5NM_RX_RX_MARG_VERTICAL_CTRL 0x314
+#define QSERDES_V5_5NM_RX_RX_MARG_VERTICAL_CODE 0x318
+#define QSERDES_V5_5NM_RX_RES_CODE_THRESH_HIGH_AND_BYP 0x31c
+#define QSERDES_V5_5NM_RX_RES_CODE_THRESH_LOW 0x320
+#define QSERDES_V5_5NM_RX_RX_BKUP_CTRL1 0x324
+#define QSERDES_V5_5NM_RX_RX_BKUP_CTRL2 0x328
+#define QSERDES_V5_5NM_RX_RX_BKUP_CTRL3 0x32c
+#define QSERDES_V5_5NM_RX_PI_CTRL1 0x330
+#define QSERDES_V5_5NM_RX_PI_CTRL2 0x334
+#define QSERDES_V5_5NM_RX_PI_QUAD 0x338
+#define QSERDES_V5_5NM_RX_QPI_CTRL1 0x33c
+#define QSERDES_V5_5NM_RX_QPI_CTRL2 0x340
+#define QSERDES_V5_5NM_RX_QPI_QUAD 0x344
+#define QSERDES_V5_5NM_RX_IDATA1 0x348
+#define QSERDES_V5_5NM_RX_IDATA2 0x34c
+#define QSERDES_V5_5NM_RX_IDATA3 0x350
+#define QSERDES_V5_5NM_RX_AC_JTAG_OUTP 0x354
+#define QSERDES_V5_5NM_RX_AC_JTAG_OUTN 0x358
+#define QSERDES_V5_5NM_RX_RX_SIGDET 0x35c
+#define QSERDES_V5_5NM_RX_ALOG_OBSV_BUS_STATUS_1 0x360
+#define QSERDES_V5_5NM_RX_READ_EQCODE 0x364
+#define QSERDES_V5_5NM_RX_READ_OFFSETCODE 0x368
+#define QSERDES_V5_5NM_RX_IA_ERROR_COUNTER_LOW 0x36c
+#define QSERDES_V5_5NM_RX_IA_ERROR_COUNTER_HIGH 0x370
+#define QSERDES_V5_5NM_RX_VGA_READ_CODE 0x374
+#define QSERDES_V5_5NM_RX_VTHRESH_READ_CODE 0x378
+#define QSERDES_V5_5NM_RX_DFE_TAP1_READ_CODE 0x37c
+#define QSERDES_V5_5NM_RX_DFE_TAP2_READ_CODE 0x380
+#define QSERDES_V5_5NM_RX_DFE_TAP3_READ_CODE 0x384
+#define QSERDES_V5_5NM_RX_DFE_TAP4_READ_CODE 0x388
+#define QSERDES_V5_5NM_RX_DFE_TAP5_READ_CODE 0x38c
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_I0 0x390
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_I0BAR 0x394
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_I1 0x398
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_I1BAR 0x39c
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_Q 0x3a0
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_QBAR 0x3a4
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_A 0x3a8
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_ABAR 0x3ac
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_SM_ON 0x3b0
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_SIGNERROR 0x3b4
+#define QSERDES_V5_5NM_RX_IVCM_CAL_STATUS 0x3b8
+#define QSERDES_V5_5NM_RX_IVCM_CAL_DEBUG_STATUS 0x3bc
+#define QSERDES_V5_5NM_RX_DCC_CAL_STATUS 0x3c0
+#define QSERDES_V5_5NM_RX_DCC_READ_CODE_STATUS 0x3c4
+#define QSERDES_V5_5NM_RX_RX_MARG_DEBUG1_STATUS 0x3c8
+#define QSERDES_V5_5NM_RX_RX_MARG_DEBUG2_STATUS 0x3cc
+#define QSERDES_V5_5NM_RX_RX_MARG_READ_CODE_STATUS 0x3d0
+#define QSERDES_V5_5NM_RX_EOM_ERR_CNT_LSB_STATUS 0x3d4
+#define QSERDES_V5_5NM_RX_EOM_ERR_CNT_MSB_STATUS 0x3d8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_TUNE_STATUS 0x3dc
+#define QSERDES_V5_5NM_RX_RX_BKUP_READ_BUS1_STATUS 0x3e0
+#define QSERDES_V5_5NM_RX_RX_BKUP_READ_BUS2_STATUS 0x3e4
+#define QSERDES_V5_5NM_RX_RX_BKUP_READ_BUS3_STATUS 0x3e8
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index c8583f5a54bd..c08d34ad1313 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -28,53 +28,15 @@
#define SW_RESET BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
/* QPHY_START_CONTROL bits */
#define SERDES_START BIT(0)
#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
-#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+/* QPHY_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
-/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
-/* DP PHY soft reset */
-#define SW_DPPHY_RESET BIT(0)
-/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
-#define SW_DPPHY_RESET_MUX BIT(1)
-/* USB3 PHY soft reset */
-#define SW_USB3PHY_RESET BIT(2)
-/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
-#define SW_USB3PHY_RESET_MUX BIT(3)
-
-/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
-#define USB3_MODE BIT(0) /* enables USB3 mode */
-#define DP_MODE BIT(1) /* enables DP mode */
-
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-
#define PHY_INIT_COMPLETE_TIMEOUT 10000
-#define POWER_DOWN_DELAY_US_MIN 10
-#define POWER_DOWN_DELAY_US_MAX 11
-
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
struct qmp_phy_init_tbl {
unsigned int offset;
@@ -115,22 +77,11 @@ struct qmp_phy_init_tbl {
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
- /* Common block control registers */
- QPHY_COM_SW_RESET,
- QPHY_COM_POWER_DOWN_CONTROL,
- QPHY_COM_START_CONTROL,
- QPHY_COM_PCS_READY_STATUS,
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
QPHY_PCS_READY_STATUS,
- QPHY_PCS_STATUS,
- QPHY_PCS_AUTONOMOUS_MODE_CTRL,
- QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -580,14 +531,9 @@ static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
};
-struct qmp_phy;
-
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- /* phy-type - PCIE/UFS/USB */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ int lanes;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
@@ -614,9 +560,6 @@ struct qmp_phy_cfg {
/* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
unsigned int phy_status;
- /* true, if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
-
/* true, if PCS block has no separate SW_RESET register */
bool no_pcs_sw_reset;
};
@@ -633,9 +576,7 @@ struct qmp_phy_cfg {
* @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
* @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
* @pcs_misc: iomapped memory space for lane's pcs_misc
- * @index: lane index
* @qmp: QMP phy to which this lane belongs
- * @mode: current PHY mode
*/
struct qmp_phy {
struct phy *phy;
@@ -647,9 +588,7 @@ struct qmp_phy {
void __iomem *tx2;
void __iomem *rx2;
void __iomem *pcs_misc;
- unsigned int index;
struct qcom_qmp *qmp;
- enum phy_mode mode;
};
/**
@@ -719,8 +658,7 @@ static const char * const qmp_phy_vreg_l[] = {
};
static const struct qmp_phy_cfg msm8996_ufs_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = msm8996_ufs_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl),
@@ -745,8 +683,7 @@ static const struct qmp_phy_cfg msm8996_ufs_cfg = {
};
static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sdm845_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl),
@@ -766,13 +703,11 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
- .is_dual_lane_phy = true,
.no_pcs_sw_reset = true,
};
static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm6115_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm6115_ufsphy_serdes_tbl),
@@ -795,8 +730,7 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
};
static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8150_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
@@ -815,13 +749,10 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8350_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
@@ -840,13 +771,10 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8350_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
@@ -865,11 +793,9 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
-static void qcom_qmp_phy_ufs_configure_lane(void __iomem *base,
+static void qmp_ufs_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -892,27 +818,27 @@ static void qcom_qmp_phy_ufs_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_ufs_configure(void __iomem *base,
+static void qmp_ufs_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num)
{
- qcom_qmp_phy_ufs_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_ufs_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_ufs_serdes_init(struct qmp_phy *qphy)
+static int qmp_ufs_serdes_init(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *serdes = qphy->serdes;
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qcom_qmp_phy_ufs_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ qmp_ufs_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
return 0;
}
-static int qcom_qmp_phy_ufs_com_init(struct qmp_phy *qphy)
+static int qmp_ufs_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -946,7 +872,7 @@ err_disable_regulators:
return ret;
}
-static int qcom_qmp_phy_ufs_com_exit(struct qmp_phy *qphy)
+static int qmp_ufs_com_exit(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -960,7 +886,7 @@ static int qcom_qmp_phy_ufs_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_ufs_init(struct phy *phy)
+static int qmp_ufs_init(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -995,14 +921,14 @@ static int qcom_qmp_phy_ufs_init(struct phy *phy)
return ret;
}
- ret = qcom_qmp_phy_ufs_com_init(qphy);
+ ret = qmp_ufs_com_init(qphy);
if (ret)
return ret;
return 0;
}
-static int qcom_qmp_phy_ufs_power_on(struct phy *phy)
+static int qmp_ufs_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -1014,27 +940,24 @@ static int qcom_qmp_phy_ufs_power_on(struct phy *phy)
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_ufs_serdes_init(qphy);
+ qmp_ufs_serdes_init(qphy);
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_ufs_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_ufs_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_ufs_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_ufs_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
}
- qcom_qmp_phy_ufs_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_ufs_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_ufs_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_ufs_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
}
- qcom_qmp_phy_ufs_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_ufs_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
@@ -1060,7 +983,7 @@ static int qcom_qmp_phy_ufs_power_on(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_ufs_power_off(struct phy *phy)
+static int qmp_ufs_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1084,51 +1007,41 @@ static int qcom_qmp_phy_ufs_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_ufs_exit(struct phy *phy)
+static int qmp_ufs_exit(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
- qcom_qmp_phy_ufs_com_exit(qphy);
+ qmp_ufs_com_exit(qphy);
return 0;
}
-static int qcom_qmp_phy_ufs_enable(struct phy *phy)
+static int qmp_ufs_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_ufs_init(phy);
+ ret = qmp_ufs_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_ufs_power_on(phy);
+ ret = qmp_ufs_power_on(phy);
if (ret)
- qcom_qmp_phy_ufs_exit(phy);
+ qmp_ufs_exit(phy);
return ret;
}
-static int qcom_qmp_phy_ufs_disable(struct phy *phy)
+static int qmp_ufs_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_ufs_power_off(phy);
+ ret = qmp_ufs_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_ufs_exit(phy);
+ return qmp_ufs_exit(phy);
}
-static int qcom_qmp_phy_ufs_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qphy->mode = mode;
-
- return 0;
-}
-
-static int qcom_qmp_phy_ufs_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_ufs_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -1144,7 +1057,7 @@ static int qcom_qmp_phy_ufs_vreg_init(struct device *dev, const struct qmp_phy_c
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_ufs_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_ufs_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -1161,14 +1074,12 @@ static int qcom_qmp_phy_ufs_clk_init(struct device *dev, const struct qmp_phy_cf
}
static const struct phy_ops qcom_qmp_ufs_ops = {
- .power_on = qcom_qmp_phy_ufs_enable,
- .power_off = qcom_qmp_phy_ufs_disable,
- .set_mode = qcom_qmp_phy_ufs_set_mode,
+ .power_on = qmp_ufs_enable,
+ .power_off = qmp_ufs_disable,
.owner = THIS_MODULE,
};
-static
-int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
+static int qmp_ufs_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
@@ -1188,45 +1099,33 @@ int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
* For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
- /*
- * If this is a dual-lane PHY, then there should be registers for the
- * second lane. Some old device trees did not specify this, so fall
- * back to old legacy behavior of assuming they can be reached at an
- * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
-
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
+ if (cfg->lanes >= 2) {
+ qphy->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qphy->tx2))
+ return PTR_ERR(qphy->tx2);
+
+ qphy->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qphy->rx2))
+ return PTR_ERR(qphy->rx2);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
} else {
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
}
- if (!qphy->pcs_misc)
+ if (IS_ERR(qphy->pcs_misc))
dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
generic_phy = devm_phy_create(dev, np, &qcom_qmp_ufs_ops);
@@ -1237,7 +1136,6 @@ int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
}
qphy->phy = generic_phy;
- qphy->index = id;
qphy->qmp = qmp;
qmp->phys[id] = qphy;
phy_set_drvdata(generic_phy, qphy);
@@ -1245,7 +1143,7 @@ int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
return 0;
}
-static const struct of_device_id qcom_qmp_phy_ufs_of_match_table[] = {
+static const struct of_device_id qmp_ufs_of_match_table[] = {
{
.compatible = "qcom,msm8996-qmp-ufs-phy",
.data = &msm8996_ufs_cfg,
@@ -1282,9 +1180,9 @@ static const struct of_device_id qcom_qmp_phy_ufs_of_match_table[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_ufs_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_ufs_of_match_table);
-static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
+static int qmp_ufs_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -1312,17 +1210,14 @@ static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
if (IS_ERR(serdes))
return PTR_ERR(serdes);
- ret = qcom_qmp_phy_ufs_clk_init(dev, cfg);
+ ret = qmp_ufs_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_ufs_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_ufs_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -1333,18 +1228,10 @@ static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
if (!qmp->phys)
return -ENOMEM;
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- /*
- * Prevent runtime pm from being ON by default. Users can enable
- * it using power/control in sysfs.
- */
- pm_runtime_forbid(dev);
-
id = 0;
for_each_available_child_of_node(dev->of_node, child) {
/* Create per-lane phy */
- ret = qcom_qmp_phy_ufs_create(dev, child, id, serdes, cfg);
+ ret = qmp_ufs_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -1355,28 +1242,23 @@ static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_ufs_driver = {
- .probe = qcom_qmp_phy_ufs_probe,
+static struct platform_driver qmp_ufs_driver = {
+ .probe = qmp_ufs_probe,
.driver = {
.name = "qcom-qmp-ufs-phy",
- .of_match_table = qcom_qmp_phy_ufs_of_match_table,
+ .of_match_table = qmp_ufs_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_ufs_driver);
+module_platform_driver(qmp_ufs_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP UFS PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 1d270356a97f..b84c0d4b5754 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -28,16 +28,11 @@
#define SW_RESET BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
/* QPHY_START_CONTROL bits */
#define SERDES_START BIT(0)
#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
-#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
-#define PCS_READY BIT(0)
/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
/* DP PHY soft reset */
@@ -71,11 +66,6 @@
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
-
struct qmp_phy_init_tbl {
unsigned int offset;
unsigned int val;
@@ -115,15 +105,9 @@ struct qmp_phy_init_tbl {
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
- /* Common block control registers */
- QPHY_COM_SW_RESET,
- QPHY_COM_POWER_DOWN_CONTROL,
- QPHY_COM_START_CONTROL,
- QPHY_COM_PCS_READY_STATUS,
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
QPHY_PCS_STATUS,
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
@@ -1338,14 +1322,114 @@ static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
};
-struct qmp_phy;
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- /* phy-type - PCIE/UFS/USB */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ int lanes;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
@@ -1385,8 +1469,6 @@ struct qmp_phy_cfg {
/* true, if PHY has a separate DP_COM control block */
bool has_phy_dp_com_ctrl;
- /* true, if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
/* Offset from PCS to PCS_USB region */
unsigned int pcs_usb_offset;
@@ -1406,7 +1488,6 @@ struct qmp_phy_cfg {
* @pcs_misc: iomapped memory space for lane's pcs_misc
* @pcs_usb: iomapped memory space for lane's pcs_usb
* @pipe_clk: pipe clock
- * @index: lane index
* @qmp: QMP phy to which this lane belongs
* @mode: current PHY mode
*/
@@ -1422,7 +1503,6 @@ struct qmp_phy {
void __iomem *pcs_misc;
void __iomem *pcs_usb;
struct clk *pipe_clk;
- unsigned int index;
struct qcom_qmp *qmp;
enum phy_mode mode;
};
@@ -1520,8 +1600,7 @@ static const char * const qmp_phy_vreg_l[] = {
};
static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = ipq8074_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
@@ -1545,8 +1624,7 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
};
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = msm8996_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
@@ -1570,8 +1648,7 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
};
static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
@@ -1598,12 +1675,10 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
@@ -1630,12 +1705,38 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
+ .lanes = 1,
+
+ .serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sc8280xp_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_tx_tbl),
+ .rx_tbl = sc8280xp_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sc8280xp_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
@@ -1663,8 +1764,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = msm8998_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
@@ -1685,13 +1785,10 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1722,12 +1819,10 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1758,8 +1853,7 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1789,12 +1883,10 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1825,8 +1917,7 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1857,8 +1948,7 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1889,8 +1979,7 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1920,12 +2009,10 @@ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1956,8 +2043,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qcm2290_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
@@ -1978,11 +2064,9 @@ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
-static void qcom_qmp_phy_usb_configure_lane(void __iomem *base,
+static void qmp_usb_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -2005,28 +2089,29 @@ static void qcom_qmp_phy_usb_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_usb_configure(void __iomem *base,
+static void qmp_usb_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num)
{
- qcom_qmp_phy_usb_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_usb_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_usb_serdes_init(struct qmp_phy *qphy)
+static int qmp_usb_serdes_init(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *serdes = qphy->serdes;
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qcom_qmp_phy_usb_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ qmp_usb_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
return 0;
}
-static int qcom_qmp_phy_usb_com_init(struct qmp_phy *qphy)
+static int qmp_usb_init(struct phy *phy)
{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs = qphy->pcs;
@@ -2097,8 +2182,9 @@ err_disable_regulators:
return ret;
}
-static int qcom_qmp_phy_usb_com_exit(struct qmp_phy *qphy)
+static int qmp_usb_exit(struct phy *phy)
{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -2111,21 +2197,7 @@ static int qcom_qmp_phy_usb_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_usb_init(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- int ret;
- dev_vdbg(qmp->dev, "Initializing QMP phy\n");
-
- ret = qcom_qmp_phy_usb_com_init(qphy);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int qcom_qmp_phy_usb_power_on(struct phy *phy)
+static int qmp_usb_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -2137,7 +2209,7 @@ static int qcom_qmp_phy_usb_power_on(struct phy *phy)
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_usb_serdes_init(qphy);
+ qmp_usb_serdes_init(qphy);
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
@@ -2146,25 +2218,22 @@ static int qcom_qmp_phy_usb_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_usb_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_usb_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_usb_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_usb_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
}
- qcom_qmp_phy_usb_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_usb_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_usb_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_usb_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
}
/* Configure link rate, swing, etc. */
- qcom_qmp_phy_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (cfg->has_pwrdn_delay)
usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
@@ -2194,7 +2263,7 @@ err_disable_pipe_clk:
return ret;
}
-static int qcom_qmp_phy_usb_power_off(struct phy *phy)
+static int qmp_usb_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -2219,42 +2288,32 @@ static int qcom_qmp_phy_usb_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_usb_exit(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qcom_qmp_phy_usb_com_exit(qphy);
-
- return 0;
-}
-
-static int qcom_qmp_phy_usb_enable(struct phy *phy)
+static int qmp_usb_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_usb_init(phy);
+ ret = qmp_usb_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_usb_power_on(phy);
+ ret = qmp_usb_power_on(phy);
if (ret)
- qcom_qmp_phy_usb_exit(phy);
+ qmp_usb_exit(phy);
return ret;
}
-static int qcom_qmp_phy_usb_disable(struct phy *phy)
+static int qmp_usb_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_usb_power_off(phy);
+ ret = qmp_usb_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_usb_exit(phy);
+ return qmp_usb_exit(phy);
}
-static int qcom_qmp_phy_usb_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
+static int qmp_usb_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
@@ -2263,7 +2322,7 @@ static int qcom_qmp_phy_usb_set_mode(struct phy *phy,
return 0;
}
-static void qcom_qmp_phy_usb_enable_autonomous_mode(struct qmp_phy *qphy)
+static void qmp_usb_enable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
@@ -2292,7 +2351,7 @@ static void qcom_qmp_phy_usb_enable_autonomous_mode(struct qmp_phy *qphy)
qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
}
-static void qcom_qmp_phy_usb_disable_autonomous_mode(struct qmp_phy *qphy)
+static void qmp_usb_disable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
@@ -2310,7 +2369,7 @@ static void qcom_qmp_phy_usb_disable_autonomous_mode(struct qmp_phy *qphy)
qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
}
-static int __maybe_unused qcom_qmp_phy_usb_runtime_suspend(struct device *dev)
+static int __maybe_unused qmp_usb_runtime_suspend(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
@@ -2318,16 +2377,12 @@ static int __maybe_unused qcom_qmp_phy_usb_runtime_suspend(struct device *dev)
dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode);
- /* Supported only for USB3 PHY and luckily USB3 is the first phy */
- if (cfg->type != PHY_TYPE_USB3)
- return 0;
-
if (!qphy->phy->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
return 0;
}
- qcom_qmp_phy_usb_enable_autonomous_mode(qphy);
+ qmp_usb_enable_autonomous_mode(qphy);
clk_disable_unprepare(qphy->pipe_clk);
clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
@@ -2335,7 +2390,7 @@ static int __maybe_unused qcom_qmp_phy_usb_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused qcom_qmp_phy_usb_runtime_resume(struct device *dev)
+static int __maybe_unused qmp_usb_runtime_resume(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
@@ -2344,10 +2399,6 @@ static int __maybe_unused qcom_qmp_phy_usb_runtime_resume(struct device *dev)
dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode);
- /* Supported only for USB3 PHY and luckily USB3 is the first phy */
- if (cfg->type != PHY_TYPE_USB3)
- return 0;
-
if (!qphy->phy->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
return 0;
@@ -2364,12 +2415,12 @@ static int __maybe_unused qcom_qmp_phy_usb_runtime_resume(struct device *dev)
return ret;
}
- qcom_qmp_phy_usb_disable_autonomous_mode(qphy);
+ qmp_usb_disable_autonomous_mode(qphy);
return 0;
}
-static int qcom_qmp_phy_usb_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_usb_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -2385,7 +2436,7 @@ static int qcom_qmp_phy_usb_vreg_init(struct device *dev, const struct qmp_phy_c
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_usb_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_usb_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
@@ -2406,7 +2457,7 @@ static int qcom_qmp_phy_usb_reset_init(struct device *dev, const struct qmp_phy_
return 0;
}
-static int qcom_qmp_phy_usb_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_usb_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -2482,23 +2533,47 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static const struct phy_ops qcom_qmp_phy_usb_ops = {
- .init = qcom_qmp_phy_usb_enable,
- .exit = qcom_qmp_phy_usb_disable,
- .set_mode = qcom_qmp_phy_usb_set_mode,
+static const struct phy_ops qmp_usb_ops = {
+ .init = qmp_usb_enable,
+ .exit = qmp_usb_disable,
+ .set_mode = qmp_usb_set_mode,
.owner = THIS_MODULE,
};
+static void __iomem *qmp_usb_iomap(struct device *dev, struct device_node *np,
+ int index, bool exclusive)
+{
+ struct resource res;
+
+ if (!exclusive) {
+ if (of_address_to_resource(np, index, &res))
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ return devm_ioremap(dev, res.start, resource_size(&res));
+ }
+
+ return devm_of_iomap(dev, np, index, NULL);
+}
+
static
-int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
+int qmp_usb_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
- char prop_name[MAX_PROP_NAME];
+ bool exclusive = true;
int ret;
+ /*
+ * FIXME: These bindings should be fixed to not rely on overlapping
+ * mappings for PCS.
+ */
+ if (of_device_is_compatible(dev->of_node, "qcom,sdx65-qmp-usb3-uni-phy"))
+ exclusive = false;
+ if (of_device_is_compatible(dev->of_node, "qcom,sm8350-qmp-usb3-uni-phy"))
+ exclusive = false;
+
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
if (!qphy)
return -ENOMEM;
@@ -2511,58 +2586,47 @@ int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
* For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->pcs = qmp_usb_iomap(dev, np, 2, exclusive);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
if (cfg->pcs_usb_offset)
qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset;
- /*
- * If this is a dual-lane PHY, then there should be registers for the
- * second lane. Some old device trees did not specify this, so fall
- * back to old legacy behavior of assuming they can be reached at an
- * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
-
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
+ if (cfg->lanes >= 2) {
+ qphy->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qphy->tx2))
+ return PTR_ERR(qphy->tx2);
+ qphy->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qphy->rx2))
+ return PTR_ERR(qphy->rx2);
+
+ qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
} else {
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
}
- if (!qphy->pcs_misc)
+ if (IS_ERR(qphy->pcs_misc)) {
dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qphy->pcs_misc = NULL;
+ }
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(qphy->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
"failed to get lane%d pipe clock\n", id);
}
- generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_usb_ops);
+ generic_phy = devm_phy_create(dev, np, &qmp_usb_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create qphy %d\n", ret);
@@ -2570,7 +2634,6 @@ int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
}
qphy->phy = generic_phy;
- qphy->index = id;
qphy->qmp = qmp;
qmp->phys[id] = qphy;
phy_set_drvdata(generic_phy, qphy);
@@ -2578,7 +2641,7 @@ int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
return 0;
}
-static const struct of_device_id qcom_qmp_phy_usb_of_match_table[] = {
+static const struct of_device_id qmp_usb_of_match_table[] = {
{
.compatible = "qcom,ipq8074-qmp-usb3-phy",
.data = &ipq8074_usb3phy_cfg,
@@ -2595,6 +2658,9 @@ static const struct of_device_id qcom_qmp_phy_usb_of_match_table[] = {
.compatible = "qcom,sc8180x-qmp-usb3-phy",
.data = &sm8150_usb3phy_cfg,
}, {
+ .compatible = "qcom,sc8280xp-qmp-usb3-uni-phy",
+ .data = &sc8280xp_usb3_uniphy_cfg,
+ }, {
.compatible = "qcom,sdm845-qmp-usb3-phy",
.data = &qmp_v3_usb3phy_cfg,
}, {
@@ -2636,14 +2702,14 @@ static const struct of_device_id qcom_qmp_phy_usb_of_match_table[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_usb_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_usb_of_match_table);
-static const struct dev_pm_ops qcom_qmp_phy_usb_pm_ops = {
- SET_RUNTIME_PM_OPS(qcom_qmp_phy_usb_runtime_suspend,
- qcom_qmp_phy_usb_runtime_resume, NULL)
+static const struct dev_pm_ops qmp_usb_pm_ops = {
+ SET_RUNTIME_PM_OPS(qmp_usb_runtime_suspend,
+ qmp_usb_runtime_resume, NULL)
};
-static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
+static int qmp_usb_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -2678,21 +2744,18 @@ static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
return PTR_ERR(qmp->dp_com);
}
- ret = qcom_qmp_phy_usb_clk_init(dev, cfg);
+ ret = qmp_usb_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_usb_reset_init(dev, cfg);
+ ret = qmp_usb_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_usb_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_usb_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -2704,7 +2767,9 @@ static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
return -ENOMEM;
pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
/*
* Prevent runtime pm from being ON by default. Users can enable
* it using power/control in sysfs.
@@ -2714,7 +2779,7 @@ static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
id = 0;
for_each_available_child_of_node(dev->of_node, child) {
/* Create per-lane phy */
- ret = qcom_qmp_phy_usb_create(dev, child, id, serdes, cfg);
+ ret = qmp_usb_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -2736,29 +2801,24 @@ static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_usb_driver = {
- .probe = qcom_qmp_phy_usb_probe,
+static struct platform_driver qmp_usb_driver = {
+ .probe = qmp_usb_probe,
.driver = {
.name = "qcom-qmp-usb-phy",
- .pm = &qcom_qmp_phy_usb_pm_ops,
- .of_match_table = qcom_qmp_phy_usb_of_match_table,
+ .pm = &qmp_usb_pm_ops,
+ .of_match_table = qmp_usb_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_usb_driver);
+module_platform_driver(qmp_usb_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP USB PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index b139c8af5e8b..26274e3c0cf9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -19,6 +19,7 @@
#include "phy-qcom-qmp-qserdes-com-v5.h"
#include "phy-qcom-qmp-qserdes-txrx-v5.h"
#include "phy-qcom-qmp-qserdes-txrx-v5_20.h"
+#include "phy-qcom-qmp-qserdes-txrx-v5_5nm.h"
#include "phy-qcom-qmp-qserdes-pll.h"
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 7529a7e6e5df..2ef638b32e8f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -973,20 +973,14 @@ static int qusb2_phy_probe(struct platform_device *pdev)
return PTR_ERR(qphy->base);
qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb");
- if (IS_ERR(qphy->cfg_ahb_clk)) {
- ret = PTR_ERR(qphy->cfg_ahb_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get cfg ahb clk, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(qphy->cfg_ahb_clk))
+ return dev_err_probe(dev, PTR_ERR(qphy->cfg_ahb_clk),
+ "failed to get cfg ahb clk\n");
qphy->ref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(qphy->ref_clk)) {
- ret = PTR_ERR(qphy->ref_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get ref clk, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(qphy->ref_clk))
+ return dev_err_probe(dev, PTR_ERR(qphy->ref_clk),
+ "failed to get ref clk\n");
qphy->iface_clk = devm_clk_get_optional(dev, "iface");
if (IS_ERR(qphy->iface_clk))
@@ -1003,12 +997,9 @@ static int qusb2_phy_probe(struct platform_device *pdev)
qphy->vregs[i].supply = qusb2_phy_vreg_names[i];
ret = devm_regulator_bulk_get(dev, num, qphy->vregs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
/* Get the specific init parameters of QMP phy */
qphy->cfg = of_device_get_match_data(dev);
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
index 5d203784f75d..a59063596214 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -52,6 +52,12 @@
#define USB2_SUSPEND_N BIT(2)
#define USB2_SUSPEND_N_SEL BIT(3)
+#define USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X0 (0x6c)
+#define USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X1 (0x70)
+#define USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X2 (0x74)
+#define USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X3 (0x78)
+#define PARAM_OVRD_MASK 0xFF
+
#define USB2_PHY_USB_PHY_CFG0 (0x94)
#define UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN BIT(0)
#define UTMI_PHY_CMN_CTRL_OVERRIDE_EN BIT(1)
@@ -60,12 +66,47 @@
#define REFCLK_SEL_MASK GENMASK(1, 0)
#define REFCLK_SEL_DEFAULT (0x2 << 0)
+#define HS_DISCONNECT_MASK GENMASK(2, 0)
+#define SQUELCH_DETECTOR_MASK GENMASK(7, 5)
+
+#define HS_AMPLITUDE_MASK GENMASK(3, 0)
+#define PREEMPHASIS_DURATION_MASK BIT(5)
+#define PREEMPHASIS_AMPLITUDE_MASK GENMASK(7, 6)
+
+#define HS_RISE_FALL_MASK GENMASK(1, 0)
+#define HS_CROSSOVER_VOLTAGE_MASK GENMASK(3, 2)
+#define HS_OUTPUT_IMPEDANCE_MASK GENMASK(5, 4)
+
+#define LS_FS_OUTPUT_IMPEDANCE_MASK GENMASK(3, 0)
+
static const char * const qcom_snps_hsphy_vreg_names[] = {
"vdda-pll", "vdda33", "vdda18",
};
#define SNPS_HS_NUM_VREGS ARRAY_SIZE(qcom_snps_hsphy_vreg_names)
+struct override_param {
+ s32 value;
+ u8 reg_val;
+};
+
+struct override_param_map {
+ const char *prop_name;
+ const struct override_param *param_table;
+ u8 table_size;
+ u8 reg_offset;
+ u8 param_mask;
+};
+
+struct phy_override_seq {
+ bool need_update;
+ u8 offset;
+ u8 value;
+ u8 mask;
+};
+
+#define NUM_HSPHY_TUNING_PARAMS (9)
+
/**
* struct qcom_snps_hsphy - snps hs phy attributes
*
@@ -91,6 +132,7 @@ struct qcom_snps_hsphy {
bool phy_initialized;
enum phy_mode mode;
+ struct phy_override_seq update_seq_cfg[NUM_HSPHY_TUNING_PARAMS];
};
static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
@@ -173,10 +215,158 @@ static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode,
return 0;
}
+static const struct override_param hs_disconnect_sc7280[] = {
+ { -272, 0 },
+ { 0, 1 },
+ { 317, 2 },
+ { 630, 3 },
+ { 973, 4 },
+ { 1332, 5 },
+ { 1743, 6 },
+ { 2156, 7 },
+};
+
+static const struct override_param squelch_det_threshold_sc7280[] = {
+ { -2090, 7 },
+ { -1560, 6 },
+ { -1030, 5 },
+ { -530, 4 },
+ { 0, 3 },
+ { 530, 2 },
+ { 1060, 1 },
+ { 1590, 0 },
+};
+
+static const struct override_param hs_amplitude_sc7280[] = {
+ { -660, 0 },
+ { -440, 1 },
+ { -220, 2 },
+ { 0, 3 },
+ { 230, 4 },
+ { 440, 5 },
+ { 650, 6 },
+ { 890, 7 },
+ { 1110, 8 },
+ { 1330, 9 },
+ { 1560, 10 },
+ { 1780, 11 },
+ { 2000, 12 },
+ { 2220, 13 },
+ { 2430, 14 },
+ { 2670, 15 },
+};
+
+static const struct override_param preemphasis_duration_sc7280[] = {
+ { 10000, 1 },
+ { 20000, 0 },
+};
+
+static const struct override_param preemphasis_amplitude_sc7280[] = {
+ { 10000, 1 },
+ { 20000, 2 },
+ { 30000, 3 },
+ { 40000, 0 },
+};
+
+static const struct override_param hs_rise_fall_time_sc7280[] = {
+ { -4100, 3 },
+ { 0, 2 },
+ { 2810, 1 },
+ { 5430, 0 },
+};
+
+static const struct override_param hs_crossover_voltage_sc7280[] = {
+ { -31000, 1 },
+ { 0, 3 },
+ { 28000, 2 },
+};
+
+static const struct override_param hs_output_impedance_sc7280[] = {
+ { -2300000, 3 },
+ { 0, 2 },
+ { 2600000, 1 },
+ { 6100000, 0 },
+};
+
+static const struct override_param ls_fs_output_impedance_sc7280[] = {
+ { -1053, 15 },
+ { -557, 7 },
+ { 0, 3 },
+ { 612, 1 },
+ { 1310, 0 },
+};
+
+static const struct override_param_map sc7280_snps_7nm_phy[] = {
+ {
+ "qcom,hs-disconnect-bp",
+ hs_disconnect_sc7280,
+ ARRAY_SIZE(hs_disconnect_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X0,
+ HS_DISCONNECT_MASK
+ },
+ {
+ "qcom,squelch-detector-bp",
+ squelch_det_threshold_sc7280,
+ ARRAY_SIZE(squelch_det_threshold_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X0,
+ SQUELCH_DETECTOR_MASK
+ },
+ {
+ "qcom,hs-amplitude-bp",
+ hs_amplitude_sc7280,
+ ARRAY_SIZE(hs_amplitude_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X1,
+ HS_AMPLITUDE_MASK
+ },
+ {
+ "qcom,pre-emphasis-duration-bp",
+ preemphasis_duration_sc7280,
+ ARRAY_SIZE(preemphasis_duration_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X1,
+ PREEMPHASIS_DURATION_MASK,
+ },
+ {
+ "qcom,pre-emphasis-amplitude-bp",
+ preemphasis_amplitude_sc7280,
+ ARRAY_SIZE(preemphasis_amplitude_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X1,
+ PREEMPHASIS_AMPLITUDE_MASK,
+ },
+ {
+ "qcom,hs-rise-fall-time-bp",
+ hs_rise_fall_time_sc7280,
+ ARRAY_SIZE(hs_rise_fall_time_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X2,
+ HS_RISE_FALL_MASK
+ },
+ {
+ "qcom,hs-crossover-voltage-microvolt",
+ hs_crossover_voltage_sc7280,
+ ARRAY_SIZE(hs_crossover_voltage_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X2,
+ HS_CROSSOVER_VOLTAGE_MASK
+ },
+ {
+ "qcom,hs-output-impedance-micro-ohms",
+ hs_output_impedance_sc7280,
+ ARRAY_SIZE(hs_output_impedance_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X2,
+ HS_OUTPUT_IMPEDANCE_MASK,
+ },
+ {
+ "qcom,ls-fs-output-impedance-bp",
+ ls_fs_output_impedance_sc7280,
+ ARRAY_SIZE(ls_fs_output_impedance_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X3,
+ LS_FS_OUTPUT_IMPEDANCE_MASK,
+ },
+ {},
+};
+
static int qcom_snps_hsphy_init(struct phy *phy)
{
struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
- int ret;
+ int ret, i;
dev_vdbg(&phy->dev, "%s(): Initializing SNPS HS phy\n", __func__);
@@ -223,6 +413,14 @@ static int qcom_snps_hsphy_init(struct phy *phy)
qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL1,
VBUSVLDEXT0, VBUSVLDEXT0);
+ for (i = 0; i < ARRAY_SIZE(hsphy->update_seq_cfg); i++) {
+ if (hsphy->update_seq_cfg[i].need_update)
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ hsphy->update_seq_cfg[i].offset,
+ hsphy->update_seq_cfg[i].mask,
+ hsphy->update_seq_cfg[i].value);
+ }
+
qcom_snps_hsphy_write_mask(hsphy->base,
USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2,
VREGBYPASS, VREGBYPASS);
@@ -280,7 +478,10 @@ static const struct phy_ops qcom_snps_hsphy_gen_ops = {
static const struct of_device_id qcom_snps_hsphy_of_match_table[] = {
{ .compatible = "qcom,sm8150-usb-hs-phy", },
{ .compatible = "qcom,usb-snps-hs-5nm-phy", },
- { .compatible = "qcom,usb-snps-hs-7nm-phy", },
+ {
+ .compatible = "qcom,usb-snps-hs-7nm-phy",
+ .data = &sc7280_snps_7nm_phy,
+ },
{ .compatible = "qcom,usb-snps-femto-v2-phy", },
{ }
};
@@ -291,6 +492,55 @@ static const struct dev_pm_ops qcom_snps_hsphy_pm_ops = {
qcom_snps_hsphy_runtime_resume, NULL)
};
+static void qcom_snps_hsphy_override_param_update_val(
+ const struct override_param_map map,
+ s32 dt_val, struct phy_override_seq *seq_entry)
+{
+ int i;
+
+ /*
+ * Param table for each param is in increasing order
+ * of dt values. We need to iterate over the list to
+ * select the entry that matches the dt value and pick
+ * up the corresponding register value.
+ */
+ for (i = 0; i < map.table_size - 1; i++) {
+ if (map.param_table[i].value == dt_val)
+ break;
+ }
+
+ seq_entry->need_update = true;
+ seq_entry->offset = map.reg_offset;
+ seq_entry->mask = map.param_mask;
+ seq_entry->value = map.param_table[i].reg_val << __ffs(map.param_mask);
+}
+
+static void qcom_snps_hsphy_read_override_param_seq(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ s32 val;
+ int ret, i;
+ struct qcom_snps_hsphy *hsphy;
+ const struct override_param_map *cfg = of_device_get_match_data(dev);
+
+ if (!cfg)
+ return;
+
+ hsphy = dev_get_drvdata(dev);
+
+ for (i = 0; cfg[i].prop_name != NULL; i++) {
+ ret = of_property_read_s32(node, cfg[i].prop_name, &val);
+ if (ret)
+ continue;
+
+ qcom_snps_hsphy_override_param_update_val(cfg[i], val,
+ &hsphy->update_seq_cfg[i]);
+ dev_dbg(&hsphy->phy->dev, "Read param: %s dt_val: %d reg_val: 0x%x\n",
+ cfg[i].prop_name, val, hsphy->update_seq_cfg[i].value);
+
+ }
+}
+
static int qcom_snps_hsphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -309,12 +559,9 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
return PTR_ERR(hsphy->base);
hsphy->ref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(hsphy->ref_clk)) {
- ret = PTR_ERR(hsphy->ref_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get ref clk, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hsphy->ref_clk))
+ return dev_err_probe(dev, PTR_ERR(hsphy->ref_clk),
+ "failed to get ref clk\n");
hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(hsphy->phy_reset)) {
@@ -327,12 +574,9 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
hsphy->vregs[i].supply = qcom_snps_hsphy_vreg_names[i];
ret = devm_regulator_bulk_get(dev, num, hsphy->vregs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -352,6 +596,7 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, hsphy);
phy_set_drvdata(generic_phy, hsphy);
+ qcom_snps_hsphy_read_override_param_seq(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (!IS_ERR(phy_provider))
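The new tuning support above works in two steps: at probe time each qcom,*-bp / *-microvolt / *-micro-ohms property is looked up in its per-SoC table and converted into a masked register write recorded in update_seq_cfg[]; at init time those writes are replayed into the HS_PHY_OVERRIDE_X* registers. A condensed, hypothetical restatement of the lookup-and-pack step (struct override_param is copied from the hunk; pack_override() is illustrative only):

#include <linux/bitops.h>
#include <linux/types.h>

struct override_param {
	s32 value;	/* board value from the DT property */
	u8 reg_val;	/* register code to program */
};

/* Find the entry matching the DT value (falling back to the last entry,
 * as the driver's loop does) and shift its code into the masked field. */
static u8 pack_override(const struct override_param *table, u8 table_size,
			s32 dt_val, u8 mask)
{
	int i;

	for (i = 0; i < table_size - 1; i++)
		if (table[i].value == dt_val)
			break;

	return table[i].reg_val << __ffs(mask);
}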
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
index 716a77748ed8..20f6dd37c7c1 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
@@ -54,8 +54,10 @@ static int qcom_usb_hsic_phy_power_on(struct phy *phy)
/* Configure pins for HSIC functionality */
pins_default = pinctrl_lookup_state(uphy->pctl, PINCTRL_STATE_DEFAULT);
- if (IS_ERR(pins_default))
- return PTR_ERR(pins_default);
+ if (IS_ERR(pins_default)) {
+ ret = PTR_ERR(pins_default);
+ goto err_ulpi;
+ }
ret = pinctrl_select_state(uphy->pctl, pins_default);
if (ret)
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 9022e395c056..94360fc96a6f 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -83,6 +83,15 @@ config PHY_ROCKCHIP_PCIE
help
Enable this to support the Rockchip PCIe PHY.
+config PHY_ROCKCHIP_SNPS_PCIE3
+ tristate "Rockchip Snps PCIe3 PHY Driver"
+ depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the Rockchip snps PCIe3 PHY.
+
config PHY_ROCKCHIP_TYPEC
tristate "Rockchip TYPEC PHY Driver"
depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index a5041efb5b8f..7eab129230d1 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY) += phy-rockchip-naneng-combphy.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
+obj-$(CONFIG_PHY_ROCKCHIP_SNPS_PCIE3) += phy-rockchip-snps-pcie3.o
obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
index ca13a604ab4f..75f948bdea6a 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
@@ -27,6 +27,9 @@
#define RK3368_GRF_SOC_CON6_OFFSET 0x0418
+#define RK3568_GRF_VI_CON0 0x0340
+#define RK3568_GRF_VI_CON1 0x0344
+
/* PHY */
#define CSIDPHY_CTRL_LANE_ENABLE 0x00
#define CSIDPHY_CTRL_LANE_ENABLE_CK BIT(6)
@@ -58,9 +61,11 @@
#define RK1808_CSIDPHY_CLK_WR_THS_SETTLE 0x160
#define RK3326_CSIDPHY_CLK_WR_THS_SETTLE 0x100
#define RK3368_CSIDPHY_CLK_WR_THS_SETTLE 0x100
+#define RK3568_CSIDPHY_CLK_WR_THS_SETTLE 0x160
/* Calibration reception enable */
#define RK1808_CSIDPHY_CLK_CALIB_EN 0x168
+#define RK3568_CSIDPHY_CLK_CALIB_EN 0x168
/*
* The higher 16-bit of this register is used for write protection
@@ -103,6 +108,12 @@ static const struct dphy_reg rk3368_grf_dphy_regs[] = {
[GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK3368_GRF_SOC_CON6_OFFSET, 4, 8),
};
+static const struct dphy_reg rk3568_grf_dphy_regs[] = {
+ [GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK3568_GRF_VI_CON0, 4, 0),
+ [GRF_DPHY_CSIPHY_DATALANE_EN] = PHY_REG(RK3568_GRF_VI_CON0, 4, 4),
+ [GRF_DPHY_CSIPHY_CLKLANE_EN] = PHY_REG(RK3568_GRF_VI_CON0, 1, 8),
+};
+
struct hsfreq_range {
u32 range_h;
u8 cfg_bit;
@@ -352,6 +363,15 @@ static const struct dphy_drv_data rk3368_mipidphy_drv_data = {
.grf_regs = rk3368_grf_dphy_regs,
};
+static const struct dphy_drv_data rk3568_mipidphy_drv_data = {
+ .pwrctl_offset = -1,
+ .ths_settle_offset = RK3568_CSIDPHY_CLK_WR_THS_SETTLE,
+ .calib_offset = RK3568_CSIDPHY_CLK_CALIB_EN,
+ .hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
+ .num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
+ .grf_regs = rk3568_grf_dphy_regs,
+};
+
static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
{
.compatible = "rockchip,px30-csi-dphy",
@@ -369,6 +389,10 @@ static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
.compatible = "rockchip,rk3368-csi-dphy",
.data = &rk3368_mipidphy_drv_data,
},
+ {
+ .compatible = "rockchip,rk3568-csi-dphy",
+ .data = &rk3568_mipidphy_drv_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_inno_csidphy_match_id);
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index 630e01b5c19b..2c5847faff63 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -84,9 +84,25 @@
#define DATA_LANE_0_SKEW_PHASE_MASK GENMASK(2, 0)
#define DATA_LANE_0_SKEW_PHASE(x) UPDATE(x, 2, 0)
/* Analog Register Part: reg08 */
+#define PLL_POST_DIV_ENABLE_MASK BIT(5)
+#define PLL_POST_DIV_ENABLE BIT(5)
#define SAMPLE_CLOCK_DIRECTION_MASK BIT(4)
#define SAMPLE_CLOCK_DIRECTION_REVERSE BIT(4)
#define SAMPLE_CLOCK_DIRECTION_FORWARD 0
+#define LOWFRE_EN_MASK BIT(5)
+#define PLL_OUTPUT_FREQUENCY_DIV_BY_1 0
+#define PLL_OUTPUT_FREQUENCY_DIV_BY_2 1
+/* Analog Register Part: reg0b */
+#define CLOCK_LANE_VOD_RANGE_SET_MASK GENMASK(3, 0)
+#define CLOCK_LANE_VOD_RANGE_SET(x) UPDATE(x, 3, 0)
+#define VOD_MIN_RANGE 0x1
+#define VOD_MID_RANGE 0x3
+#define VOD_BIG_RANGE 0x7
+#define VOD_MAX_RANGE 0xf
+/* Analog Register Part: reg1E */
+#define PLL_MODE_SEL_MASK GENMASK(6, 5)
+#define PLL_MODE_SEL_LVDS_MODE 0
+#define PLL_MODE_SEL_MIPI_MODE BIT(5)
/* Digital Register Part: reg00 */
#define REG_DIG_RSTN_MASK BIT(0)
#define REG_DIG_RSTN_NORMAL BIT(0)
@@ -102,20 +118,22 @@
#define T_LPX_CNT_MASK GENMASK(5, 0)
#define T_LPX_CNT(x) UPDATE(x, 5, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg06 */
+#define T_HS_ZERO_CNT_HI_MASK BIT(7)
+#define T_HS_ZERO_CNT_HI(x) UPDATE(x, 7, 7)
#define T_HS_PREPARE_CNT_MASK GENMASK(6, 0)
#define T_HS_PREPARE_CNT(x) UPDATE(x, 6, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg07 */
-#define T_HS_ZERO_CNT_MASK GENMASK(5, 0)
-#define T_HS_ZERO_CNT(x) UPDATE(x, 5, 0)
+#define T_HS_ZERO_CNT_LO_MASK GENMASK(5, 0)
+#define T_HS_ZERO_CNT_LO(x) UPDATE(x, 5, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg08 */
#define T_HS_TRAIL_CNT_MASK GENMASK(6, 0)
#define T_HS_TRAIL_CNT(x) UPDATE(x, 6, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg09 */
-#define T_HS_EXIT_CNT_MASK GENMASK(4, 0)
-#define T_HS_EXIT_CNT(x) UPDATE(x, 4, 0)
+#define T_HS_EXIT_CNT_LO_MASK GENMASK(4, 0)
+#define T_HS_EXIT_CNT_LO(x) UPDATE(x, 4, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0a */
-#define T_CLK_POST_CNT_MASK GENMASK(3, 0)
-#define T_CLK_POST_CNT(x) UPDATE(x, 3, 0)
+#define T_CLK_POST_CNT_LO_MASK GENMASK(3, 0)
+#define T_CLK_POST_CNT_LO(x) UPDATE(x, 3, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0c */
#define LPDT_TX_PPI_SYNC_MASK BIT(2)
#define LPDT_TX_PPI_SYNC_ENABLE BIT(2)
@@ -129,9 +147,13 @@
#define T_CLK_PRE_CNT_MASK GENMASK(3, 0)
#define T_CLK_PRE_CNT(x) UPDATE(x, 3, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg10 */
+#define T_CLK_POST_CNT_HI_MASK GENMASK(7, 6)
+#define T_CLK_POST_CNT_HI(x) UPDATE(x, 7, 6)
#define T_TA_GO_CNT_MASK GENMASK(5, 0)
#define T_TA_GO_CNT(x) UPDATE(x, 5, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg11 */
+#define T_HS_EXIT_CNT_HI_MASK BIT(6)
+#define T_HS_EXIT_CNT_HI(x) UPDATE(x, 6, 6)
#define T_TA_SURE_CNT_MASK GENMASK(5, 0)
#define T_TA_SURE_CNT(x) UPDATE(x, 5, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg12 */
@@ -169,11 +191,23 @@
#define DSI_PHY_STATUS 0xb0
#define PHY_LOCK BIT(0)
+enum phy_max_rate {
+ MAX_1GHZ,
+ MAX_2_5GHZ,
+};
+
+struct inno_video_phy_plat_data {
+ const struct inno_mipi_dphy_timing *inno_mipi_dphy_timing_table;
+ const unsigned int num_timings;
+ enum phy_max_rate max_rate;
+};
+
struct inno_dsidphy {
struct device *dev;
struct clk *ref_clk;
struct clk *pclk_phy;
struct clk *pclk_host;
+ const struct inno_video_phy_plat_data *pdata;
void __iomem *phy_base;
void __iomem *host_base;
struct reset_control *rst;
@@ -200,6 +234,53 @@ enum {
REGISTER_PART_LVDS,
};
+struct inno_mipi_dphy_timing {
+ unsigned long rate;
+ u8 lpx;
+ u8 hs_prepare;
+ u8 clk_lane_hs_zero;
+ u8 data_lane_hs_zero;
+ u8 hs_trail;
+};
+
+static const
+struct inno_mipi_dphy_timing inno_mipi_dphy_timing_table_max_1ghz[] = {
+ { 110000000, 0x0, 0x20, 0x16, 0x02, 0x22},
+ { 150000000, 0x0, 0x06, 0x16, 0x03, 0x45},
+ { 200000000, 0x0, 0x18, 0x17, 0x04, 0x0b},
+ { 250000000, 0x0, 0x05, 0x17, 0x05, 0x16},
+ { 300000000, 0x0, 0x51, 0x18, 0x06, 0x2c},
+ { 400000000, 0x0, 0x64, 0x19, 0x07, 0x33},
+ { 500000000, 0x0, 0x20, 0x1b, 0x07, 0x4e},
+ { 600000000, 0x0, 0x6a, 0x1d, 0x08, 0x3a},
+ { 700000000, 0x0, 0x3e, 0x1e, 0x08, 0x6a},
+ { 800000000, 0x0, 0x21, 0x1f, 0x09, 0x29},
+ {1000000000, 0x0, 0x09, 0x20, 0x09, 0x27},
+};
+
+static const
+struct inno_mipi_dphy_timing inno_mipi_dphy_timing_table_max_2_5ghz[] = {
+ { 110000000, 0x02, 0x7f, 0x16, 0x02, 0x02},
+ { 150000000, 0x02, 0x7f, 0x16, 0x03, 0x02},
+ { 200000000, 0x02, 0x7f, 0x17, 0x04, 0x02},
+ { 250000000, 0x02, 0x7f, 0x17, 0x05, 0x04},
+ { 300000000, 0x02, 0x7f, 0x18, 0x06, 0x04},
+ { 400000000, 0x03, 0x7e, 0x19, 0x07, 0x04},
+ { 500000000, 0x03, 0x7c, 0x1b, 0x07, 0x08},
+ { 600000000, 0x03, 0x70, 0x1d, 0x08, 0x10},
+ { 700000000, 0x05, 0x40, 0x1e, 0x08, 0x30},
+ { 800000000, 0x05, 0x02, 0x1f, 0x09, 0x30},
+ {1000000000, 0x05, 0x08, 0x20, 0x09, 0x30},
+ {1200000000, 0x06, 0x03, 0x32, 0x14, 0x0f},
+ {1400000000, 0x09, 0x03, 0x32, 0x14, 0x0f},
+ {1600000000, 0x0d, 0x42, 0x36, 0x0e, 0x0f},
+ {1800000000, 0x0e, 0x47, 0x7a, 0x0e, 0x0f},
+ {2000000000, 0x11, 0x64, 0x7a, 0x0e, 0x0b},
+ {2200000000, 0x13, 0x64, 0x7e, 0x15, 0x0b},
+ {2400000000, 0x13, 0x33, 0x7f, 0x15, 0x6a},
+ {2500000000, 0x15, 0x54, 0x7f, 0x15, 0x6a},
+};
+
static inline struct inno_dsidphy *hw_to_inno(struct clk_hw *hw)
{
return container_of(hw, struct inno_dsidphy, pll.hw);
@@ -290,31 +371,15 @@ static unsigned long inno_dsidphy_pll_calc_rate(struct inno_dsidphy *inno,
static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
{
struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
- const struct {
- unsigned long rate;
- u8 hs_prepare;
- u8 clk_lane_hs_zero;
- u8 data_lane_hs_zero;
- u8 hs_trail;
- } timings[] = {
- { 110000000, 0x20, 0x16, 0x02, 0x22},
- { 150000000, 0x06, 0x16, 0x03, 0x45},
- { 200000000, 0x18, 0x17, 0x04, 0x0b},
- { 250000000, 0x05, 0x17, 0x05, 0x16},
- { 300000000, 0x51, 0x18, 0x06, 0x2c},
- { 400000000, 0x64, 0x19, 0x07, 0x33},
- { 500000000, 0x20, 0x1b, 0x07, 0x4e},
- { 600000000, 0x6a, 0x1d, 0x08, 0x3a},
- { 700000000, 0x3e, 0x1e, 0x08, 0x6a},
- { 800000000, 0x21, 0x1f, 0x09, 0x29},
- {1000000000, 0x09, 0x20, 0x09, 0x27},
- };
+ const struct inno_mipi_dphy_timing *timings;
u32 t_txbyteclkhs, t_txclkesc;
u32 txbyteclkhs, txclkesc, esc_clk_div;
u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
unsigned int i;
+ timings = inno->pdata->inno_mipi_dphy_timing_table;
+
inno_dsidphy_pll_calc_rate(inno, cfg->hs_clk_rate);
/* Select MIPI mode */
@@ -327,6 +392,13 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
REG_FBDIV_HI_MASK, REG_FBDIV_HI(inno->pll.fbdiv));
phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
REG_FBDIV_LO_MASK, REG_FBDIV_LO(inno->pll.fbdiv));
+ if (inno->pdata->max_rate == MAX_2_5GHZ) {
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
+ PLL_POST_DIV_ENABLE_MASK, PLL_POST_DIV_ENABLE);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x0b,
+ CLOCK_LANE_VOD_RANGE_SET_MASK,
+ CLOCK_LANE_VOD_RANGE_SET(VOD_MAX_RANGE));
+ }
/* Enable PLL and LDO */
phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
REG_LDOPD_MASK | REG_PLLPD_MASK,
@@ -368,14 +440,6 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE);
/*
- * The value of counter for HS Tlpx Time
- * Tlpx = Tpin_txbyteclkhs * (2 + value)
- */
- lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
- if (lpx >= 2)
- lpx -= 2;
-
- /*
* The value of counter for HS Tta-go
* Tta-go for turnaround
* Tta-go = Ttxclkesc * value
@@ -394,13 +458,24 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
*/
ta_wait = DIV_ROUND_UP(cfg->ta_get, t_txclkesc);
- for (i = 0; i < ARRAY_SIZE(timings); i++)
+ for (i = 0; i < inno->pdata->num_timings; i++)
if (inno->pll.rate <= timings[i].rate)
break;
- if (i == ARRAY_SIZE(timings))
+ if (i == inno->pdata->num_timings)
--i;
+ /*
+ * The value of counter for HS Tlpx Time
+ * Tlpx = Tpin_txbyteclkhs * (2 + value)
+ */
+ if (inno->pdata->max_rate == MAX_1GHZ) {
+ lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
+ if (lpx >= 2)
+ lpx -= 2;
+ } else
+ lpx = timings[i].lpx;
+
hs_prepare = timings[i].hs_prepare;
hs_trail = timings[i].hs_trail;
clk_lane_hs_zero = timings[i].clk_lane_hs_zero;
@@ -417,14 +492,23 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
T_LPX_CNT(lpx));
phy_update_bits(inno, i, 0x06, T_HS_PREPARE_CNT_MASK,
T_HS_PREPARE_CNT(hs_prepare));
- phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_MASK,
- T_HS_ZERO_CNT(hs_zero));
+ if (inno->pdata->max_rate == MAX_2_5GHZ)
+ phy_update_bits(inno, i, 0x06, T_HS_ZERO_CNT_HI_MASK,
+ T_HS_ZERO_CNT_HI(hs_zero >> 6));
+ phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_LO_MASK,
+ T_HS_ZERO_CNT_LO(hs_zero));
phy_update_bits(inno, i, 0x08, T_HS_TRAIL_CNT_MASK,
T_HS_TRAIL_CNT(hs_trail));
- phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_MASK,
- T_HS_EXIT_CNT(hs_exit));
- phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_MASK,
- T_CLK_POST_CNT(clk_post));
+ if (inno->pdata->max_rate == MAX_2_5GHZ)
+ phy_update_bits(inno, i, 0x11, T_HS_EXIT_CNT_HI_MASK,
+ T_HS_EXIT_CNT_HI(hs_exit >> 5));
+ phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_LO_MASK,
+ T_HS_EXIT_CNT_LO(hs_exit));
+ if (inno->pdata->max_rate == MAX_2_5GHZ)
+ phy_update_bits(inno, i, 0x10, T_CLK_POST_CNT_HI_MASK,
+ T_CLK_POST_CNT_HI(clk_post >> 4));
+ phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_LO_MASK,
+ T_CLK_POST_CNT_LO(clk_post));
phy_update_bits(inno, i, 0x0e, T_CLK_PRE_CNT_MASK,
T_CLK_PRE_CNT(clk_pre));
phy_update_bits(inno, i, 0x0c, T_WAKEUP_CNT_HI_MASK,
@@ -452,8 +536,9 @@ static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
/* Sample clock reverse direction */
phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
- SAMPLE_CLOCK_DIRECTION_MASK,
- SAMPLE_CLOCK_DIRECTION_REVERSE);
+ SAMPLE_CLOCK_DIRECTION_MASK | LOWFRE_EN_MASK,
+ SAMPLE_CLOCK_DIRECTION_REVERSE |
+ PLL_OUTPUT_FREQUENCY_DIV_BY_1);
/* Select LVDS mode */
phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
@@ -473,6 +558,10 @@ static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
msleep(20);
+ /* Select PLL mode */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x1e,
+ PLL_MODE_SEL_MASK, PLL_MODE_SEL_LVDS_MODE);
+
/* Reset LVDS digital logic */
phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
LVDS_DIGITAL_INTERNAL_RESET_MASK,
@@ -592,6 +681,18 @@ static const struct phy_ops inno_dsidphy_ops = {
.owner = THIS_MODULE,
};
+static const struct inno_video_phy_plat_data max_1ghz_video_phy_plat_data = {
+ .inno_mipi_dphy_timing_table = inno_mipi_dphy_timing_table_max_1ghz,
+ .num_timings = ARRAY_SIZE(inno_mipi_dphy_timing_table_max_1ghz),
+ .max_rate = MAX_1GHZ,
+};
+
+static const struct inno_video_phy_plat_data max_2_5ghz_video_phy_plat_data = {
+ .inno_mipi_dphy_timing_table = inno_mipi_dphy_timing_table_max_2_5ghz,
+ .num_timings = ARRAY_SIZE(inno_mipi_dphy_timing_table_max_2_5ghz),
+ .max_rate = MAX_2_5GHZ,
+};
+
static int inno_dsidphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -605,6 +706,7 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
return -ENOMEM;
inno->dev = dev;
+ inno->pdata = of_device_get_match_data(inno->dev);
platform_set_drvdata(pdev, inno);
inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
@@ -663,9 +765,19 @@ static int inno_dsidphy_remove(struct platform_device *pdev)
}
static const struct of_device_id inno_dsidphy_of_match[] = {
- { .compatible = "rockchip,px30-dsi-dphy", },
- { .compatible = "rockchip,rk3128-dsi-dphy", },
- { .compatible = "rockchip,rk3368-dsi-dphy", },
+ {
+ .compatible = "rockchip,px30-dsi-dphy",
+ .data = &max_1ghz_video_phy_plat_data,
+ }, {
+ .compatible = "rockchip,rk3128-dsi-dphy",
+ .data = &max_1ghz_video_phy_plat_data,
+ }, {
+ .compatible = "rockchip,rk3368-dsi-dphy",
+ .data = &max_1ghz_video_phy_plat_data,
+ }, {
+ .compatible = "rockchip,rk3568-dsi-dphy",
+ .data = &max_2_5ghz_video_phy_plat_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, inno_dsidphy_of_match);
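The rate handling above does not change in principle: the driver still walks the timing table and uses the first entry whose rate bound covers the PLL rate, clamping to the last entry when the rate exceeds the table; only the table itself is now selected per SoC. A hypothetical standalone restatement of that selection (the struct is a trimmed copy of the one introduced in the hunk):

#include <linux/types.h>

struct inno_mipi_dphy_timing {		/* trimmed copy of the hunk's struct */
	unsigned long rate;
	u8 lpx, hs_prepare, clk_lane_hs_zero, data_lane_hs_zero, hs_trail;
};

static const struct inno_mipi_dphy_timing *
pick_timing(const struct inno_mipi_dphy_timing *table, unsigned int n,
	    unsigned long pll_rate)
{
	unsigned int i;

	/* first entry whose rate bound is >= the PLL rate ... */
	for (i = 0; i < n; i++)
		if (pll_rate <= table[i].rate)
			break;
	/* ... falling back to the last (highest-rate) entry */
	if (i == n)
		i--;

	return &table[i];
}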
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 0b1e9337ee8e..e6ededc51523 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1124,7 +1124,7 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
struct rockchip_usb2phy_port *rport,
struct device_node *child_np)
{
- int ret;
+ int ret, id;
rport->port_id = USB2PHY_PORT_OTG;
rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_OTG];
@@ -1162,13 +1162,15 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
ret = devm_extcon_register_notifier(rphy->dev, rphy->edev,
EXTCON_USB_HOST, &rport->event_nb);
- if (ret)
+ if (ret) {
dev_err(rphy->dev, "register USB HOST notifier failed\n");
+ goto out;
+ }
if (!of_property_read_bool(rphy->dev->of_node, "extcon")) {
/* do initial sync of usb state */
- ret = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
- extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !ret);
+ id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
+ extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id);
}
}
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
new file mode 100644
index 000000000000..1d355b32ba55
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip PCIE3.0 phy driver
+ *
+ * Copyright (C) 2022 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/pcie.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* Register for RK3568 */
+#define GRF_PCIE30PHY_CON1 0x4
+#define GRF_PCIE30PHY_CON6 0x18
+#define GRF_PCIE30PHY_CON9 0x24
+#define GRF_PCIE30PHY_DA_OCM (BIT(15) | BIT(31))
+#define GRF_PCIE30PHY_STATUS0 0x80
+#define GRF_PCIE30PHY_WR_EN (0xf << 16)
+#define SRAM_INIT_DONE(reg) (reg & BIT(14))
+
+#define RK3568_BIFURCATION_LANE_0_1 BIT(0)
+
+/* Register for RK3588 */
+#define PHP_GRF_PCIESEL_CON 0x100
+#define RK3588_PCIE3PHY_GRF_CMN_CON0 0x0
+#define RK3588_PCIE3PHY_GRF_PHY0_STATUS1 0x904
+#define RK3588_PCIE3PHY_GRF_PHY1_STATUS1 0xa04
+#define RK3588_SRAM_INIT_DONE(reg) (reg & BIT(0))
+
+#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
+#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
+#define RK3588_LANE_AGGREGATION BIT(2)
+
+struct rockchip_p3phy_ops;
+
+struct rockchip_p3phy_priv {
+ const struct rockchip_p3phy_ops *ops;
+ void __iomem *mmio;
+ /* mode: RC, EP */
+ int mode;
+ /* pcie30_phymode: Aggregation, Bifurcation */
+ int pcie30_phymode;
+ struct regmap *phy_grf;
+ struct regmap *pipe_grf;
+ struct reset_control *p30phy;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ int num_lanes;
+ u32 lanes[4];
+};
+
+struct rockchip_p3phy_ops {
+ int (*phy_init)(struct rockchip_p3phy_priv *priv);
+};
+
+static int rockchip_p3phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct rockchip_p3phy_priv *priv = phy_get_drvdata(phy);
+
+ /* We don't really care about EP/RC mode here; just record it */
+ switch (submode) {
+ case PHY_MODE_PCIE_RC:
+ priv->mode = PHY_MODE_PCIE_RC;
+ break;
+ case PHY_MODE_PCIE_EP:
+ priv->mode = PHY_MODE_PCIE_EP;
+ break;
+ default:
+ dev_err(&phy->dev, "%s, invalid mode\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rockchip_p3phy_rk3568_init(struct rockchip_p3phy_priv *priv)
+{
+ struct phy *phy = priv->phy;
+ bool bifurcation = false;
+ int ret;
+ u32 reg;
+
+ /* Deassert PCIe PMA output clamp mode */
+ regmap_write(priv->phy_grf, GRF_PCIE30PHY_CON9, GRF_PCIE30PHY_DA_OCM);
+
+ for (int i = 0; i < priv->num_lanes; i++) {
+ dev_info(&phy->dev, "lane number %d, val %d\n", i, priv->lanes[i]);
+ if (priv->lanes[i] > 1)
+ bifurcation = true;
+ }
+
+ /* Set bifurcation if needed; this does not depend on RC/EP mode */
+ if (bifurcation) {
+ dev_info(&phy->dev, "bifurcation enabled\n");
+ regmap_write(priv->phy_grf, GRF_PCIE30PHY_CON6,
+ GRF_PCIE30PHY_WR_EN | RK3568_BIFURCATION_LANE_0_1);
+ regmap_write(priv->phy_grf, GRF_PCIE30PHY_CON1,
+ GRF_PCIE30PHY_DA_OCM);
+ } else {
+ dev_dbg(&phy->dev, "bifurcation disabled\n");
+ regmap_write(priv->phy_grf, GRF_PCIE30PHY_CON6,
+ GRF_PCIE30PHY_WR_EN & ~RK3568_BIFURCATION_LANE_0_1);
+ }
+
+ reset_control_deassert(priv->p30phy);
+
+ ret = regmap_read_poll_timeout(priv->phy_grf,
+ GRF_PCIE30PHY_STATUS0,
+ reg, SRAM_INIT_DONE(reg),
+ 0, 500);
+ if (ret)
+ dev_err(&priv->phy->dev, "%s: lock failed 0x%x, check input refclk and power supply\n",
+ __func__, reg);
+ return ret;
+}
+
+static const struct rockchip_p3phy_ops rk3568_ops = {
+ .phy_init = rockchip_p3phy_rk3568_init,
+};
+
+static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
+{
+ u32 reg = 0;
+ u8 mode = 0;
+ int ret;
+
+ /* Deassert PCIe PMA output clamp mode */
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, BIT(8) | BIT(24));
+
+ /* Set bifurcation if needed */
+ for (int i = 0; i < priv->num_lanes; i++) {
+ if (!priv->lanes[i])
+ mode |= (BIT(i) << 3);
+
+ if (priv->lanes[i] > 1)
+ mode |= (BIT(i) >> 1);
+ }
+
+ if (!mode)
+ reg = RK3588_LANE_AGGREGATION;
+ else {
+ if (mode & (BIT(0) | BIT(1)))
+ reg |= RK3588_BIFURCATION_LANE_0_1;
+
+ if (mode & (BIT(2) | BIT(3)))
+ reg |= RK3588_BIFURCATION_LANE_2_3;
+ }
+
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+
+ /* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
+ if (!IS_ERR(priv->pipe_grf)) {
+ reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ if (reg)
+ regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
+ (reg << 16) | reg);
+ }
+
+ reset_control_deassert(priv->p30phy);
+
+ ret = regmap_read_poll_timeout(priv->phy_grf,
+ RK3588_PCIE3PHY_GRF_PHY0_STATUS1,
+ reg, RK3588_SRAM_INIT_DONE(reg),
+ 0, 500);
+ ret |= regmap_read_poll_timeout(priv->phy_grf,
+ RK3588_PCIE3PHY_GRF_PHY1_STATUS1,
+ reg, RK3588_SRAM_INIT_DONE(reg),
+ 0, 500);
+ if (ret)
+ dev_err(&priv->phy->dev, "lock failed 0x%x, check input refclk and power supply\n",
+ reg);
+ return ret;
+}
+
+static const struct rockchip_p3phy_ops rk3588_ops = {
+ .phy_init = rockchip_p3phy_rk3588_init,
+};
+
+static int rochchip_p3phy_init(struct phy *phy)
+{
+ struct rockchip_p3phy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
+ if (ret) {
+ dev_err(&priv->phy->dev, "failed to enable PCIe bulk clks %d\n", ret);
+ return ret;
+ }
+
+ reset_control_assert(priv->p30phy);
+ udelay(1);
+
+ if (priv->ops->phy_init) {
+ ret = priv->ops->phy_init(priv);
+ if (ret)
+ clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
+ }
+
+ return ret;
+}
+
+static int rochchip_p3phy_exit(struct phy *phy)
+{
+ struct rockchip_p3phy_priv *priv = phy_get_drvdata(phy);
+
+ clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
+ reset_control_assert(priv->p30phy);
+ return 0;
+}
+
+static const struct phy_ops rochchip_p3phy_ops = {
+ .init = rochchip_p3phy_init,
+ .exit = rochchip_p3phy_exit,
+ .set_mode = rockchip_p3phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int rockchip_p3phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct rockchip_p3phy_priv *priv;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mmio = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(priv->mmio)) {
+ ret = PTR_ERR(priv->mmio);
+ return ret;
+ }
+
+ priv->ops = of_device_get_match_data(&pdev->dev);
+ if (!priv->ops) {
+ dev_err(dev, "no of match data provided\n");
+ return -EINVAL;
+ }
+
+ priv->phy_grf = syscon_regmap_lookup_by_phandle(np, "rockchip,phy-grf");
+ if (IS_ERR(priv->phy_grf)) {
+ dev_err(dev, "failed to find rockchip,phy_grf regmap\n");
+ return PTR_ERR(priv->phy_grf);
+ }
+
+ if (of_device_is_compatible(np, "rockchip,rk3588-pcie3-phy")) {
+ priv->pipe_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,pipe-grf");
+ if (IS_ERR(priv->pipe_grf))
+ dev_info(dev, "failed to find rockchip,pipe_grf regmap\n");
+ } else {
+ priv->pipe_grf = NULL;
+ }
+
+ priv->num_lanes = of_property_read_variable_u32_array(dev->of_node, "data-lanes",
+ priv->lanes, 2,
+ ARRAY_SIZE(priv->lanes));
+
+ /* if no data-lanes assume aggregation */
+ if (priv->num_lanes == -EINVAL) {
+ dev_dbg(dev, "no data-lanes property found\n");
+ priv->num_lanes = 1;
+ priv->lanes[0] = 1;
+ } else if (priv->num_lanes < 0) {
+ dev_err(dev, "failed to read data-lanes property %d\n", priv->num_lanes);
+ return priv->num_lanes;
+ }
+
+ priv->phy = devm_phy_create(dev, NULL, &rochchip_p3phy_ops);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "failed to create combphy\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ priv->p30phy = devm_reset_control_get_optional_exclusive(dev, "phy");
+ if (IS_ERR(priv->p30phy)) {
+ return dev_err_probe(dev, PTR_ERR(priv->p30phy),
+ "failed to get phy reset control\n");
+ }
+ if (!priv->p30phy)
+ dev_info(dev, "no phy reset control specified\n");
+
+ priv->num_clks = devm_clk_bulk_get_all(dev, &priv->clks);
+ if (priv->num_clks < 1)
+ return -ENODEV;
+
+ dev_set_drvdata(dev, priv);
+ phy_set_drvdata(priv->phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id rockchip_p3phy_of_match[] = {
+ { .compatible = "rockchip,rk3568-pcie3-phy", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3588-pcie3-phy", .data = &rk3588_ops },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rockchip_p3phy_of_match);
+
+static struct platform_driver rockchip_p3phy_driver = {
+ .probe = rockchip_p3phy_probe,
+ .driver = {
+ .name = "rockchip-snps-pcie3-phy",
+ .of_match_table = rockchip_p3phy_of_match,
+ },
+};
+module_platform_driver(rockchip_p3phy_driver);
+MODULE_DESCRIPTION("Rockchip Synopsys PCIe 3.0 PHY driver");
+MODULE_LICENSE("GPL");
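A reading note for the GRF writes in this new driver: Rockchip GRF registers use a hiword write-enable convention, where bits [31:16] select which of bits [15:0] are actually updated. That is what GRF_PCIE30PHY_WR_EN ((0xf << 16)) and the bare (0x7 << 16) literal encode. A hypothetical helper making the encoding explicit:

#include <linux/regmap.h>

/* Only the low-half bits whose enable bit (same position + 16) is set are
 * written; all other bits of the register keep their value. */
static int grf_write_masked(struct regmap *grf, u32 reg, u16 mask, u16 val)
{
	return regmap_write(grf, reg, ((u32)mask << 16) | (val & mask));
}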
diff --git a/drivers/phy/sunplus/Kconfig b/drivers/phy/sunplus/Kconfig
new file mode 100644
index 000000000000..3bd3cfb53a63
--- /dev/null
+++ b/drivers/phy/sunplus/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config PHY_SUNPLUS_USB
+ tristate "Sunplus SP7021 USB 2.0 PHY driver"
+ depends on OF && (SOC_SP7021 || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable this to support the USB 2.0 PHY on Sunplus SP7021
+ SoC. The USB 2.0 PHY controller supports battery charger
+ and synchronous signals, various power down modes including
+ operating, partial and suspend modes, and high-speed,
+ full-speed and low-speed data transfer.
diff --git a/drivers/phy/sunplus/Makefile b/drivers/phy/sunplus/Makefile
new file mode 100644
index 000000000000..71754d5cb545
--- /dev/null
+++ b/drivers/phy/sunplus/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PHY_SUNPLUS_USB) += phy-sunplus-usb2.o
diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
new file mode 100644
index 000000000000..b932087c55b2
--- /dev/null
+++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Sunplus SP7021 USB 2.0 phy driver
+ *
+ * Copyright (C) 2022 Sunplus Technology Inc., All rights reserved.
+ *
+ * Note 1: register accesses of Sunplus SP7021 require non-posted write commands.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#define HIGH_MASK_BITS GENMASK(31, 16)
+#define LOW_MASK_BITS GENMASK(15, 0)
+#define OTP_DISC_LEVEL_DEFAULT 0xd
+
+/* GROUP UPHY */
+#define CONFIG1 0x4
+#define J_HS_TX_PWRSAV BIT(5)
+#define CONFIG3 0xc
+#define J_FORCE_DISC_ON BIT(5)
+#define J_DEBUG_CTRL_ADDR_MACRO BIT(0)
+#define CONFIG7 0x1c
+#define J_DISC 0X1f
+#define CONFIG9 0x24
+#define J_ECO_PATH BIT(6)
+#define CONFIG16 0x40
+#define J_TBCWAIT_MASK GENMASK(6, 5)
+#define J_TBCWAIT_1P1_MS FIELD_PREP(J_TBCWAIT_MASK, 0)
+#define J_TVDM_SRC_DIS_MASK GENMASK(4, 3)
+#define J_TVDM_SRC_DIS_8P2_MS FIELD_PREP(J_TVDM_SRC_DIS_MASK, 3)
+#define J_TVDM_SRC_EN_MASK GENMASK(2, 1)
+#define J_TVDM_SRC_EN_1P6_MS FIELD_PREP(J_TVDM_SRC_EN_MASK, 0)
+#define J_BC_EN BIT(0)
+#define CONFIG17 0x44
+#define IBG_TRIM0_MASK GENMASK(7, 5)
+#define IBG_TRIM0_SSLVHT FIELD_PREP(IBG_TRIM0_MASK, 4)
+#define J_VDATREE_TRIM_MASK GENMASK(4, 1)
+#define J_VDATREE_TRIM_DEFAULT FIELD_PREP(J_VDATREE_TRIM_MASK, 9)
+#define CONFIG23 0x5c
+#define PROB_MASK GENMASK(5, 3)
+#define PROB FIELD_PREP(PROB_MASK, 7)
+
+/* GROUP MOON4 */
+#define UPHY_CONTROL0 0x0
+#define UPHY_CONTROL1 0x4
+#define UPHY_CONTROL2 0x8
+#define MO1_UPHY_RX_CLK_SEL BIT(6)
+#define MASK_MO1_UPHY_RX_CLK_SEL BIT(6 + 16)
+#define UPHY_CONTROL3 0xc
+#define MO1_UPHY_PLL_POWER_OFF_SEL BIT(7)
+#define MASK_MO1_UPHY_PLL_POWER_OFF_SEL BIT(7 + 16)
+#define MO1_UPHY_PLL_POWER_OFF BIT(3)
+#define MASK_UPHY_PLL_POWER_OFF BIT(3 + 16)
+
+struct sp_usbphy {
+ struct device *dev;
+ struct resource *phy_res_mem;
+ struct resource *moon4_res_mem;
+ struct reset_control *rstc;
+ struct clk *phy_clk;
+ void __iomem *phy_regs;
+ void __iomem *moon4_regs;
+ u32 disc_vol_addr_off;
+};
+
+static int update_disc_vol(struct sp_usbphy *usbphy)
+{
+ struct nvmem_cell *cell;
+ char *disc_name = "disc_vol";
+ ssize_t otp_l = 0;
+ char *otp_v;
+ u32 val, set;
+
+ cell = nvmem_cell_get(usbphy->dev, disc_name);
+ if (IS_ERR_OR_NULL(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ }
+
+ otp_v = nvmem_cell_read(cell, &otp_l);
+ nvmem_cell_put(cell);
+
+ if (!IS_ERR(otp_v)) {
+ set = *(otp_v + 1);
+ set = (set << (sizeof(char) * 8)) | *otp_v;
+ set = (set >> usbphy->disc_vol_addr_off) & J_DISC;
+ }
+
+ if (IS_ERR(otp_v) || set == 0)
+ set = OTP_DISC_LEVEL_DEFAULT;
+
+ val = readl(usbphy->phy_regs + CONFIG7);
+ val = (val & ~J_DISC) | set;
+ writel(val, usbphy->phy_regs + CONFIG7);
+
+ return 0;
+}
+
+static int sp_uphy_init(struct phy *phy)
+{
+ struct sp_usbphy *usbphy = phy_get_drvdata(phy);
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(usbphy->phy_clk);
+ if (ret)
+ goto err_clk;
+
+ ret = reset_control_deassert(usbphy->rstc);
+ if (ret)
+ goto err_reset;
+
+ /* Default value modification */
+ writel(HIGH_MASK_BITS | 0x4002, usbphy->moon4_regs + UPHY_CONTROL0);
+ writel(HIGH_MASK_BITS | 0x8747, usbphy->moon4_regs + UPHY_CONTROL1);
+
+ /* disconnect voltage */
+ ret = update_disc_vol(usbphy);
+ if (ret < 0)
+ return ret;
+
+ /* board uphy 0 internal register modification for tid certification */
+ val = readl(usbphy->phy_regs + CONFIG9);
+ val &= ~(J_ECO_PATH);
+ writel(val, usbphy->phy_regs + CONFIG9);
+
+ val = readl(usbphy->phy_regs + CONFIG1);
+ val &= ~(J_HS_TX_PWRSAV);
+ writel(val, usbphy->phy_regs + CONFIG1);
+
+ val = readl(usbphy->phy_regs + CONFIG23);
+ val = (val & ~PROB) | PROB;
+ writel(val, usbphy->phy_regs + CONFIG23);
+
+ /* port 0 uphy clk fix */
+ writel(MASK_MO1_UPHY_RX_CLK_SEL | MO1_UPHY_RX_CLK_SEL,
+ usbphy->moon4_regs + UPHY_CONTROL2);
+
+ /* battery charger */
+ writel(J_TBCWAIT_1P1_MS | J_TVDM_SRC_DIS_8P2_MS | J_TVDM_SRC_EN_1P6_MS | J_BC_EN,
+ usbphy->phy_regs + CONFIG16);
+ writel(IBG_TRIM0_SSLVHT | J_VDATREE_TRIM_DEFAULT, usbphy->phy_regs + CONFIG17);
+
+ /* chirp mode */
+ writel(J_FORCE_DISC_ON | J_DEBUG_CTRL_ADDR_MACRO, usbphy->phy_regs + CONFIG3);
+
+ return 0;
+
+err_reset:
+ reset_control_assert(usbphy->rstc);
+err_clk:
+ clk_disable_unprepare(usbphy->phy_clk);
+
+ return ret;
+}
+
+static int sp_uphy_power_on(struct phy *phy)
+{
+ struct sp_usbphy *usbphy = phy_get_drvdata(phy);
+ u32 pll_pwr_on, pll_pwr_off;
+
+ /* PLL power off/on twice */
+ pll_pwr_off = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
+ | MO1_UPHY_PLL_POWER_OFF_SEL | MO1_UPHY_PLL_POWER_OFF;
+ pll_pwr_on = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
+ | MO1_UPHY_PLL_POWER_OFF_SEL;
+
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_on,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_on,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | 0x0,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+
+ return 0;
+}
+
+static int sp_uphy_power_off(struct phy *phy)
+{
+ struct sp_usbphy *usbphy = phy_get_drvdata(phy);
+ u32 pll_pwr_off;
+
+ pll_pwr_off = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
+ | MO1_UPHY_PLL_POWER_OFF_SEL | MO1_UPHY_PLL_POWER_OFF;
+
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | 0x0,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+
+ return 0;
+}
+
+static int sp_uphy_exit(struct phy *phy)
+{
+ struct sp_usbphy *usbphy = phy_get_drvdata(phy);
+
+ reset_control_assert(usbphy->rstc);
+ clk_disable_unprepare(usbphy->phy_clk);
+
+ return 0;
+}
+
+static const struct phy_ops sp_uphy_ops = {
+ .init = sp_uphy_init,
+ .power_on = sp_uphy_power_on,
+ .power_off = sp_uphy_power_off,
+ .exit = sp_uphy_exit,
+};
+
+static const struct of_device_id sp_uphy_dt_ids[] = {
+ {.compatible = "sunplus,sp7021-usb2-phy", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sp_uphy_dt_ids);
+
+static int sp_usb_phy_probe(struct platform_device *pdev)
+{
+ struct sp_usbphy *usbphy;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int ret;
+
+ usbphy = devm_kzalloc(&pdev->dev, sizeof(*usbphy), GFP_KERNEL);
+ if (!usbphy)
+ return -ENOMEM;
+
+ usbphy->dev = &pdev->dev;
+
+ usbphy->phy_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ usbphy->phy_regs = devm_ioremap_resource(&pdev->dev, usbphy->phy_res_mem);
+ if (IS_ERR(usbphy->phy_regs))
+ return PTR_ERR(usbphy->phy_regs);
+
+ usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
+ usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
+ resource_size(usbphy->moon4_res_mem));
+ if (IS_ERR(usbphy->moon4_regs))
+ return PTR_ERR(usbphy->moon4_regs);
+
+ usbphy->phy_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usbphy->phy_clk))
+ return PTR_ERR(usbphy->phy_clk);
+
+ usbphy->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(usbphy->rstc))
+ return PTR_ERR(usbphy->rstc);
+
+ of_property_read_u32(pdev->dev.of_node, "sunplus,disc-vol-addr-off",
+ &usbphy->disc_vol_addr_off);
+
+ phy = devm_phy_create(&pdev->dev, NULL, &sp_uphy_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ return ret;
+ }
+
+ phy_set_drvdata(phy, usbphy);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver sunplus_usb_phy_driver = {
+ .probe = sp_usb_phy_probe,
+ .driver = {
+ .name = "sunplus-usb2-phy",
+ .of_match_table = sp_uphy_dt_ids,
+ },
+};
+module_platform_driver(sunplus_usb_phy_driver);
+
+MODULE_AUTHOR("Vincent Shih <vincent.shih@sunplus.com>");
+MODULE_DESCRIPTION("Sunplus USB 2.0 phy driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index ae3915ed9fef..0996ede63387 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -638,7 +638,7 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
mutex_unlock(&padctl->lock);
}
-static void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
+static void tegra186_utmi_pad_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
@@ -656,6 +656,8 @@ static void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
return;
}
+ dev_dbg(dev, "power on UTMI pad %u\n", index);
+
tegra186_utmi_bias_pad_power_on(padctl);
udelay(2);
@@ -669,7 +671,7 @@ static void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
}
-static void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
+static void tegra186_utmi_pad_power_down(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
@@ -679,6 +681,8 @@ static void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
if (!phy)
return;
+ dev_dbg(padctl->dev, "power down UTMI pad %u\n", index);
+
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value |= USB2_OTG_PD;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -849,15 +853,14 @@ static int tegra186_utmi_phy_power_on(struct phy *phy)
value |= RPD_CTRL(priv->calib.rpd_ctrl);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
- /* TODO: pad power saving */
- tegra_phy_xusb_utmi_pad_power_on(phy);
+ tegra186_utmi_pad_power_on(phy);
+
return 0;
}
static int tegra186_utmi_phy_power_off(struct phy *phy)
{
- /* TODO: pad power saving */
- tegra_phy_xusb_utmi_pad_power_down(phy);
+ tegra186_utmi_pad_power_down(phy);
return 0;
}
@@ -1381,12 +1384,9 @@ tegra186_xusb_read_fuse_calibration(struct tegra186_xusb_padctl *padctl)
return -ENOMEM;
err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to read calibration fuse: %d\n",
- err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to read calibration fuse\n");
dev_dbg(dev, "FUSE_USB_CALIB_0 %#x\n", value);
@@ -1486,6 +1486,8 @@ static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.suspend_noirq = tegra186_xusb_padctl_suspend_noirq,
.resume_noirq = tegra186_xusb_padctl_resume_noirq,
.vbus_override = tegra186_xusb_padctl_vbus_override,
+ .utmi_pad_power_on = tegra186_utmi_pad_power_on,
+ .utmi_pad_power_down = tegra186_utmi_pad_power_down,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index aa5237eacd29..95091876c422 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -656,6 +656,7 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
struct usb_role_switch_desc role_sx_desc = {
.fwnode = dev_fwnode(&port->dev),
.set = tegra_xusb_role_sw_set,
+ .allow_userspace_control = true,
};
int err = 0;
@@ -1270,7 +1271,7 @@ static int tegra_xusb_padctl_remove(struct platform_device *pdev)
padctl->soc->ops->remove(padctl);
- return err;
+ return 0;
}
static __maybe_unused int tegra_xusb_padctl_suspend_noirq(struct device *dev)
@@ -1458,6 +1459,26 @@ int tegra_phy_xusb_utmi_port_reset(struct phy *phy)
}
EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
+void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+
+ if (padctl->soc->ops->utmi_pad_power_on)
+ padctl->soc->ops->utmi_pad_power_on(phy);
+}
+EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_pad_power_on);
+
+void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+
+ if (padctl->soc->ops->utmi_pad_power_down)
+ padctl->soc->ops->utmi_pad_power_down(phy);
+}
+EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_pad_power_down);
+
int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
unsigned int port)
{
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index 034f7a2c28d6..8cfbbdbd6e0c 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015, Google Inc.
*/
@@ -412,6 +412,8 @@ struct tegra_xusb_padctl_ops {
unsigned int index, bool enable);
int (*vbus_override)(struct tegra_xusb_padctl *padctl, bool set);
int (*utmi_port_reset)(struct phy *phy);
+ void (*utmi_pad_power_on)(struct phy *phy);
+ void (*utmi_pad_power_down)(struct phy *phy);
};
struct tegra_xusb_padctl_soc {
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index d0ab69750c6b..0bcfd6d96b4d 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -22,6 +22,12 @@
#define AM33XX_GMII_SEL_MODE_RMII 1
#define AM33XX_GMII_SEL_MODE_RGMII 2
+/* J72xx SoC specific definitions for the CONTROL port */
+#define J72XX_GMII_SEL_MODE_QSGMII 4
+#define J72XX_GMII_SEL_MODE_QSGMII_SUB 6
+
+#define PHY_GMII_PORT(n) BIT((n) - 1)
+
enum {
PHY_GMII_SEL_PORT_MODE = 0,
PHY_GMII_SEL_RGMII_ID_MODE,
@@ -43,6 +49,7 @@ struct phy_gmii_sel_soc_data {
u32 features;
const struct reg_field (*regfields)[PHY_GMII_SEL_LAST];
bool use_of_data;
+ u64 extra_modes;
};
struct phy_gmii_sel_priv {
@@ -53,6 +60,7 @@ struct phy_gmii_sel_priv {
struct phy_gmii_sel_phy_priv *if_phys;
u32 num_ports;
u32 reg_offset;
+ u32 qsgmii_main_ports;
};
static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
@@ -88,10 +96,17 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII;
break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_QSGMII)))
+ goto unsupported;
+ if (if_phy->priv->qsgmii_main_ports & BIT(if_phy->id - 1))
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII;
+ else
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII_SUB;
+ break;
+
default:
- dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
- if_phy->id, phy_modes(submode));
- return -EINVAL;
+ goto unsupported;
}
if_phy->phy_if_mode = submode;
@@ -123,6 +138,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
}
return 0;
+
+unsupported:
+ dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
+ if_phy->id, phy_modes(submode));
+ return -EINVAL;
}
static const
@@ -188,6 +208,13 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = {
.regfields = phy_gmii_sel_fields_am654,
};
+static const
+struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = {
+ .use_of_data = true,
+ .regfields = phy_gmii_sel_fields_am654,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+};
+
static const struct of_device_id phy_gmii_sel_id_table[] = {
{
.compatible = "ti,am3352-phy-gmii-sel",
@@ -209,6 +236,10 @@ static const struct of_device_id phy_gmii_sel_id_table[] = {
.compatible = "ti,am654-phy-gmii-sel",
.data = &phy_gmii_sel_soc_am654,
},
+ {
+ .compatible = "ti,j7200-cpsw5g-phy-gmii-sel",
+ .data = &phy_gmii_sel_cpsw5g_soc_j7200,
+ },
{}
};
MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table);
@@ -350,6 +381,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
const struct of_device_id *of_id;
struct phy_gmii_sel_priv *priv;
+ u32 main_ports = 1;
int ret;
of_id = of_match_node(phy_gmii_sel_id_table, pdev->dev.of_node);
@@ -363,6 +395,15 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->soc_data = of_id->data;
priv->num_ports = priv->soc_data->num_ports;
+ of_property_read_u32(node, "ti,qsgmii-main-ports", &main_ports);
+ /*
+ * Ensure that main_ports is within bounds. If the property
+ * ti,qsgmii-main-ports is not mentioned, or the value mentioned
+ * is out of bounds, default to 1.
+ */
+ if (main_ports < 1 || main_ports > 4)
+ main_ports = 1;
+ priv->qsgmii_main_ports = PHY_GMII_PORT(main_ports);
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
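The ti,qsgmii-main-ports handling added above clamps the property to the 1-4 range (defaulting to port 1) and turns it into a one-hot mask; when a port is later configured for PHY_INTERFACE_MODE_QSGMII, the main port gets J72XX_GMII_SEL_MODE_QSGMII and every other port J72XX_GMII_SEL_MODE_QSGMII_SUB. A standalone, hypothetical sketch of that selection (macros copied from the hunk; qsgmii_mode_for_port() is illustrative):

#include <linux/bits.h>
#include <linux/types.h>

#define PHY_GMII_PORT(n)		BIT((n) - 1)
#define J72XX_GMII_SEL_MODE_QSGMII	4
#define J72XX_GMII_SEL_MODE_QSGMII_SUB	6

/* main_port is the 1-based value of ti,qsgmii-main-ports; port_id is the
 * 1-based port being configured. */
static u32 qsgmii_mode_for_port(u32 main_port, u32 port_id)
{
	if (main_port < 1 || main_port > 4)	/* missing or out of range */
		main_port = 1;

	if (PHY_GMII_PORT(main_port) & BIT(port_id - 1))
		return J72XX_GMII_SEL_MODE_QSGMII;

	return J72XX_GMII_SEL_MODE_QSGMII_SUB;
}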
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 70bac931f99a..41725c6bcdf6 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -15,6 +15,7 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include <linux/mux/consumer.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -23,6 +24,15 @@
#include <linux/regmap.h>
#include <linux/reset-controller.h>
+#define REF_CLK_19_2MHZ 19200000
+#define REF_CLK_25MHZ 25000000
+#define REF_CLK_100MHZ 100000000
+#define REF_CLK_156_25MHZ 156250000
+
+/* SCM offsets */
+#define SERDES_SUP_CTRL 0x4400
+
+/* SERDES offsets */
#define WIZ_SERDES_CTRL 0x404
#define WIZ_SERDES_TOP_CTRL 0x408
#define WIZ_SERDES_RST 0x40c
@@ -85,6 +95,18 @@ static const struct reg_field pma_cmn_refclk_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
static const struct reg_field pma_cmn_refclk1_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+
+static const struct reg_field sup_pll0_refclk_mux_sel =
+ REG_FIELD(SERDES_SUP_CTRL, 0, 1);
+static const struct reg_field sup_pll1_refclk_mux_sel =
+ REG_FIELD(SERDES_SUP_CTRL, 2, 3);
+static const struct reg_field sup_pma_cmn_refclk1_int_mode =
+ REG_FIELD(SERDES_SUP_CTRL, 4, 5);
+static const struct reg_field sup_refclk_dig_sel_10g =
+ REG_FIELD(SERDES_SUP_CTRL, 6, 7);
+static const struct reg_field sup_legacy_clk_override =
+ REG_FIELD(SERDES_SUP_CTRL, 8, 8);
+
static const char * const output_clk_names[] = {
[TI_WIZ_PLL0_REFCLK] = "pll0-refclk",
[TI_WIZ_PLL1_REFCLK] = "pll1-refclk",
@@ -129,6 +151,26 @@ static const struct reg_field p0_fullrt_div[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(3), 22, 23),
};
+static const struct reg_field p0_mac_src_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 20, 21),
+ REG_FIELD(WIZ_LANECTL(1), 20, 21),
+ REG_FIELD(WIZ_LANECTL(2), 20, 21),
+ REG_FIELD(WIZ_LANECTL(3), 20, 21),
+};
+
+static const struct reg_field p0_rxfclk_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 6, 7),
+ REG_FIELD(WIZ_LANECTL(1), 6, 7),
+ REG_FIELD(WIZ_LANECTL(2), 6, 7),
+ REG_FIELD(WIZ_LANECTL(3), 6, 7),
+};
+
+static const struct reg_field p0_refclk_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 18, 19),
+ REG_FIELD(WIZ_LANECTL(1), 18, 19),
+ REG_FIELD(WIZ_LANECTL(2), 18, 19),
+ REG_FIELD(WIZ_LANECTL(3), 18, 19),
+};
static const struct reg_field p_mac_div_sel0[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANEDIV(0), 16, 22),
REG_FIELD(WIZ_LANEDIV(1), 16, 22),
@@ -228,6 +270,27 @@ static const struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
},
};
+static const struct wiz_clk_mux_sel clk_mux_sel_10g_2_refclk[] = {
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "refclk-dig",
+ },
+};
+
static const struct clk_div_table clk_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
@@ -249,14 +312,18 @@ static const struct wiz_clk_div_sel clk_div_sel[] = {
enum wiz_type {
J721E_WIZ_16G,
- J721E_WIZ_10G,
+ J721E_WIZ_10G, /* Also for J7200 SR1.0 */
AM64_WIZ_10G,
+ J7200_WIZ_10G, /* J7200 SR2.0 */
};
struct wiz_data {
enum wiz_type type;
+ const struct reg_field *pll0_refclk_mux_sel;
+ const struct reg_field *pll1_refclk_mux_sel;
const struct reg_field *refclk_dig_sel;
const struct reg_field *pma_cmn_refclk1_dig_div;
+ const struct reg_field *pma_cmn_refclk1_int_mode;
const struct wiz_clk_mux_sel *clk_mux_sel;
unsigned int clk_div_sel_num;
};
@@ -266,6 +333,7 @@ struct wiz_data {
struct wiz {
struct regmap *regmap;
+ struct regmap *scm_regmap;
enum wiz_type type;
const struct wiz_clk_mux_sel *clk_mux_sel;
const struct wiz_clk_div_sel *clk_div_sel;
@@ -280,13 +348,18 @@ struct wiz {
struct regmap_field *p_mac_div_sel0[WIZ_MAX_LANES];
struct regmap_field *p_mac_div_sel1[WIZ_MAX_LANES];
struct regmap_field *p0_fullrt_div[WIZ_MAX_LANES];
+ struct regmap_field *p0_mac_src_sel[WIZ_MAX_LANES];
+ struct regmap_field *p0_rxfclk_sel[WIZ_MAX_LANES];
+ struct regmap_field *p0_refclk_sel[WIZ_MAX_LANES];
struct regmap_field *pma_cmn_refclk_int_mode;
+ struct regmap_field *pma_cmn_refclk1_int_mode;
struct regmap_field *pma_cmn_refclk_mode;
struct regmap_field *pma_cmn_refclk_dig_div;
struct regmap_field *pma_cmn_refclk1_dig_div;
struct regmap_field *mux_sel_field[WIZ_MUX_NUM_CLOCKS];
struct regmap_field *div_sel_field[WIZ_DIV_NUM_CLOCKS_16G];
struct regmap_field *typec_ln10_swap;
+ struct regmap_field *sup_legacy_clk_override;
struct device *dev;
u32 num_lanes;
@@ -325,7 +398,9 @@ static int wiz_p_mac_div_sel(struct wiz *wiz)
int i;
for (i = 0; i < num_lanes; i++) {
- if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) {
+ if (wiz->lane_phy_type[i] == PHY_TYPE_SGMII ||
+ wiz->lane_phy_type[i] == PHY_TYPE_QSGMII ||
+ wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
ret = regmap_field_write(wiz->p_mac_div_sel0[i], 1);
if (ret)
return ret;
@@ -354,6 +429,13 @@ static int wiz_mode_select(struct wiz *wiz)
else
continue;
+ if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
+ ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3);
+ ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3);
+ ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3);
+ mode = LANE_MODE_GEN1;
+ }
+
ret = regmap_field_write(wiz->p_standard_mode[i], mode);
if (ret)
return ret;
@@ -416,6 +498,7 @@ static int wiz_init(struct wiz *wiz)
static int wiz_regfield_init(struct wiz *wiz)
{
struct regmap *regmap = wiz->regmap;
+ struct regmap *scm_regmap = wiz->regmap; /* updated later to scm_regmap if applicable */
int num_lanes = wiz->num_lanes;
struct device *dev = wiz->dev;
const struct wiz_data *data = wiz->data;
@@ -465,27 +548,46 @@ static int wiz_regfield_init(struct wiz *wiz)
}
}
+ if (wiz->scm_regmap) {
+ scm_regmap = wiz->scm_regmap;
+ wiz->sup_legacy_clk_override =
+ devm_regmap_field_alloc(dev, scm_regmap, sup_legacy_clk_override);
+ if (IS_ERR(wiz->sup_legacy_clk_override)) {
+ dev_err(dev, "SUP_LEGACY_CLK_OVERRIDE reg field init failed\n");
+ return PTR_ERR(wiz->sup_legacy_clk_override);
+ }
+ }
+
wiz->mux_sel_field[PLL0_REFCLK] =
- devm_regmap_field_alloc(dev, regmap, pll0_refclk_mux_sel);
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pll0_refclk_mux_sel);
if (IS_ERR(wiz->mux_sel_field[PLL0_REFCLK])) {
dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[PLL0_REFCLK]);
}
wiz->mux_sel_field[PLL1_REFCLK] =
- devm_regmap_field_alloc(dev, regmap, pll1_refclk_mux_sel);
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pll1_refclk_mux_sel);
if (IS_ERR(wiz->mux_sel_field[PLL1_REFCLK])) {
dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[PLL1_REFCLK]);
}
- wiz->mux_sel_field[REFCLK_DIG] = devm_regmap_field_alloc(dev, regmap,
+ wiz->mux_sel_field[REFCLK_DIG] = devm_regmap_field_alloc(dev, scm_regmap,
*data->refclk_dig_sel);
if (IS_ERR(wiz->mux_sel_field[REFCLK_DIG])) {
dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[REFCLK_DIG]);
}
+ if (data->pma_cmn_refclk1_int_mode) {
+ wiz->pma_cmn_refclk1_int_mode =
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pma_cmn_refclk1_int_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk1_int_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK1_INT_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk1_int_mode);
+ }
+ }
+
for (i = 0; i < num_lanes; i++) {
wiz->p_enable[i] = devm_regmap_field_alloc(dev, regmap,
p_enable[i]);
@@ -523,6 +625,24 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->p0_fullrt_div[i]);
}
+ wiz->p0_mac_src_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_mac_src_sel[i]);
+ if (IS_ERR(wiz->p0_mac_src_sel[i])) {
+ dev_err(dev, "P%d_MAC_SRC_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_mac_src_sel[i]);
+ }
+
+ wiz->p0_rxfclk_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_rxfclk_sel[i]);
+ if (IS_ERR(wiz->p0_rxfclk_sel[i])) {
+ dev_err(dev, "P%d_RXFCLK_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_rxfclk_sel[i]);
+ }
+
+ wiz->p0_refclk_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_refclk_sel[i]);
+ if (IS_ERR(wiz->p0_refclk_sel[i])) {
+ dev_err(dev, "P%d_REFCLK_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_refclk_sel[i]);
+ }
+
wiz->p_mac_div_sel0[i] =
devm_regmap_field_alloc(dev, regmap, p_mac_div_sel0[i]);
if (IS_ERR(wiz->p_mac_div_sel0[i])) {
@@ -597,6 +717,8 @@ static int wiz_phy_en_refclk_register(struct wiz *wiz)
struct device *dev = wiz->dev;
struct clk_init_data *init;
struct clk *clk;
+ char *clk_name;
+ unsigned int sz;
wiz_phy_en_refclk = devm_kzalloc(dev, sizeof(*wiz_phy_en_refclk), GFP_KERNEL);
if (!wiz_phy_en_refclk)
@@ -606,12 +728,23 @@ static int wiz_phy_en_refclk_register(struct wiz *wiz)
init->ops = &wiz_phy_en_refclk_ops;
init->flags = 0;
- init->name = output_clk_names[TI_WIZ_PHY_EN_REFCLK];
+
+ sz = strlen(dev_name(dev)) + strlen(output_clk_names[TI_WIZ_PHY_EN_REFCLK]) + 2;
+
+ clk_name = kzalloc(sz, GFP_KERNEL);
+ if (!clk_name)
+ return -ENOMEM;
+
+ snprintf(clk_name, sz, "%s_%s", dev_name(dev), output_clk_names[TI_WIZ_PHY_EN_REFCLK]);
+ init->name = clk_name;
wiz_phy_en_refclk->phy_en_refclk = wiz->phy_en_refclk;
wiz_phy_en_refclk->hw.init = init;
clk = devm_clk_register(dev, &wiz_phy_en_refclk->hw);
+
+ kfree(clk_name);
+
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -856,9 +989,13 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
struct device_node *clk_node;
int i;
- if (wiz->type == AM64_WIZ_10G) {
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
of_clk_del_provider(dev->of_node);
return;
+ default:
+ break;
}
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
@@ -885,9 +1022,6 @@ static int wiz_clock_register(struct wiz *wiz)
int ret;
int i;
- if (wiz->type != AM64_WIZ_10G)
- return 0;
-
clk_index = TI_WIZ_PLL0_REFCLK;
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++, clk_index++) {
ret = wiz_mux_clk_register(wiz, wiz->mux_sel_field[i], &clk_mux_sel[i], clk_index);
@@ -937,6 +1071,41 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
else
regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
+ switch (rate) {
+ case REF_CLK_100MHZ:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0x2);
+ break;
+ case REF_CLK_156_25MHZ:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0x3);
+ break;
+ default:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (wiz->data->pma_cmn_refclk1_int_mode) {
+ clk = devm_clk_get(dev, "core_ref1_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "core_ref1_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ wiz->input_clks[WIZ_CORE_REFCLK1] = clk;
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x1);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x3);
+ }
+
clk = devm_clk_get(dev, "ext_ref_clk");
if (IS_ERR(clk)) {
dev_err(dev, "ext_ref_clk clock not found\n");
@@ -951,11 +1120,15 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
else
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
- if (wiz->type == AM64_WIZ_10G) {
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
ret = wiz_clock_register(wiz);
if (ret)
dev_err(dev, "Failed to register wiz clocks\n");
return ret;
+ default:
+ break;
}
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
@@ -1025,12 +1198,19 @@ static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
static int wiz_phy_fullrt_div(struct wiz *wiz, int lane)
{
- if (wiz->type != AM64_WIZ_10G)
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE)
+ return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1);
+ break;
+ case J721E_WIZ_10G:
+ case J7200_WIZ_10G:
+ if (wiz->lane_phy_type[lane] == PHY_TYPE_SGMII)
+ return regmap_field_write(wiz->p0_fullrt_div[lane], 0x2);
+ break;
+ default:
return 0;
-
- if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE)
- return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1);
-
+ }
return 0;
}
@@ -1083,6 +1263,8 @@ static const struct regmap_config wiz_regmap_config = {
static struct wiz_data j721e_16g_data = {
.type = J721E_WIZ_16G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_16g,
.pma_cmn_refclk1_dig_div = &pma_cmn_refclk1_dig_div,
.clk_mux_sel = clk_mux_sel_16g,
@@ -1091,6 +1273,8 @@ static struct wiz_data j721e_16g_data = {
static struct wiz_data j721e_10g_data = {
.type = J721E_WIZ_10G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_10g,
.clk_mux_sel = clk_mux_sel_10g,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
@@ -1098,11 +1282,23 @@ static struct wiz_data j721e_10g_data = {
static struct wiz_data am64_10g_data = {
.type = AM64_WIZ_10G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_10g,
.clk_mux_sel = clk_mux_sel_10g,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
};
+static struct wiz_data j7200_pg2_10g_data = {
+ .type = J7200_WIZ_10G,
+ .pll0_refclk_mux_sel = &sup_pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &sup_pll1_refclk_mux_sel,
+ .refclk_dig_sel = &sup_refclk_dig_sel_10g,
+ .pma_cmn_refclk1_int_mode = &sup_pma_cmn_refclk1_int_mode,
+ .clk_mux_sel = clk_mux_sel_10g_2_refclk,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
+};
+
static const struct of_device_id wiz_id_table[] = {
{
.compatible = "ti,j721e-wiz-16g", .data = &j721e_16g_data,
@@ -1113,6 +1309,9 @@ static const struct of_device_id wiz_id_table[] = {
{
.compatible = "ti,am64-wiz-10g", .data = &am64_10g_data,
},
+ {
+ .compatible = "ti,j7200-wiz-10g", .data = &j7200_pg2_10g_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, wiz_id_table);
@@ -1210,6 +1409,17 @@ static int wiz_probe(struct platform_device *pdev)
goto err_addr_to_resource;
}
+ wiz->scm_regmap = syscon_regmap_lookup_by_phandle(node, "ti,scm");
+ if (IS_ERR(wiz->scm_regmap)) {
+ if (wiz->type == J7200_WIZ_10G) {
+ dev_err(dev, "Couldn't get ti,scm regmap\n");
+ ret = -ENODEV;
+ goto err_addr_to_resource;
+ }
+
+ wiz->scm_regmap = NULL;
+ }
+
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
if (ret) {
dev_err(dev, "Failed to read num-lanes property\n");
@@ -1254,7 +1464,7 @@ static int wiz_probe(struct platform_device *pdev)
ret = wiz_get_lane_phy_types(dev, wiz);
if (ret)
- return ret;
+ goto err_addr_to_resource;
wiz->dev = dev;
wiz->regmap = regmap;
@@ -1271,6 +1481,10 @@ static int wiz_probe(struct platform_device *pdev)
goto err_addr_to_resource;
}
+ /* Enable supplemental Control override if available */
+ if (wiz->scm_regmap)
+ regmap_field_write(wiz->sup_legacy_clk_override, 1);
+
phy_reset_dev = &wiz->wiz_phy_reset_dev;
phy_reset_dev->dev = dev;
phy_reset_dev->ops = &wiz_phy_reset_ops,
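For J7200 SR2.0, the refclk mux select bits for PLL0/PLL1/refclk-dig sit in the system-controller (SCM) register space rather than in the WIZ register block, which is why the probe path above resolves a syscon regmap from the "ti,scm" phandle, allocates those fields against it, and then sets sup_legacy_clk_override so the SCM-mapped selects take effect. A minimal sketch of that syscon + regmap_field pattern follows; the EXAMPLE_CTRL offset and bit positions are illustrative assumptions, not the actual SERDES_SUP_CTRL layout from the patch.

/*
 * Illustrative only: shows the syscon + regmap_field pattern, not the
 * real WIZ/SCM register map. EXAMPLE_CTRL and its bit range are made up.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

#define EXAMPLE_CTRL		0x4080	/* hypothetical offset in the SCM space */

static const struct reg_field example_mux_sel = REG_FIELD(EXAMPLE_CTRL, 0, 1);

static int example_scm_mux_init(struct device *dev, struct device_node *node)
{
	struct regmap *scm;
	struct regmap_field *mux;

	/* Resolve the system-controller regmap via the "ti,scm" phandle. */
	scm = syscon_regmap_lookup_by_phandle(node, "ti,scm");
	if (IS_ERR(scm))
		return PTR_ERR(scm);

	/* Allocate the bit field against the syscon regmap, not the WIZ MMIO. */
	mux = devm_regmap_field_alloc(dev, scm, example_mux_sel);
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	/* Route the mux to parent index 2 (cf. the .table entries above). */
	return regmap_field_write(mux, 0x2);
}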
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index d768dcf75cf1..f71fefff400f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -135,6 +135,20 @@ config PINCTRL_BM1880
help
Pinctrl driver for Bitmain BM1880 SoC.
+config PINCTRL_CY8C95X0
+ tristate "Cypress CY8C95X0 I2C pinctrl and GPIO driver"
+ depends on I2C
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select REGMAP_I2C
+ help
+	  Support for the 20/40/60 pin Cypress CY8C95x0 pinctrl/GPIO I2C expanders.
+ This driver can also be built as a module. If so, the module will be
+ called pinctrl-cy8c95x0.
+
config PINCTRL_DA850_PUPD
tristate "TI DA850/OMAP-L138/AM18XX pull-up and pull-down groups"
depends on OF && (ARCH_DAVINCI_DA850 || COMPILE_TEST)
@@ -324,6 +338,11 @@ config PINCTRL_OCELOT
select GENERIC_PINMUX_FUNCTIONS
select OF_GPIO
select REGMAP_MMIO
+ help
+ Support for the internal GPIO interfaces on Microsemi Ocelot and
+ Jaguar2 SoCs.
+
+	  If compiled as a module, the module name will be pinctrl-ocelot.
config PINCTRL_OXNAS
bool
@@ -415,23 +434,6 @@ config PINCTRL_ST
select PINCONF
select GPIOLIB_IRQCHIP
-config PINCTRL_STARFIVE
- tristate "Pinctrl and GPIO driver for the StarFive JH7100 SoC"
- depends on SOC_STARFIVE || COMPILE_TEST
- depends on OF
- default SOC_STARFIVE
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GENERIC_PINCONF
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select OF_GPIO
- help
- Say yes here to support pin control on the StarFive JH7100 SoC.
- This also provides an interface to the GPIO pins not used by other
- peripherals supporting inputs, outputs, configuring pull-up/pull-down
- and interrupts on input changes.
-
config PINCTRL_STMFX
tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
depends on I2C
@@ -529,6 +531,7 @@ source "drivers/pinctrl/renesas/Kconfig"
source "drivers/pinctrl/samsung/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/sprd/Kconfig"
+source "drivers/pinctrl/starfive/Kconfig"
source "drivers/pinctrl/stm32/Kconfig"
source "drivers/pinctrl/sunplus/Kconfig"
source "drivers/pinctrl/sunxi/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index e76f5cdc64b0..89bfa01b5231 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
+obj-$(CONFIG_PINCTRL_CY8C95X0) += pinctrl-cy8c95x0.o
obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
@@ -43,7 +44,6 @@ obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
-obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
@@ -70,6 +70,7 @@ obj-$(CONFIG_PINCTRL_RENESAS) += renesas/
obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-y += sprd/
+obj-$(CONFIG_SOC_STARFIVE) += starfive/
obj-$(CONFIG_PINCTRL_STM32) += stm32/
obj-y += sunplus/
obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 83d47ff1cea8..a30912a92f05 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -92,19 +92,10 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr)
{
- int ret;
-
pr_debug("Disabling signal %s for %s\n", expr->signal,
expr->function);
- ret = aspeed_sig_expr_eval(ctx, expr, true);
- if (ret < 0)
- return ret;
-
- if (ret)
- return aspeed_sig_expr_set(ctx, expr, false);
-
- return 0;
+ return aspeed_sig_expr_set(ctx, expr, false);
}
/**
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6318.c b/drivers/pinctrl/bcm/pinctrl-bcm6318.c
index 9311220fb6cb..64073546310e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6318.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6318.c
@@ -27,12 +27,6 @@
#define BCM6318_PAD_REG 0x54
#define BCM6328_PAD_MASK GENMASK(3, 0)
-struct bcm6318_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm6318_function {
const char *name;
const char * const *groups;
@@ -146,64 +140,57 @@ static unsigned gpio47_pins[] = { 47 };
static unsigned gpio48_pins[] = { 48 };
static unsigned gpio49_pins[] = { 49 };
-#define BCM6318_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm6318_pingroup bcm6318_groups[] = {
- BCM6318_GROUP(gpio0),
- BCM6318_GROUP(gpio1),
- BCM6318_GROUP(gpio2),
- BCM6318_GROUP(gpio3),
- BCM6318_GROUP(gpio4),
- BCM6318_GROUP(gpio5),
- BCM6318_GROUP(gpio6),
- BCM6318_GROUP(gpio7),
- BCM6318_GROUP(gpio8),
- BCM6318_GROUP(gpio9),
- BCM6318_GROUP(gpio10),
- BCM6318_GROUP(gpio11),
- BCM6318_GROUP(gpio12),
- BCM6318_GROUP(gpio13),
- BCM6318_GROUP(gpio14),
- BCM6318_GROUP(gpio15),
- BCM6318_GROUP(gpio16),
- BCM6318_GROUP(gpio17),
- BCM6318_GROUP(gpio18),
- BCM6318_GROUP(gpio19),
- BCM6318_GROUP(gpio20),
- BCM6318_GROUP(gpio21),
- BCM6318_GROUP(gpio22),
- BCM6318_GROUP(gpio23),
- BCM6318_GROUP(gpio24),
- BCM6318_GROUP(gpio25),
- BCM6318_GROUP(gpio26),
- BCM6318_GROUP(gpio27),
- BCM6318_GROUP(gpio28),
- BCM6318_GROUP(gpio29),
- BCM6318_GROUP(gpio30),
- BCM6318_GROUP(gpio31),
- BCM6318_GROUP(gpio32),
- BCM6318_GROUP(gpio33),
- BCM6318_GROUP(gpio34),
- BCM6318_GROUP(gpio35),
- BCM6318_GROUP(gpio36),
- BCM6318_GROUP(gpio37),
- BCM6318_GROUP(gpio38),
- BCM6318_GROUP(gpio39),
- BCM6318_GROUP(gpio40),
- BCM6318_GROUP(gpio41),
- BCM6318_GROUP(gpio42),
- BCM6318_GROUP(gpio43),
- BCM6318_GROUP(gpio44),
- BCM6318_GROUP(gpio45),
- BCM6318_GROUP(gpio46),
- BCM6318_GROUP(gpio47),
- BCM6318_GROUP(gpio48),
- BCM6318_GROUP(gpio49),
+static struct pingroup bcm6318_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+ BCM_PIN_GROUP(gpio32),
+ BCM_PIN_GROUP(gpio33),
+ BCM_PIN_GROUP(gpio34),
+ BCM_PIN_GROUP(gpio35),
+ BCM_PIN_GROUP(gpio36),
+ BCM_PIN_GROUP(gpio37),
+ BCM_PIN_GROUP(gpio38),
+ BCM_PIN_GROUP(gpio39),
+ BCM_PIN_GROUP(gpio40),
+ BCM_PIN_GROUP(gpio41),
+ BCM_PIN_GROUP(gpio42),
+ BCM_PIN_GROUP(gpio43),
+ BCM_PIN_GROUP(gpio44),
+ BCM_PIN_GROUP(gpio45),
+ BCM_PIN_GROUP(gpio46),
+ BCM_PIN_GROUP(gpio47),
+ BCM_PIN_GROUP(gpio48),
+ BCM_PIN_GROUP(gpio49),
};
/* GPIO_MODE */
@@ -368,10 +355,10 @@ static const char *bcm6318_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm6318_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm6318_groups[group].pins;
- *num_pins = bcm6318_groups[group].num_pins;
+ *npins = bcm6318_groups[group].npins;
return 0;
}
@@ -424,7 +411,7 @@ static int bcm6318_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned selector, unsigned group)
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm6318_pingroup *pg = &bcm6318_groups[group];
+ const struct pingroup *pg = &bcm6318_groups[group];
const struct bcm6318_function *f = &bcm6318_funcs[selector];
bcm6318_rmw_mux(pc, pg->pins[0], f->mode_val, f->mux_val);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63268.c b/drivers/pinctrl/bcm/pinctrl-bcm63268.c
index 1c1060a39597..80c2fc55ffa2 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm63268.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63268.c
@@ -40,12 +40,6 @@ enum bcm63268_pinctrl_reg {
BCM63268_BASEMODE,
};
-struct bcm63268_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm63268_function {
const char *name;
const char * const *groups;
@@ -185,74 +179,67 @@ static unsigned vdsl_phy1_grp_pins[] = { 12, 13 };
static unsigned vdsl_phy2_grp_pins[] = { 24, 25 };
static unsigned vdsl_phy3_grp_pins[] = { 26, 27 };
-#define BCM63268_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm63268_pingroup bcm63268_groups[] = {
- BCM63268_GROUP(gpio0),
- BCM63268_GROUP(gpio1),
- BCM63268_GROUP(gpio2),
- BCM63268_GROUP(gpio3),
- BCM63268_GROUP(gpio4),
- BCM63268_GROUP(gpio5),
- BCM63268_GROUP(gpio6),
- BCM63268_GROUP(gpio7),
- BCM63268_GROUP(gpio8),
- BCM63268_GROUP(gpio9),
- BCM63268_GROUP(gpio10),
- BCM63268_GROUP(gpio11),
- BCM63268_GROUP(gpio12),
- BCM63268_GROUP(gpio13),
- BCM63268_GROUP(gpio14),
- BCM63268_GROUP(gpio15),
- BCM63268_GROUP(gpio16),
- BCM63268_GROUP(gpio17),
- BCM63268_GROUP(gpio18),
- BCM63268_GROUP(gpio19),
- BCM63268_GROUP(gpio20),
- BCM63268_GROUP(gpio21),
- BCM63268_GROUP(gpio22),
- BCM63268_GROUP(gpio23),
- BCM63268_GROUP(gpio24),
- BCM63268_GROUP(gpio25),
- BCM63268_GROUP(gpio26),
- BCM63268_GROUP(gpio27),
- BCM63268_GROUP(gpio28),
- BCM63268_GROUP(gpio29),
- BCM63268_GROUP(gpio30),
- BCM63268_GROUP(gpio31),
- BCM63268_GROUP(gpio32),
- BCM63268_GROUP(gpio33),
- BCM63268_GROUP(gpio34),
- BCM63268_GROUP(gpio35),
- BCM63268_GROUP(gpio36),
- BCM63268_GROUP(gpio37),
- BCM63268_GROUP(gpio38),
- BCM63268_GROUP(gpio39),
- BCM63268_GROUP(gpio40),
- BCM63268_GROUP(gpio41),
- BCM63268_GROUP(gpio42),
- BCM63268_GROUP(gpio43),
- BCM63268_GROUP(gpio44),
- BCM63268_GROUP(gpio45),
- BCM63268_GROUP(gpio46),
- BCM63268_GROUP(gpio47),
- BCM63268_GROUP(gpio48),
- BCM63268_GROUP(gpio49),
- BCM63268_GROUP(gpio50),
- BCM63268_GROUP(gpio51),
+static struct pingroup bcm63268_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+ BCM_PIN_GROUP(gpio32),
+ BCM_PIN_GROUP(gpio33),
+ BCM_PIN_GROUP(gpio34),
+ BCM_PIN_GROUP(gpio35),
+ BCM_PIN_GROUP(gpio36),
+ BCM_PIN_GROUP(gpio37),
+ BCM_PIN_GROUP(gpio38),
+ BCM_PIN_GROUP(gpio39),
+ BCM_PIN_GROUP(gpio40),
+ BCM_PIN_GROUP(gpio41),
+ BCM_PIN_GROUP(gpio42),
+ BCM_PIN_GROUP(gpio43),
+ BCM_PIN_GROUP(gpio44),
+ BCM_PIN_GROUP(gpio45),
+ BCM_PIN_GROUP(gpio46),
+ BCM_PIN_GROUP(gpio47),
+ BCM_PIN_GROUP(gpio48),
+ BCM_PIN_GROUP(gpio49),
+ BCM_PIN_GROUP(gpio50),
+ BCM_PIN_GROUP(gpio51),
/* multi pin groups */
- BCM63268_GROUP(nand_grp),
- BCM63268_GROUP(dectpd_grp),
- BCM63268_GROUP(vdsl_phy0_grp),
- BCM63268_GROUP(vdsl_phy1_grp),
- BCM63268_GROUP(vdsl_phy2_grp),
- BCM63268_GROUP(vdsl_phy3_grp),
+ BCM_PIN_GROUP(nand_grp),
+ BCM_PIN_GROUP(dectpd_grp),
+ BCM_PIN_GROUP(vdsl_phy0_grp),
+ BCM_PIN_GROUP(vdsl_phy1_grp),
+ BCM_PIN_GROUP(vdsl_phy2_grp),
+ BCM_PIN_GROUP(vdsl_phy3_grp),
};
static const char * const led_groups[] = {
@@ -487,10 +474,10 @@ static const char *bcm63268_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm63268_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group,
const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm63268_groups[group].pins;
- *num_pins = bcm63268_groups[group].num_pins;
+ *npins = bcm63268_groups[group].npins;
return 0;
}
@@ -545,13 +532,13 @@ static int bcm63268_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned selector, unsigned group)
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm63268_pingroup *pg = &bcm63268_groups[group];
+ const struct pingroup *pg = &bcm63268_groups[group];
const struct bcm63268_function *f = &bcm63268_funcs[selector];
unsigned i;
unsigned int reg;
unsigned int val, mask;
- for (i = 0; i < pg->num_pins; i++)
+ for (i = 0; i < pg->npins; i++)
bcm63268_set_gpio(pc, pg->pins[i]);
switch (f->reg) {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6328.c b/drivers/pinctrl/bcm/pinctrl-bcm6328.c
index ffa8864abab6..1e8cc2c80c81 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6328.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6328.c
@@ -26,12 +26,6 @@
#define BCM6328_MUX_OTHER_REG 0x24
#define BCM6328_MUX_MASK GENMASK(1, 0)
-struct bcm6328_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm6328_function {
const char *name;
const char * const *groups;
@@ -125,49 +119,42 @@ static unsigned gpio31_pins[] = { 31 };
static unsigned hsspi_cs1_pins[] = { 36 };
static unsigned usb_port1_pins[] = { 38 };
-#define BCM6328_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm6328_pingroup bcm6328_groups[] = {
- BCM6328_GROUP(gpio0),
- BCM6328_GROUP(gpio1),
- BCM6328_GROUP(gpio2),
- BCM6328_GROUP(gpio3),
- BCM6328_GROUP(gpio4),
- BCM6328_GROUP(gpio5),
- BCM6328_GROUP(gpio6),
- BCM6328_GROUP(gpio7),
- BCM6328_GROUP(gpio8),
- BCM6328_GROUP(gpio9),
- BCM6328_GROUP(gpio10),
- BCM6328_GROUP(gpio11),
- BCM6328_GROUP(gpio12),
- BCM6328_GROUP(gpio13),
- BCM6328_GROUP(gpio14),
- BCM6328_GROUP(gpio15),
- BCM6328_GROUP(gpio16),
- BCM6328_GROUP(gpio17),
- BCM6328_GROUP(gpio18),
- BCM6328_GROUP(gpio19),
- BCM6328_GROUP(gpio20),
- BCM6328_GROUP(gpio21),
- BCM6328_GROUP(gpio22),
- BCM6328_GROUP(gpio23),
- BCM6328_GROUP(gpio24),
- BCM6328_GROUP(gpio25),
- BCM6328_GROUP(gpio26),
- BCM6328_GROUP(gpio27),
- BCM6328_GROUP(gpio28),
- BCM6328_GROUP(gpio29),
- BCM6328_GROUP(gpio30),
- BCM6328_GROUP(gpio31),
-
- BCM6328_GROUP(hsspi_cs1),
- BCM6328_GROUP(usb_port1),
+static struct pingroup bcm6328_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+
+ BCM_PIN_GROUP(hsspi_cs1),
+ BCM_PIN_GROUP(usb_port1),
};
/* GPIO_MODE */
@@ -292,10 +279,10 @@ static const char *bcm6328_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm6328_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm6328_groups[group].pins;
- *num_pins = bcm6328_groups[group].num_pins;
+ *npins = bcm6328_groups[group].npins;
return 0;
}
@@ -338,7 +325,7 @@ static int bcm6328_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned selector, unsigned group)
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm6328_pingroup *pg = &bcm6328_groups[group];
+ const struct pingroup *pg = &bcm6328_groups[group];
const struct bcm6328_function *f = &bcm6328_funcs[selector];
bcm6328_rmw_mux(pc, pg->pins[0], f->mode_val, f->mux_val);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6358.c b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
index 9f6cd7447887..891de49d76e7 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6358.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
@@ -35,9 +35,7 @@
#define BCM6358_MODE_MUX_SYS_IRQ BIT(15)
struct bcm6358_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
+ struct pingroup grp;
const uint16_t mode_val;
@@ -131,9 +129,7 @@ static unsigned sys_irq_grp_pins[] = { 5 };
#define BCM6358_GPIO_MUX_GROUP(n, bit, dir) \
{ \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
+ .grp = BCM_PIN_GROUP(n), \
.mode_val = BCM6358_MODE_MUX_##bit, \
.direction = dir, \
}
@@ -219,15 +215,15 @@ static int bcm6358_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
static const char *bcm6358_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
unsigned group)
{
- return bcm6358_groups[group].name;
+ return bcm6358_groups[group].grp.name;
}
static int bcm6358_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
- *pins = bcm6358_groups[group].pins;
- *num_pins = bcm6358_groups[group].num_pins;
+ *pins = bcm6358_groups[group].grp.pins;
+ *npins = bcm6358_groups[group].grp.npins;
return 0;
}
@@ -264,12 +260,12 @@ static int bcm6358_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned int mask = val;
unsigned pin;
- for (pin = 0; pin < pg->num_pins; pin++)
+ for (pin = 0; pin < pg->grp.npins; pin++)
mask |= (unsigned long)bcm6358_pins[pin].drv_data;
regmap_field_update_bits(priv->overlays, mask, val);
- for (pin = 0; pin < pg->num_pins; pin++) {
+ for (pin = 0; pin < pg->grp.npins; pin++) {
struct pinctrl_gpio_range *range;
unsigned int hw_gpio = bcm6358_pins[pin].number;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6362.c b/drivers/pinctrl/bcm/pinctrl-bcm6362.c
index 13c7230949b2..d9ba1b6c2aeb 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6362.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6362.c
@@ -35,12 +35,6 @@ enum bcm6362_pinctrl_reg {
BCM6362_BASEMODE,
};
-struct bcm6362_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm6362_function {
const char *name;
const char * const *groups;
@@ -162,63 +156,56 @@ static unsigned nand_grp_pins[] = {
18, 19, 20, 21, 22, 23, 27,
};
-#define BCM6362_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm6362_pingroup bcm6362_groups[] = {
- BCM6362_GROUP(gpio0),
- BCM6362_GROUP(gpio1),
- BCM6362_GROUP(gpio2),
- BCM6362_GROUP(gpio3),
- BCM6362_GROUP(gpio4),
- BCM6362_GROUP(gpio5),
- BCM6362_GROUP(gpio6),
- BCM6362_GROUP(gpio7),
- BCM6362_GROUP(gpio8),
- BCM6362_GROUP(gpio9),
- BCM6362_GROUP(gpio10),
- BCM6362_GROUP(gpio11),
- BCM6362_GROUP(gpio12),
- BCM6362_GROUP(gpio13),
- BCM6362_GROUP(gpio14),
- BCM6362_GROUP(gpio15),
- BCM6362_GROUP(gpio16),
- BCM6362_GROUP(gpio17),
- BCM6362_GROUP(gpio18),
- BCM6362_GROUP(gpio19),
- BCM6362_GROUP(gpio20),
- BCM6362_GROUP(gpio21),
- BCM6362_GROUP(gpio22),
- BCM6362_GROUP(gpio23),
- BCM6362_GROUP(gpio24),
- BCM6362_GROUP(gpio25),
- BCM6362_GROUP(gpio26),
- BCM6362_GROUP(gpio27),
- BCM6362_GROUP(gpio28),
- BCM6362_GROUP(gpio29),
- BCM6362_GROUP(gpio30),
- BCM6362_GROUP(gpio31),
- BCM6362_GROUP(gpio32),
- BCM6362_GROUP(gpio33),
- BCM6362_GROUP(gpio34),
- BCM6362_GROUP(gpio35),
- BCM6362_GROUP(gpio36),
- BCM6362_GROUP(gpio37),
- BCM6362_GROUP(gpio38),
- BCM6362_GROUP(gpio39),
- BCM6362_GROUP(gpio40),
- BCM6362_GROUP(gpio41),
- BCM6362_GROUP(gpio42),
- BCM6362_GROUP(gpio43),
- BCM6362_GROUP(gpio44),
- BCM6362_GROUP(gpio45),
- BCM6362_GROUP(gpio46),
- BCM6362_GROUP(gpio47),
- BCM6362_GROUP(nand_grp),
+static struct pingroup bcm6362_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+ BCM_PIN_GROUP(gpio32),
+ BCM_PIN_GROUP(gpio33),
+ BCM_PIN_GROUP(gpio34),
+ BCM_PIN_GROUP(gpio35),
+ BCM_PIN_GROUP(gpio36),
+ BCM_PIN_GROUP(gpio37),
+ BCM_PIN_GROUP(gpio38),
+ BCM_PIN_GROUP(gpio39),
+ BCM_PIN_GROUP(gpio40),
+ BCM_PIN_GROUP(gpio41),
+ BCM_PIN_GROUP(gpio42),
+ BCM_PIN_GROUP(gpio43),
+ BCM_PIN_GROUP(gpio44),
+ BCM_PIN_GROUP(gpio45),
+ BCM_PIN_GROUP(gpio46),
+ BCM_PIN_GROUP(gpio47),
+ BCM_PIN_GROUP(nand_grp),
};
static const char * const led_groups[] = {
@@ -463,10 +450,10 @@ static const char *bcm6362_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm6362_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm6362_groups[group].pins;
- *num_pins = bcm6362_groups[group].num_pins;
+ *npins = bcm6362_groups[group].npins;
return 0;
}
@@ -519,13 +506,13 @@ static int bcm6362_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned selector, unsigned group)
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm6362_pingroup *pg = &bcm6362_groups[group];
+ const struct pingroup *pg = &bcm6362_groups[group];
const struct bcm6362_function *f = &bcm6362_funcs[selector];
unsigned i;
unsigned int reg;
unsigned int val, mask;
- for (i = 0; i < pg->num_pins; i++)
+ for (i = 0; i < pg->npins; i++)
bcm6362_set_gpio(pc, pg->pins[i]);
switch (f->reg) {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6368.c b/drivers/pinctrl/bcm/pinctrl-bcm6368.c
index b33a74aec82b..6208467ba6f9 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6368.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6368.c
@@ -26,12 +26,6 @@
#define BCM6368_BASEMODE_GPIO 0x0
#define BCM6368_BASEMODE_UART1 0x1
-struct bcm6368_pingroup {
- const char *name;
- const unsigned * const pins;
- const unsigned num_pins;
-};
-
struct bcm6368_function {
const char *name;
const char * const *groups;
@@ -127,47 +121,40 @@ static unsigned gpio30_pins[] = { 30 };
static unsigned gpio31_pins[] = { 31 };
static unsigned uart1_grp_pins[] = { 30, 31, 32, 33 };
-#define BCM6368_GROUP(n) \
- { \
- .name = #n, \
- .pins = n##_pins, \
- .num_pins = ARRAY_SIZE(n##_pins), \
- }
-
-static struct bcm6368_pingroup bcm6368_groups[] = {
- BCM6368_GROUP(gpio0),
- BCM6368_GROUP(gpio1),
- BCM6368_GROUP(gpio2),
- BCM6368_GROUP(gpio3),
- BCM6368_GROUP(gpio4),
- BCM6368_GROUP(gpio5),
- BCM6368_GROUP(gpio6),
- BCM6368_GROUP(gpio7),
- BCM6368_GROUP(gpio8),
- BCM6368_GROUP(gpio9),
- BCM6368_GROUP(gpio10),
- BCM6368_GROUP(gpio11),
- BCM6368_GROUP(gpio12),
- BCM6368_GROUP(gpio13),
- BCM6368_GROUP(gpio14),
- BCM6368_GROUP(gpio15),
- BCM6368_GROUP(gpio16),
- BCM6368_GROUP(gpio17),
- BCM6368_GROUP(gpio18),
- BCM6368_GROUP(gpio19),
- BCM6368_GROUP(gpio20),
- BCM6368_GROUP(gpio21),
- BCM6368_GROUP(gpio22),
- BCM6368_GROUP(gpio23),
- BCM6368_GROUP(gpio24),
- BCM6368_GROUP(gpio25),
- BCM6368_GROUP(gpio26),
- BCM6368_GROUP(gpio27),
- BCM6368_GROUP(gpio28),
- BCM6368_GROUP(gpio29),
- BCM6368_GROUP(gpio30),
- BCM6368_GROUP(gpio31),
- BCM6368_GROUP(uart1_grp),
+static struct pingroup bcm6368_groups[] = {
+ BCM_PIN_GROUP(gpio0),
+ BCM_PIN_GROUP(gpio1),
+ BCM_PIN_GROUP(gpio2),
+ BCM_PIN_GROUP(gpio3),
+ BCM_PIN_GROUP(gpio4),
+ BCM_PIN_GROUP(gpio5),
+ BCM_PIN_GROUP(gpio6),
+ BCM_PIN_GROUP(gpio7),
+ BCM_PIN_GROUP(gpio8),
+ BCM_PIN_GROUP(gpio9),
+ BCM_PIN_GROUP(gpio10),
+ BCM_PIN_GROUP(gpio11),
+ BCM_PIN_GROUP(gpio12),
+ BCM_PIN_GROUP(gpio13),
+ BCM_PIN_GROUP(gpio14),
+ BCM_PIN_GROUP(gpio15),
+ BCM_PIN_GROUP(gpio16),
+ BCM_PIN_GROUP(gpio17),
+ BCM_PIN_GROUP(gpio18),
+ BCM_PIN_GROUP(gpio19),
+ BCM_PIN_GROUP(gpio20),
+ BCM_PIN_GROUP(gpio21),
+ BCM_PIN_GROUP(gpio22),
+ BCM_PIN_GROUP(gpio23),
+ BCM_PIN_GROUP(gpio24),
+ BCM_PIN_GROUP(gpio25),
+ BCM_PIN_GROUP(gpio26),
+ BCM_PIN_GROUP(gpio27),
+ BCM_PIN_GROUP(gpio28),
+ BCM_PIN_GROUP(gpio29),
+ BCM_PIN_GROUP(gpio30),
+ BCM_PIN_GROUP(gpio31),
+ BCM_PIN_GROUP(uart1_grp),
};
static const char * const analog_afe_0_groups[] = {
@@ -358,10 +345,10 @@ static const char *bcm6368_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
static int bcm6368_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group, const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
*pins = bcm6368_groups[group].pins;
- *num_pins = bcm6368_groups[group].num_pins;
+ *npins = bcm6368_groups[group].npins;
return 0;
}
@@ -393,14 +380,14 @@ static int bcm6368_pinctrl_set_mux(struct pinctrl_dev *pctldev,
{
struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
struct bcm6368_priv *priv = pc->driver_data;
- const struct bcm6368_pingroup *pg = &bcm6368_groups[group];
+ const struct pingroup *pg = &bcm6368_groups[group];
const struct bcm6368_function *fun = &bcm6368_funcs[selector];
int i, pin;
if (fun->basemode) {
unsigned int mask = 0;
- for (i = 0; i < pg->num_pins; i++) {
+ for (i = 0; i < pg->npins; i++) {
pin = pg->pins[i];
if (pin < BCM63XX_BANK_GPIOS)
mask |= BIT(pin);
@@ -419,7 +406,7 @@ static int bcm6368_pinctrl_set_mux(struct pinctrl_dev *pctldev,
BIT(pin));
}
- for (pin = 0; pin < pg->num_pins; pin++) {
+ for (pin = 0; pin < pg->npins; pin++) {
struct pinctrl_gpio_range *range;
int hw_gpio = bcm6368_pins[pin].number;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63xx.h b/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
index d58c8cd5b6b8..95243027ecd9 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
@@ -21,6 +21,8 @@ struct bcm63xx_pinctrl_soc {
unsigned int ngpios;
};
+#define BCM_PIN_GROUP(n) PINCTRL_PINGROUP(#n, n##_pins, ARRAY_SIZE(n##_pins))
+
struct bcm63xx_pinctrl {
struct device *dev;
struct regmap *regs;
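The BCM63xx conversions above drop each driver's private *_pingroup struct in favour of the generic struct pingroup, with BCM_PIN_GROUP() wrapping PINCTRL_PINGROUP() from <linux/pinctrl/pinctrl.h>. As a rough sketch (gpio0 simply mirrors the pattern these drivers already use), BCM_PIN_GROUP(gpio0) boils down to an initializer like the one below, which is exactly what the converted get_group_pins() callbacks read back through .pins and .npins.

/*
 * Rough expansion of BCM_PIN_GROUP(gpio0); illustrative, not lifted from
 * any one of the drivers above.
 */
#include <linux/kernel.h>		/* ARRAY_SIZE() */
#include <linux/pinctrl/pinctrl.h>	/* struct pingroup, PINCTRL_PINGROUP() */

static const unsigned int gpio0_pins[] = { 0 };

static const struct pingroup example_group = {
	.name  = "gpio0",
	.pins  = gpio0_pins,
	.npins = ARRAY_SIZE(gpio0_pins),
};

Centralizing the group type removes several copies of the same three-field struct, while bcm6358, which carries extra mode_val/direction data per group, keeps it by embedding the generic struct pingroup as its .grp member.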
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index 65a86543c58c..465cc96814a1 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -233,10 +233,8 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"cru_gpio_control");
ns_pinctrl->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ns_pinctrl->base)) {
- dev_err(dev, "Failed to map pinctrl regs\n");
+ if (IS_ERR(ns_pinctrl->base))
return PTR_ERR(ns_pinctrl->base);
- }
memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index a073eedd71aa..1e427ea4d31b 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -209,7 +209,7 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
for (i = 0; i < pctrl->desc->ngroups; i++) {
desc_group = pctrl->desc->groups + i;
- /* compute the maxiumum number of functions a group can have */
+ /* compute the maximum number of functions a group can have */
max_functions += 1 << (desc_group->bit_width + 1);
}
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index d96b1130efd3..7a32f77792d9 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -119,28 +119,32 @@ config PINCTRL_IMX7ULP
config PINCTRL_IMX8MM
tristate "IMX8MM pinctrl driver"
- depends on ARCH_MXC
+ depends on OF
+ depends on SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mm pinctrl driver
config PINCTRL_IMX8MN
tristate "IMX8MN pinctrl driver"
- depends on ARCH_MXC
+ depends on OF
+ depends on SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mn pinctrl driver
config PINCTRL_IMX8MP
tristate "IMX8MP pinctrl driver"
- depends on ARCH_MXC
+ depends on OF
+ depends on SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mp pinctrl driver
config PINCTRL_IMX8MQ
tristate "IMX8MQ pinctrl driver"
- depends on ARCH_MXC
+ depends on OF
+ depends on SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mq pinctrl driver
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 1600a2c18eee..fed02c6fea06 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -162,6 +162,18 @@ config PINCTRL_MT8186
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT8188
+ bool "MediaTek MT8188 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+ help
+	  Say yes here to support the pin controller and GPIO driver
+	  on the MediaTek MT8188 SoC.
+	  On MTK platforms, virtual GPIOs are supported and used to
+	  map specific EINTs that do not have a real GPIO pin.
+
config PINCTRL_MT8192
bool "Mediatek MT8192 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index c8f226ae36c9..53265404a39d 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
obj-$(CONFIG_PINCTRL_MT8186) += pinctrl-mt8186.o
+obj-$(CONFIG_PINCTRL_MT8188) += pinctrl-mt8188.o
obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o
obj-$(CONFIG_PINCTRL_MT8195) += pinctrl-mt8195.o
obj-$(CONFIG_PINCTRL_MT8365) += pinctrl-mt8365.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8188.c b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
new file mode 100644
index 000000000000..d0e75c1b4417
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
@@ -0,0 +1,1673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Hui Liu <hui.liu@mediatek.com>
+ *
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt8188.h"
+#include "pinctrl-paris.h"
+
+/* MT8188 has multiple bases for programming pin configuration, listed below:
+ * iocfg[0]:0x10005000, iocfg[1]:0x11c00000, iocfg[2]:0x11e10000,
+ * iocfg[3]:0x11e20000, iocfg[4]:0x11ea0000
+ * _i_base can be used to indicate which base the pin should be mapped into.
+ */
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
+static const struct mtk_pin_field_calc mt8188_pin_mode_range[] = {
+ PIN_FIELD(0, 177, 0x0300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_dir_range[] = {
+ PIN_FIELD(0, 177, 0x0000, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_di_range[] = {
+ PIN_FIELD(0, 177, 0x0200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_do_range[] = {
+ PIN_FIELD(0, 177, 0x0100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x0170, 0x10, 8, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x0170, 0x10, 9, 1),
+ PIN_FIELD_BASE(2, 2, 1, 0x0170, 0x10, 10, 1),
+ PIN_FIELD_BASE(3, 3, 1, 0x0170, 0x10, 11, 1),
+ PIN_FIELD_BASE(4, 4, 1, 0x0170, 0x10, 18, 1),
+ PIN_FIELD_BASE(5, 5, 1, 0x0170, 0x10, 18, 1),
+ PIN_FIELD_BASE(6, 6, 1, 0x0170, 0x10, 18, 1),
+ PIN_FIELD_BASE(7, 7, 1, 0x0170, 0x10, 12, 1),
+ PIN_FIELD_BASE(8, 8, 1, 0x0170, 0x10, 13, 1),
+ PIN_FIELD_BASE(9, 9, 1, 0x0170, 0x10, 14, 1),
+ PIN_FIELD_BASE(10, 10, 1, 0x0170, 0x10, 15, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x0170, 0x10, 19, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x0160, 0x10, 12, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x0160, 0x10, 13, 1),
+ PIN_FIELD_BASE(14, 14, 2, 0x0160, 0x10, 14, 1),
+ PIN_FIELD_BASE(15, 15, 2, 0x0160, 0x10, 15, 1),
+ PIN_FIELD_BASE(16, 16, 3, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(17, 17, 3, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(18, 18, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(19, 19, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(21, 21, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(22, 22, 4, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 4, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 4, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 1, 0x0170, 0x10, 17, 1),
+ PIN_FIELD_BASE(26, 26, 1, 0x0170, 0x10, 17, 1),
+ PIN_FIELD_BASE(27, 27, 1, 0x0170, 0x10, 17, 1),
+ PIN_FIELD_BASE(28, 28, 1, 0x0170, 0x10, 18, 1),
+ PIN_FIELD_BASE(29, 29, 1, 0x0170, 0x10, 16, 1),
+ PIN_FIELD_BASE(30, 30, 1, 0x0170, 0x10, 17, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x0170, 0x10, 19, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0170, 0x10, 19, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0170, 0x10, 20, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0170, 0x10, 20, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0170, 0x10, 19, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0170, 0x10, 20, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0170, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0170, 0x10, 20, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x0170, 0x10, 21, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x0170, 0x10, 21, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x0170, 0x10, 21, 1),
+ PIN_FIELD_BASE(42, 42, 2, 0x0160, 0x10, 21, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x0160, 0x10, 22, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x0160, 0x10, 21, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x0160, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0170, 0x10, 16, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0170, 0x10, 16, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0170, 0x10, 16, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0170, 0x10, 25, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0170, 0x10, 28, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x0160, 0x10, 29, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x0160, 0x10, 31, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x0170, 0x10, 26, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x0170, 0x10, 29, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x0170, 0x10, 27, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x0170, 0x10, 30, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x0160, 0x10, 30, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x0170, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 4, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(66, 66, 4, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(67, 67, 4, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 4, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x0180, 0x10, 0, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x0170, 0x10, 31, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x0180, 0x10, 4, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x0180, 0x10, 3, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x0180, 0x10, 1, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x0180, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x0180, 0x10, 6, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x0180, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 1, 0x0180, 0x10, 8, 1),
+ PIN_FIELD_BASE(78, 78, 1, 0x0180, 0x10, 7, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(80, 80, 4, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(81, 81, 4, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 4, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x0160, 0x10, 26, 1),
+ PIN_FIELD_BASE(84, 84, 2, 0x0160, 0x10, 26, 1),
+ PIN_FIELD_BASE(85, 85, 2, 0x0160, 0x10, 27, 1),
+ PIN_FIELD_BASE(86, 86, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(87, 87, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(88, 88, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x0160, 0x10, 27, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0160, 0x10, 27, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0160, 0x10, 18, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0160, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0160, 0x10, 18, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0160, 0x10, 18, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0160, 0x10, 22, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0160, 0x10, 23, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0160, 0x10, 24, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0160, 0x10, 22, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0160, 0x10, 16, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0160, 0x10, 23, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0160, 0x10, 23, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0160, 0x10, 23, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0160, 0x10, 24, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0160, 0x10, 24, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0160, 0x10, 24, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0160, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0160, 0x10, 19, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0160, 0x10, 19, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0160, 0x10, 19, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0160, 0x10, 19, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0160, 0x10, 20, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0160, 0x10, 20, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0160, 0x10, 20, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0160, 0x10, 20, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x0160, 0x10, 21, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x0160, 0x10, 21, 1),
+ PIN_FIELD_BASE(121, 121, 3, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(122, 122, 3, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(123, 123, 3, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(124, 124, 3, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x0160, 0x10, 25, 1),
+ PIN_FIELD_BASE(126, 126, 2, 0x0160, 0x10, 25, 1),
+ PIN_FIELD_BASE(127, 127, 2, 0x0160, 0x10, 25, 1),
+ PIN_FIELD_BASE(128, 128, 2, 0x0160, 0x10, 25, 1),
+ PIN_FIELD_BASE(129, 129, 2, 0x0160, 0x10, 26, 1),
+ PIN_FIELD_BASE(130, 130, 2, 0x0160, 0x10, 26, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0170, 0x10, 0, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0170, 0x10, 1, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0170, 0x10, 6, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0170, 0x10, 7, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0170, 0x10, 22, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0170, 0x10, 22, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0170, 0x10, 22, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0170, 0x10, 22, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0170, 0x10, 23, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0170, 0x10, 23, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0170, 0x10, 23, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0170, 0x10, 23, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0170, 0x10, 2, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0170, 0x10, 3, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0170, 0x10, 4, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0170, 0x10, 5, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0170, 0x10, 24, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0170, 0x10, 24, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0170, 0x10, 24, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0170, 0x10, 24, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x0160, 0x10, 9, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x0160, 0x10, 8, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x0160, 0x10, 7, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x0160, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x0160, 0x10, 11, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x0160, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0160, 0x10, 0, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0160, 0x10, 5, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0160, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x0160, 0x10, 3, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x0160, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x0160, 0x10, 10, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(175, 175, 2, 0x0160, 0x10, 28, 1),
+ PIN_FIELD_BASE(176, 176, 2, 0x0160, 0x10, 28, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(2, 2, 1, 0x0080, 0x10, 28, 1),
+ PIN_FIELD_BASE(3, 3, 1, 0x0080, 0x10, 29, 1),
+ PIN_FIELD_BASE(4, 4, 1, 0x0080, 0x10, 30, 1),
+ PIN_FIELD_BASE(5, 5, 1, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(6, 6, 1, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(7, 7, 1, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(8, 8, 1, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(9, 9, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(10, 10, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(14, 14, 2, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(15, 15, 2, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(16, 16, 3, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 3, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(18, 18, 4, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(19, 19, 4, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(21, 21, 4, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 4, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 1, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(26, 26, 1, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 1, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(28, 28, 1, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(29, 29, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x0090, 0x10, 31, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0090, 0x10, 30, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(42, 42, 2, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(65, 65, 4, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(66, 66, 4, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(67, 67, 4, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(68, 68, 4, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(77, 77, 1, 0x0090, 0x10, 29, 1),
+ PIN_FIELD_BASE(78, 78, 1, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(80, 80, 4, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(81, 81, 4, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(82, 82, 4, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x0080, 0x10, 30, 1),
+ PIN_FIELD_BASE(84, 84, 2, 0x0080, 0x10, 29, 1),
+ PIN_FIELD_BASE(85, 85, 2, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(86, 86, 2, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(87, 87, 2, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(88, 88, 2, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(121, 121, 3, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 3, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(123, 123, 3, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(124, 124, 3, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(126, 126, 2, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(127, 127, 2, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 2, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(129, 129, 2, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(130, 130, 2, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(175, 175, 2, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(176, 176, 2, 0x0080, 0x10, 28, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_tdsel_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(1, 1, 1, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(2, 2, 1, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(3, 3, 1, 0x01b0, 0x10, 12, 4),
+ PIN_FIELD_BASE(4, 4, 1, 0x01c0, 0x10, 16, 4),
+ PIN_FIELD_BASE(5, 5, 1, 0x01c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(6, 6, 1, 0x01c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(7, 7, 1, 0x01b0, 0x10, 16, 4),
+ PIN_FIELD_BASE(8, 8, 1, 0x01b0, 0x10, 20, 4),
+ PIN_FIELD_BASE(9, 9, 1, 0x01b0, 0x10, 24, 4),
+ PIN_FIELD_BASE(10, 10, 1, 0x01b0, 0x10, 28, 4),
+ PIN_FIELD_BASE(11, 11, 1, 0x01c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(12, 12, 2, 0x0190, 0x10, 16, 4),
+ PIN_FIELD_BASE(13, 13, 2, 0x0190, 0x10, 20, 4),
+ PIN_FIELD_BASE(14, 14, 2, 0x0190, 0x10, 24, 4),
+ PIN_FIELD_BASE(15, 15, 2, 0x0190, 0x10, 28, 4),
+ PIN_FIELD_BASE(16, 16, 3, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(17, 17, 3, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(18, 18, 4, 0x0110, 0x10, 4, 4),
+ PIN_FIELD_BASE(19, 19, 4, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(20, 20, 4, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(21, 21, 4, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(22, 22, 4, 0x0100, 0x10, 0, 4),
+ PIN_FIELD_BASE(23, 23, 4, 0x0100, 0x10, 4, 4),
+ PIN_FIELD_BASE(24, 24, 4, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(25, 25, 1, 0x01c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(26, 26, 1, 0x01c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(27, 27, 1, 0x01c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(28, 28, 1, 0x01c0, 0x10, 12, 4),
+ PIN_FIELD_BASE(29, 29, 1, 0x01c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(30, 30, 1, 0x01c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(31, 31, 1, 0x01c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(32, 32, 1, 0x01c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(33, 33, 1, 0x01c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(34, 34, 1, 0x01c0, 0x10, 28, 4),
+ PIN_FIELD_BASE(35, 35, 1, 0x01c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(36, 36, 1, 0x01c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(37, 37, 1, 0x01c0, 0x10, 28, 4),
+ PIN_FIELD_BASE(38, 38, 1, 0x01c0, 0x10, 28, 4),
+ PIN_FIELD_BASE(39, 39, 1, 0x01c0, 0x10, 28, 4),
+ PIN_FIELD_BASE(40, 40, 1, 0x01d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(41, 41, 1, 0x01d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(42, 42, 2, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(43, 43, 2, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(44, 44, 2, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(45, 45, 2, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(46, 46, 3, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(47, 47, 1, 0x01c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(48, 48, 1, 0x01c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(49, 49, 1, 0x01c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(50, 50, 3, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(51, 51, 3, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(52, 52, 3, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(53, 53, 3, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(54, 54, 3, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(55, 55, 1, 0x01c0, 0x10, 12, 4),
+ PIN_FIELD_BASE(56, 56, 1, 0x01c0, 0x10, 12, 4),
+ PIN_FIELD_BASE(57, 57, 2, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(58, 58, 2, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(59, 59, 1, 0x01c0, 0x10, 16, 4),
+ PIN_FIELD_BASE(60, 60, 1, 0x01c0, 0x10, 12, 4),
+ PIN_FIELD_BASE(61, 61, 1, 0x01c0, 0x10, 16, 4),
+ PIN_FIELD_BASE(62, 62, 1, 0x01c0, 0x10, 16, 4),
+ PIN_FIELD_BASE(63, 63, 2, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(64, 64, 2, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(65, 65, 4, 0x0110, 0x10, 12, 4),
+ PIN_FIELD_BASE(66, 66, 4, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(67, 67, 4, 0x0110, 0x10, 12, 4),
+ PIN_FIELD_BASE(68, 68, 4, 0x0110, 0x10, 12, 4),
+ PIN_FIELD_BASE(69, 69, 1, 0x01d0, 0x10, 16, 4),
+ PIN_FIELD_BASE(70, 70, 1, 0x01d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(71, 71, 1, 0x01e0, 0x10, 0, 4),
+ PIN_FIELD_BASE(72, 72, 1, 0x01d0, 0x10, 28, 4),
+ PIN_FIELD_BASE(73, 73, 1, 0x01d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(74, 74, 1, 0x01d0, 0x10, 24, 4),
+ PIN_FIELD_BASE(75, 75, 1, 0x01e0, 0x10, 8, 4),
+ PIN_FIELD_BASE(76, 76, 1, 0x01e0, 0x10, 4, 4),
+ PIN_FIELD_BASE(77, 77, 1, 0x01e0, 0x10, 16, 4),
+ PIN_FIELD_BASE(78, 78, 1, 0x01e0, 0x10, 12, 4),
+ PIN_FIELD_BASE(79, 79, 4, 0x0110, 0x10, 20, 4),
+ PIN_FIELD_BASE(80, 80, 4, 0x0110, 0x10, 16, 4),
+ PIN_FIELD_BASE(81, 81, 4, 0x0110, 0x10, 28, 4),
+ PIN_FIELD_BASE(82, 82, 4, 0x0110, 0x10, 24, 4),
+ PIN_FIELD_BASE(83, 83, 2, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(84, 84, 2, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(85, 85, 2, 0x01b0, 0x10, 12, 4),
+ PIN_FIELD_BASE(86, 86, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(87, 87, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(88, 88, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(89, 89, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(90, 90, 2, 0x01b0, 0x10, 12, 4),
+ PIN_FIELD_BASE(91, 91, 2, 0x01b0, 0x10, 12, 4),
+ PIN_FIELD_BASE(92, 92, 2, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(93, 93, 2, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(94, 94, 2, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(95, 95, 2, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(96, 96, 2, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(97, 97, 2, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(98, 98, 2, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(99, 99, 2, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(100, 100, 2, 0x01b0, 0x10, 20, 4),
+ PIN_FIELD_BASE(101, 101, 2, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(102, 102, 2, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(103, 103, 2, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(104, 104, 2, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(105, 105, 2, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(106, 106, 2, 0x01b0, 0x10, 0, 4),
+ PIN_FIELD_BASE(107, 107, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(108, 108, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(109, 109, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(110, 110, 2, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(111, 111, 2, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(112, 112, 2, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(113, 113, 2, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(114, 114, 2, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(115, 115, 2, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(116, 116, 2, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(117, 117, 2, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(118, 118, 2, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(119, 119, 2, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(120, 120, 2, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(121, 121, 3, 0x00f0, 0x10, 24, 4),
+ PIN_FIELD_BASE(122, 122, 3, 0x0100, 0x10, 4, 4),
+ PIN_FIELD_BASE(123, 123, 3, 0x0100, 0x10, 0, 4),
+ PIN_FIELD_BASE(124, 124, 3, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(125, 125, 2, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(126, 126, 2, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(127, 127, 2, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(128, 128, 2, 0x01b0, 0x10, 4, 4),
+ PIN_FIELD_BASE(129, 129, 2, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(130, 130, 2, 0x01b0, 0x10, 8, 4),
+ PIN_FIELD_BASE(131, 131, 1, 0x01a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(132, 132, 1, 0x01a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(133, 133, 1, 0x01a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(134, 134, 1, 0x01a0, 0x10, 28, 4),
+ PIN_FIELD_BASE(135, 135, 1, 0x01d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(136, 136, 1, 0x01d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(137, 137, 1, 0x01d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(138, 138, 1, 0x01d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(139, 139, 1, 0x01d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(140, 140, 1, 0x01d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(141, 141, 1, 0x01d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(142, 142, 1, 0x01d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(143, 143, 1, 0x01a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(144, 144, 1, 0x01a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(145, 145, 1, 0x01a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(146, 146, 1, 0x01a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(147, 147, 1, 0x01d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(148, 148, 1, 0x01d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(149, 149, 1, 0x01c0, 0x10, 4, 4),
+ PIN_FIELD_BASE(150, 150, 1, 0x01c0, 0x10, 4, 4),
+ PIN_FIELD_BASE(151, 151, 2, 0x0190, 0x10, 4, 4),
+ PIN_FIELD_BASE(152, 152, 2, 0x0190, 0x10, 0, 4),
+ PIN_FIELD_BASE(153, 153, 2, 0x0180, 0x10, 28, 4),
+ PIN_FIELD_BASE(154, 154, 2, 0x0180, 0x10, 24, 4),
+ PIN_FIELD_BASE(155, 155, 2, 0x0190, 0x10, 12, 4),
+ PIN_FIELD_BASE(156, 156, 2, 0x0180, 0x10, 4, 4),
+ PIN_FIELD_BASE(157, 157, 2, 0x0180, 0x10, 0, 4),
+ PIN_FIELD_BASE(158, 158, 2, 0x0180, 0x10, 20, 4),
+ PIN_FIELD_BASE(159, 159, 2, 0x0180, 0x10, 16, 4),
+ PIN_FIELD_BASE(160, 160, 2, 0x0180, 0x10, 12, 4),
+ PIN_FIELD_BASE(161, 161, 2, 0x0180, 0x10, 8, 4),
+ PIN_FIELD_BASE(162, 162, 2, 0x0190, 0x10, 8, 4),
+ PIN_FIELD_BASE(163, 163, 4, 0x0100, 0x10, 16, 4),
+ PIN_FIELD_BASE(164, 164, 4, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(165, 165, 4, 0x0100, 0x10, 20, 4),
+ PIN_FIELD_BASE(166, 166, 4, 0x0100, 0x10, 24, 4),
+ PIN_FIELD_BASE(167, 167, 4, 0x0100, 0x10, 28, 4),
+ PIN_FIELD_BASE(168, 168, 4, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(169, 169, 3, 0x00f0, 0x10, 4, 4),
+ PIN_FIELD_BASE(170, 170, 3, 0x00f0, 0x10, 0, 4),
+ PIN_FIELD_BASE(171, 171, 3, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(172, 172, 3, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(173, 173, 3, 0x00f0, 0x10, 16, 4),
+ PIN_FIELD_BASE(174, 174, 3, 0x00f0, 0x10, 20, 4),
+ PIN_FIELD_BASE(175, 175, 2, 0x01b0, 0x10, 16, 4),
+ PIN_FIELD_BASE(176, 176, 2, 0x01b0, 0x10, 16, 4),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_rdsel_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x0130, 0x10, 18, 2),
+ PIN_FIELD_BASE(1, 1, 1, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(2, 2, 1, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(3, 3, 1, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(4, 4, 1, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(5, 5, 1, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(6, 6, 1, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(7, 7, 1, 0x0130, 0x10, 26, 2),
+ PIN_FIELD_BASE(8, 8, 1, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(9, 9, 1, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(10, 10, 1, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(11, 11, 1, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(12, 12, 2, 0x0130, 0x10, 12, 2),
+ PIN_FIELD_BASE(13, 13, 2, 0x0130, 0x10, 14, 2),
+ PIN_FIELD_BASE(14, 14, 2, 0x0130, 0x10, 16, 2),
+ PIN_FIELD_BASE(15, 15, 2, 0x0130, 0x10, 18, 2),
+ PIN_FIELD_BASE(16, 16, 3, 0x00b0, 0x10, 14, 2),
+ PIN_FIELD_BASE(17, 17, 3, 0x00b0, 0x10, 14, 2),
+ PIN_FIELD_BASE(18, 18, 4, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(19, 19, 4, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(20, 20, 4, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(21, 21, 4, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(22, 22, 4, 0x00b0, 0x10, 0, 2),
+ PIN_FIELD_BASE(23, 23, 4, 0x00b0, 0x10, 2, 2),
+ PIN_FIELD_BASE(24, 24, 4, 0x00b0, 0x10, 4, 2),
+ PIN_FIELD_BASE(25, 25, 1, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(26, 26, 1, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(27, 27, 1, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(28, 28, 1, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(29, 29, 1, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(30, 30, 1, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(31, 31, 1, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(32, 32, 1, 0x0140, 0x10, 18, 2),
+ PIN_FIELD_BASE(33, 33, 1, 0x0140, 0x10, 18, 2),
+ PIN_FIELD_BASE(34, 34, 1, 0x0140, 0x10, 20, 2),
+ PIN_FIELD_BASE(35, 35, 1, 0x0140, 0x10, 18, 2),
+ PIN_FIELD_BASE(36, 36, 1, 0x0140, 0x10, 18, 2),
+ PIN_FIELD_BASE(37, 37, 1, 0x0140, 0x10, 20, 2),
+ PIN_FIELD_BASE(38, 38, 1, 0x0140, 0x10, 20, 2),
+ PIN_FIELD_BASE(39, 39, 1, 0x0140, 0x10, 20, 2),
+ PIN_FIELD_BASE(40, 40, 1, 0x0140, 0x10, 22, 2),
+ PIN_FIELD_BASE(41, 41, 1, 0x0140, 0x10, 22, 2),
+ PIN_FIELD_BASE(42, 42, 2, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(43, 43, 2, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(44, 44, 2, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(45, 45, 2, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(46, 46, 3, 0x00b0, 0x10, 14, 2),
+ PIN_FIELD_BASE(47, 47, 1, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(48, 48, 1, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(49, 49, 1, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(50, 50, 3, 0x00b0, 0x10, 14, 2),
+ PIN_FIELD_BASE(51, 51, 3, 0x00b0, 0x10, 16, 2),
+ PIN_FIELD_BASE(52, 52, 3, 0x00b0, 0x10, 16, 2),
+ PIN_FIELD_BASE(53, 53, 3, 0x00b0, 0x10, 16, 2),
+ PIN_FIELD_BASE(54, 54, 3, 0x00b0, 0x10, 16, 2),
+ PIN_FIELD_BASE(55, 55, 1, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(56, 56, 1, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(57, 57, 2, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(58, 58, 2, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(59, 59, 1, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(60, 60, 1, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(61, 61, 1, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(62, 62, 1, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(63, 63, 2, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(64, 64, 2, 0x0140, 0x10, 0, 2),
+ PIN_FIELD_BASE(65, 65, 4, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(66, 66, 4, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(67, 67, 4, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(68, 68, 4, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(69, 69, 1, 0x0150, 0x10, 14, 2),
+ PIN_FIELD_BASE(70, 70, 1, 0x0150, 0x10, 12, 2),
+ PIN_FIELD_BASE(71, 71, 1, 0x0150, 0x10, 22, 2),
+ PIN_FIELD_BASE(72, 72, 1, 0x0150, 0x10, 20, 2),
+ PIN_FIELD_BASE(73, 73, 1, 0x0150, 0x10, 16, 2),
+ PIN_FIELD_BASE(74, 74, 1, 0x0150, 0x10, 18, 2),
+ PIN_FIELD_BASE(75, 75, 1, 0x0150, 0x10, 26, 2),
+ PIN_FIELD_BASE(76, 76, 1, 0x0150, 0x10, 24, 2),
+ PIN_FIELD_BASE(77, 77, 1, 0x0150, 0x10, 30, 2),
+ PIN_FIELD_BASE(78, 78, 1, 0x0150, 0x10, 28, 2),
+ PIN_FIELD_BASE(79, 79, 4, 0x00c0, 0x10, 18, 2),
+ PIN_FIELD_BASE(80, 80, 4, 0x00c0, 0x10, 16, 2),
+ PIN_FIELD_BASE(81, 81, 4, 0x00c0, 0x10, 22, 2),
+ PIN_FIELD_BASE(82, 82, 4, 0x00c0, 0x10, 20, 2),
+ PIN_FIELD_BASE(83, 83, 2, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(84, 84, 2, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(85, 85, 2, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(86, 86, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(87, 87, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(88, 88, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(89, 89, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(90, 90, 2, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(91, 91, 2, 0x0140, 0x10, 12, 2),
+ PIN_FIELD_BASE(92, 92, 2, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(93, 93, 2, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(94, 94, 2, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(95, 95, 2, 0x0130, 0x10, 22, 2),
+ PIN_FIELD_BASE(96, 96, 2, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(97, 97, 2, 0x0140, 0x10, 4, 2),
+ PIN_FIELD_BASE(98, 98, 2, 0x0140, 0x10, 6, 2),
+ PIN_FIELD_BASE(99, 99, 2, 0x0140, 0x10, 2, 2),
+ PIN_FIELD_BASE(100, 100, 2, 0x0140, 0x10, 16, 2),
+ PIN_FIELD_BASE(101, 101, 2, 0x0140, 0x10, 4, 2),
+ PIN_FIELD_BASE(102, 102, 2, 0x0140, 0x10, 4, 2),
+ PIN_FIELD_BASE(103, 103, 2, 0x0140, 0x10, 4, 2),
+ PIN_FIELD_BASE(104, 104, 2, 0x0140, 0x10, 6, 2),
+ PIN_FIELD_BASE(105, 105, 2, 0x0140, 0x10, 6, 2),
+ PIN_FIELD_BASE(106, 106, 2, 0x0140, 0x10, 6, 2),
+ PIN_FIELD_BASE(107, 107, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(108, 108, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(109, 109, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(110, 110, 2, 0x0130, 0x10, 20, 2),
+ PIN_FIELD_BASE(111, 111, 2, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(112, 112, 2, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(113, 113, 2, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(114, 114, 2, 0x0130, 0x10, 24, 2),
+ PIN_FIELD_BASE(115, 115, 2, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(116, 116, 2, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(117, 117, 2, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(118, 118, 2, 0x0130, 0x10, 28, 2),
+ PIN_FIELD_BASE(119, 119, 2, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(120, 120, 2, 0x0130, 0x10, 30, 2),
+ PIN_FIELD_BASE(121, 121, 3, 0x00b0, 0x10, 6, 2),
+ PIN_FIELD_BASE(122, 122, 3, 0x00b0, 0x10, 12, 2),
+ PIN_FIELD_BASE(123, 123, 3, 0x00b0, 0x10, 10, 2),
+ PIN_FIELD_BASE(124, 124, 3, 0x00b0, 0x10, 8, 2),
+ PIN_FIELD_BASE(125, 125, 2, 0x0140, 0x10, 8, 2),
+ PIN_FIELD_BASE(126, 126, 2, 0x0140, 0x10, 8, 2),
+ PIN_FIELD_BASE(127, 127, 2, 0x0140, 0x10, 8, 2),
+ PIN_FIELD_BASE(128, 128, 2, 0x0140, 0x10, 8, 2),
+ PIN_FIELD_BASE(129, 129, 2, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(130, 130, 2, 0x0140, 0x10, 10, 2),
+ PIN_FIELD_BASE(131, 131, 1, 0x0120, 0x10, 0, 6),
+ PIN_FIELD_BASE(132, 132, 1, 0x0130, 0x10, 0, 6),
+ PIN_FIELD_BASE(133, 133, 1, 0x0130, 0x10, 6, 6),
+ PIN_FIELD_BASE(134, 134, 1, 0x0130, 0x10, 12, 6),
+ PIN_FIELD_BASE(135, 135, 1, 0x0140, 0x10, 24, 6),
+ PIN_FIELD_BASE(136, 136, 1, 0x0140, 0x10, 24, 6),
+ PIN_FIELD_BASE(137, 137, 1, 0x0150, 0x10, 0, 6),
+ PIN_FIELD_BASE(138, 138, 1, 0x0150, 0x10, 0, 6),
+ PIN_FIELD_BASE(139, 139, 1, 0x0150, 0x10, 0, 6),
+ PIN_FIELD_BASE(140, 140, 1, 0x0150, 0x10, 0, 6),
+ PIN_FIELD_BASE(141, 141, 1, 0x0150, 0x10, 6, 6),
+ PIN_FIELD_BASE(142, 142, 1, 0x0150, 0x10, 6, 6),
+ PIN_FIELD_BASE(143, 143, 1, 0x0120, 0x10, 6, 6),
+ PIN_FIELD_BASE(144, 144, 1, 0x0120, 0x10, 12, 6),
+ PIN_FIELD_BASE(145, 145, 1, 0x0120, 0x10, 18, 6),
+ PIN_FIELD_BASE(146, 146, 1, 0x0120, 0x10, 24, 6),
+ PIN_FIELD_BASE(147, 147, 1, 0x0150, 0x10, 6, 6),
+ PIN_FIELD_BASE(148, 148, 1, 0x0150, 0x10, 6, 6),
+ PIN_FIELD_BASE(149, 149, 1, 0x0140, 0x10, 4, 6),
+ PIN_FIELD_BASE(150, 150, 1, 0x0140, 0x10, 4, 6),
+ PIN_FIELD_BASE(151, 151, 2, 0x0120, 0x10, 24, 6),
+ PIN_FIELD_BASE(152, 152, 2, 0x0120, 0x10, 18, 6),
+ PIN_FIELD_BASE(153, 153, 2, 0x0120, 0x10, 12, 6),
+ PIN_FIELD_BASE(154, 154, 2, 0x0120, 0x10, 6, 6),
+ PIN_FIELD_BASE(155, 155, 2, 0x0130, 0x10, 6, 6),
+ PIN_FIELD_BASE(156, 156, 2, 0x0110, 0x10, 6, 6),
+ PIN_FIELD_BASE(157, 157, 2, 0x0110, 0x10, 0, 6),
+ PIN_FIELD_BASE(158, 158, 2, 0x0120, 0x10, 0, 6),
+ PIN_FIELD_BASE(159, 159, 2, 0x0110, 0x10, 24, 6),
+ PIN_FIELD_BASE(160, 160, 2, 0x0110, 0x10, 18, 6),
+ PIN_FIELD_BASE(161, 161, 2, 0x0110, 0x10, 12, 6),
+ PIN_FIELD_BASE(162, 162, 2, 0x0130, 0x10, 0, 6),
+ PIN_FIELD_BASE(163, 163, 4, 0x00b0, 0x10, 12, 6),
+ PIN_FIELD_BASE(164, 164, 4, 0x00b0, 0x10, 6, 6),
+ PIN_FIELD_BASE(165, 165, 4, 0x00b0, 0x10, 18, 6),
+ PIN_FIELD_BASE(166, 166, 4, 0x00b0, 0x10, 24, 6),
+ PIN_FIELD_BASE(167, 167, 4, 0x00c0, 0x10, 0, 6),
+ PIN_FIELD_BASE(168, 168, 4, 0x00c0, 0x10, 6, 6),
+ PIN_FIELD_BASE(169, 169, 3, 0x00a0, 0x10, 6, 6),
+ PIN_FIELD_BASE(170, 170, 3, 0x00a0, 0x10, 0, 6),
+ PIN_FIELD_BASE(171, 171, 3, 0x00a0, 0x10, 12, 6),
+ PIN_FIELD_BASE(172, 172, 3, 0x00a0, 0x10, 18, 6),
+ PIN_FIELD_BASE(173, 173, 3, 0x00a0, 0x10, 24, 6),
+ PIN_FIELD_BASE(174, 174, 3, 0x00b0, 0x10, 0, 6),
+ PIN_FIELD_BASE(175, 175, 2, 0x0140, 0x10, 14, 2),
+ PIN_FIELD_BASE(176, 176, 2, 0x0140, 0x10, 14, 2),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_pupd_range[] = {
+ PIN_FIELD_BASE(42, 42, 2, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x00d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x00d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x00d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x00d0, 0x10, 18, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x00d0, 0x10, 19, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x00d0, 0x10, 17, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x0060, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_r0_range[] = {
+ PIN_FIELD_BASE(42, 42, 2, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x00f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0100, 0x10, 1, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0100, 0x10, 2, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0100, 0x10, 9, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0100, 0x10, 10, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0100, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0100, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0100, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0100, 0x10, 14, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0100, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0100, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0100, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0100, 0x10, 4, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0100, 0x10, 5, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0100, 0x10, 6, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0100, 0x10, 7, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0100, 0x10, 18, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0100, 0x10, 19, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0100, 0x10, 17, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0100, 0x10, 0, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x0080, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_r1_range[] = {
+ PIN_FIELD_BASE(42, 42, 2, 0x0100, 0x10, 12, 1),
+ PIN_FIELD_BASE(43, 43, 2, 0x0100, 0x10, 13, 1),
+ PIN_FIELD_BASE(44, 44, 2, 0x0100, 0x10, 14, 1),
+ PIN_FIELD_BASE(45, 45, 2, 0x0100, 0x10, 15, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0110, 0x10, 1, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0110, 0x10, 2, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0110, 0x10, 9, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0110, 0x10, 10, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0110, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0110, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0110, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0110, 0x10, 14, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0110, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0110, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0110, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0110, 0x10, 4, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0110, 0x10, 5, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0110, 0x10, 6, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0110, 0x10, 7, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0110, 0x10, 8, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0110, 0x10, 18, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0110, 0x10, 19, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0110, 0x10, 17, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0110, 0x10, 0, 1),
+ PIN_FIELD_BASE(151, 151, 2, 0x0100, 0x10, 9, 1),
+ PIN_FIELD_BASE(152, 152, 2, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(153, 153, 2, 0x0100, 0x10, 7, 1),
+ PIN_FIELD_BASE(154, 154, 2, 0x0100, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 2, 0x0100, 0x10, 11, 1),
+ PIN_FIELD_BASE(156, 156, 2, 0x0100, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0100, 0x10, 0, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0100, 0x10, 5, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0100, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 2, 0x0100, 0x10, 3, 1),
+ PIN_FIELD_BASE(161, 161, 2, 0x0100, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 2, 0x0100, 0x10, 10, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 3, 0x0090, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(2, 2, 1, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(3, 3, 1, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(4, 4, 1, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(5, 5, 1, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(6, 6, 1, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(7, 7, 1, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(8, 8, 1, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(9, 9, 1, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(10, 10, 1, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(14, 14, 2, 0x00d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(15, 15, 2, 0x00d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(16, 16, 3, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 3, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(18, 18, 4, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(19, 19, 4, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(21, 21, 4, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 4, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 1, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(26, 26, 1, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(27, 27, 1, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(28, 28, 1, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(29, 29, 1, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 1, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x00e0, 0x10, 25, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x00e0, 0x10, 24, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x00e0, 0x10, 26, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x00e0, 0x10, 29, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x00e0, 0x10, 27, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x00e0, 0x10, 30, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x00e0, 0x10, 28, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(65, 65, 4, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 4, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(67, 67, 4, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(68, 68, 4, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 1, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(78, 78, 1, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(80, 80, 4, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(81, 81, 4, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(82, 82, 4, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(84, 84, 2, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(85, 85, 2, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(86, 86, 2, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(87, 87, 2, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(88, 88, 2, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00d0, 0x10, 31, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x00d0, 0x10, 30, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x00d0, 0x10, 21, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x00d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x00d0, 0x10, 22, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x00d0, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x00d0, 0x10, 18, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x00d0, 0x10, 19, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x00d0, 0x10, 20, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x00d0, 0x10, 28, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00d0, 0x10, 23, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00d0, 0x10, 29, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00d0, 0x10, 24, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00d0, 0x10, 25, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00d0, 0x10, 26, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00d0, 0x10, 27, 1),
+ PIN_FIELD_BASE(121, 121, 3, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(122, 122, 3, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(123, 123, 3, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(124, 124, 3, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(126, 126, 2, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(127, 127, 2, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 2, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(129, 129, 2, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(130, 130, 2, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(175, 175, 2, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(176, 176, 2, 0x00e0, 0x10, 12, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(2, 2, 1, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(3, 3, 1, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(4, 4, 1, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(5, 5, 1, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(6, 6, 1, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(7, 7, 1, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(8, 8, 1, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(9, 9, 1, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(10, 10, 1, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(14, 14, 2, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(15, 15, 2, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(16, 16, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(18, 18, 4, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(19, 19, 4, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(21, 21, 4, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 4, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 4, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 4, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 1, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(26, 26, 1, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(27, 27, 1, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(28, 28, 1, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(29, 29, 1, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 1, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x00b0, 0x10, 19, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x00b0, 0x10, 20, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x00b0, 0x10, 25, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x00b0, 0x10, 24, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x00b0, 0x10, 23, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x00b0, 0x10, 26, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x00b0, 0x10, 29, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x00b0, 0x10, 27, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x00b0, 0x10, 30, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x00b0, 0x10, 28, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x00b0, 0x10, 31, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(65, 65, 4, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 4, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(67, 67, 4, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(68, 68, 4, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(78, 78, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(80, 80, 4, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(81, 81, 4, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(82, 82, 4, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(84, 84, 2, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(85, 85, 2, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(86, 86, 2, 0x00b0, 0x10, 19, 1),
+ PIN_FIELD_BASE(87, 87, 2, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(88, 88, 2, 0x00b0, 0x10, 20, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00b0, 0x10, 23, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00a0, 0x10, 31, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x00a0, 0x10, 30, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x00a0, 0x10, 21, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x00a0, 0x10, 20, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x00a0, 0x10, 28, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00a0, 0x10, 29, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00a0, 0x10, 26, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00a0, 0x10, 27, 1),
+ PIN_FIELD_BASE(121, 121, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(122, 122, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(123, 123, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(124, 124, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(126, 126, 2, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(127, 127, 2, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 2, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(129, 129, 2, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(130, 130, 2, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(175, 175, 2, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(176, 176, 2, 0x00b0, 0x10, 12, 1),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(1, 1, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(2, 2, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(3, 3, 1, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(4, 4, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(5, 5, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(6, 6, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(7, 7, 1, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(8, 8, 1, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(9, 9, 1, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(10, 10, 1, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(11, 11, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(12, 12, 2, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(13, 13, 2, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(14, 14, 2, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(15, 15, 2, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(16, 16, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(17, 17, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(18, 18, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(19, 19, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(20, 20, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(21, 21, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(22, 22, 4, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(23, 23, 4, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(24, 24, 4, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(25, 25, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(26, 26, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(27, 27, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(28, 28, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(29, 29, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(30, 30, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(31, 31, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(32, 32, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(33, 33, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(34, 34, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(35, 35, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(36, 36, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(37, 37, 1, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(38, 38, 1, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(39, 39, 1, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(40, 40, 1, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(41, 41, 1, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(42, 42, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(43, 43, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(44, 44, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(45, 45, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(46, 46, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(47, 47, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(48, 48, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(49, 49, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(50, 50, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(51, 51, 3, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(52, 52, 3, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(53, 53, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(55, 55, 1, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x0030, 0x10, 15, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x0030, 0x10, 12, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x0030, 0x10, 12, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x0030, 0x10, 18, 3),
+ PIN_FIELD_BASE(65, 65, 4, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 4, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 4, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(69, 69, 1, 0x0030, 0x10, 18, 3),
+ PIN_FIELD_BASE(70, 70, 1, 0x0030, 0x10, 15, 3),
+ PIN_FIELD_BASE(71, 71, 1, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(72, 72, 1, 0x0030, 0x10, 27, 3),
+ PIN_FIELD_BASE(73, 73, 1, 0x0030, 0x10, 21, 3),
+ PIN_FIELD_BASE(74, 74, 1, 0x0030, 0x10, 24, 3),
+ PIN_FIELD_BASE(75, 75, 1, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(76, 76, 1, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(77, 77, 1, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(78, 78, 1, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(79, 79, 4, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(80, 80, 4, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(81, 81, 4, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(82, 82, 4, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(83, 83, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(84, 84, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(85, 85, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(86, 86, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(87, 87, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(88, 88, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(89, 89, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(90, 90, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(91, 91, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(92, 92, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(93, 93, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(94, 94, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(95, 95, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(96, 96, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(97, 97, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(98, 98, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(99, 99, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(100, 100, 2, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(101, 101, 2, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(102, 102, 2, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(103, 103, 2, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(104, 104, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(105, 105, 2, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(106, 106, 2, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(107, 107, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(108, 108, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(109, 109, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(110, 110, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(111, 111, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(112, 112, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(113, 113, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(114, 114, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(115, 115, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(116, 116, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(117, 117, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(118, 118, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(119, 119, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(120, 120, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(121, 121, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 3, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(124, 124, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(125, 125, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(126, 126, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(127, 127, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(128, 128, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(129, 129, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(130, 130, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(131, 131, 1, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(132, 132, 1, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(133, 133, 1, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(134, 134, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(135, 135, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(136, 136, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(137, 137, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(138, 138, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(139, 139, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(140, 140, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(141, 141, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(142, 142, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(143, 143, 1, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(144, 144, 1, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(145, 145, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(146, 146, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(147, 147, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(148, 148, 1, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(149, 149, 1, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(150, 150, 1, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(151, 151, 2, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(152, 152, 2, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(153, 153, 2, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(154, 154, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(155, 155, 2, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(156, 156, 2, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(157, 157, 2, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(158, 158, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(159, 159, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(160, 160, 2, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(161, 161, 2, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(162, 162, 2, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(163, 163, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(164, 164, 4, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(165, 165, 4, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(166, 166, 4, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(167, 167, 4, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(168, 168, 4, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(169, 169, 3, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(171, 171, 3, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(172, 172, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(173, 173, 3, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(174, 174, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(175, 175, 2, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(176, 176, 2, 0x0030, 0x10, 3, 3),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(53, 53, 3, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(55, 55, 1, 0x0060, 0x10, 0, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x0060, 0x10, 9, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x0050, 0x10, 6, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x0060, 0x10, 3, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x0060, 0x10, 12, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x0060, 0x10, 6, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x0060, 0x10, 15, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x0050, 0x10, 9, 3),
+ PIN_FIELD_BASE(65, 65, 4, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 4, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 4, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 4, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(175, 175, 2, 0x0050, 0x10, 12, 3),
+ PIN_FIELD_BASE(176, 176, 2, 0x0050, 0x10, 15, 3),
+};
+
+static const struct mtk_pin_field_calc mt8188_pin_rsel_range[] = {
+ PIN_FIELD_BASE(53, 53, 3, 0x00c0, 0x10, 0, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x00c0, 0x10, 3, 3),
+ PIN_FIELD_BASE(55, 55, 1, 0x0160, 0x10, 0, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x0160, 0x10, 9, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x0150, 0x10, 0, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x0150, 0x10, 6, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x0160, 0x10, 3, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x0160, 0x10, 12, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x0160, 0x10, 6, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x0160, 0x10, 15, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x0150, 0x10, 3, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x0150, 0x10, 9, 3),
+ PIN_FIELD_BASE(65, 65, 4, 0x00d0, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 4, 0x00d0, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 4, 0x00d0, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 4, 0x00d0, 0x10, 9, 3),
+ PIN_FIELD_BASE(175, 175, 2, 0x0150, 0x10, 12, 3),
+ PIN_FIELD_BASE(176, 176, 2, 0x0150, 0x10, 15, 3),
+};
+
+static const struct mtk_pin_rsel mt8188_pin_rsel_val_range[] = {
+ PIN_RSEL(53, 68, 0x0, 75000, 75000),
+ PIN_RSEL(53, 68, 0x1, 10000, 5000),
+ PIN_RSEL(53, 68, 0x2, 5000, 75000),
+ PIN_RSEL(53, 68, 0x3, 4000, 5000),
+ PIN_RSEL(53, 68, 0x4, 3000, 75000),
+ PIN_RSEL(53, 68, 0x5, 2000, 5000),
+ PIN_RSEL(53, 68, 0x6, 1500, 75000),
+ PIN_RSEL(53, 68, 0x7, 1000, 5000),
+ PIN_RSEL(175, 176, 0x0, 75000, 75000),
+ PIN_RSEL(175, 176, 0x1, 10000, 5000),
+ PIN_RSEL(175, 176, 0x2, 5000, 75000),
+ PIN_RSEL(175, 176, 0x3, 4000, 5000),
+ PIN_RSEL(175, 176, 0x4, 3000, 75000),
+ PIN_RSEL(175, 176, 0x5, 2000, 5000),
+ PIN_RSEL(175, 176, 0x6, 1500, 75000),
+ PIN_RSEL(175, 176, 0x7, 1000, 5000),
+};
+
+static const unsigned int mt8188_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE, /*0*/
+ MTK_PULL_PU_PD_TYPE, /*1*/
+ MTK_PULL_PU_PD_TYPE, /*2*/
+ MTK_PULL_PU_PD_TYPE, /*3*/
+ MTK_PULL_PU_PD_TYPE, /*4*/
+ MTK_PULL_PU_PD_TYPE, /*5*/
+ MTK_PULL_PU_PD_TYPE, /*6*/
+ MTK_PULL_PU_PD_TYPE, /*7*/
+ MTK_PULL_PU_PD_TYPE, /*8*/
+ MTK_PULL_PU_PD_TYPE, /*9*/
+ MTK_PULL_PU_PD_TYPE, /*10*/
+ MTK_PULL_PU_PD_TYPE, /*11*/
+ MTK_PULL_PU_PD_TYPE, /*12*/
+ MTK_PULL_PU_PD_TYPE, /*13*/
+ MTK_PULL_PU_PD_TYPE, /*14*/
+ MTK_PULL_PU_PD_TYPE, /*15*/
+ MTK_PULL_PU_PD_TYPE, /*16*/
+ MTK_PULL_PU_PD_TYPE, /*17*/
+ MTK_PULL_PU_PD_TYPE, /*18*/
+ MTK_PULL_PU_PD_TYPE, /*19*/
+ MTK_PULL_PU_PD_TYPE, /*20*/
+ MTK_PULL_PU_PD_TYPE, /*21*/
+ MTK_PULL_PU_PD_TYPE, /*22*/
+ MTK_PULL_PU_PD_TYPE, /*23*/
+ MTK_PULL_PU_PD_TYPE, /*24*/
+ MTK_PULL_PU_PD_TYPE, /*25*/
+ MTK_PULL_PU_PD_TYPE, /*26*/
+ MTK_PULL_PU_PD_TYPE, /*27*/
+ MTK_PULL_PU_PD_TYPE, /*28*/
+ MTK_PULL_PU_PD_TYPE, /*29*/
+ MTK_PULL_PU_PD_TYPE, /*30*/
+ MTK_PULL_PU_PD_TYPE, /*31*/
+ MTK_PULL_PU_PD_TYPE, /*32*/
+ MTK_PULL_PU_PD_TYPE, /*33*/
+ MTK_PULL_PU_PD_TYPE, /*34*/
+ MTK_PULL_PU_PD_TYPE, /*35*/
+ MTK_PULL_PU_PD_TYPE, /*36*/
+ MTK_PULL_PU_PD_TYPE, /*37*/
+ MTK_PULL_PU_PD_TYPE, /*38*/
+ MTK_PULL_PU_PD_TYPE, /*39*/
+ MTK_PULL_PU_PD_TYPE, /*40*/
+ MTK_PULL_PU_PD_TYPE, /*41*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*42*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*43*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*44*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*45*/
+ MTK_PULL_PU_PD_TYPE, /*46*/
+ MTK_PULL_PU_PD_TYPE, /*47*/
+ MTK_PULL_PU_PD_TYPE, /*48*/
+ MTK_PULL_PU_PD_TYPE, /*49*/
+ MTK_PULL_PU_PD_TYPE, /*50*/
+ MTK_PULL_PU_PD_TYPE, /*51*/
+ MTK_PULL_PU_PD_TYPE, /*52*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*53*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*54*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*55*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*56*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*57*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*58*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*59*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*60*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*61*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*62*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*63*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*64*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*65*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*66*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*67*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*68*/
+ MTK_PULL_PU_PD_TYPE, /*69*/
+ MTK_PULL_PU_PD_TYPE, /*70*/
+ MTK_PULL_PU_PD_TYPE, /*71*/
+ MTK_PULL_PU_PD_TYPE, /*72*/
+ MTK_PULL_PU_PD_TYPE, /*73*/
+ MTK_PULL_PU_PD_TYPE, /*74*/
+ MTK_PULL_PU_PD_TYPE, /*75*/
+ MTK_PULL_PU_PD_TYPE, /*76*/
+ MTK_PULL_PU_PD_TYPE, /*77*/
+ MTK_PULL_PU_PD_TYPE, /*78*/
+ MTK_PULL_PU_PD_TYPE, /*79*/
+ MTK_PULL_PU_PD_TYPE, /*80*/
+ MTK_PULL_PU_PD_TYPE, /*81*/
+ MTK_PULL_PU_PD_TYPE, /*82*/
+ MTK_PULL_PU_PD_TYPE, /*83*/
+ MTK_PULL_PU_PD_TYPE, /*84*/
+ MTK_PULL_PU_PD_TYPE, /*85*/
+ MTK_PULL_PU_PD_TYPE, /*86*/
+ MTK_PULL_PU_PD_TYPE, /*87*/
+ MTK_PULL_PU_PD_TYPE, /*88*/
+ MTK_PULL_PU_PD_TYPE, /*89*/
+ MTK_PULL_PU_PD_TYPE, /*90*/
+ MTK_PULL_PU_PD_TYPE, /*91*/
+ MTK_PULL_PU_PD_TYPE, /*92*/
+ MTK_PULL_PU_PD_TYPE, /*93*/
+ MTK_PULL_PU_PD_TYPE, /*94*/
+ MTK_PULL_PU_PD_TYPE, /*95*/
+ MTK_PULL_PU_PD_TYPE, /*96*/
+ MTK_PULL_PU_PD_TYPE, /*97*/
+ MTK_PULL_PU_PD_TYPE, /*98*/
+ MTK_PULL_PU_PD_TYPE, /*99*/
+ MTK_PULL_PU_PD_TYPE, /*100*/
+ MTK_PULL_PU_PD_TYPE, /*101*/
+ MTK_PULL_PU_PD_TYPE, /*102*/
+ MTK_PULL_PU_PD_TYPE, /*103*/
+ MTK_PULL_PU_PD_TYPE, /*104*/
+ MTK_PULL_PU_PD_TYPE, /*105*/
+ MTK_PULL_PU_PD_TYPE, /*106*/
+ MTK_PULL_PU_PD_TYPE, /*107*/
+ MTK_PULL_PU_PD_TYPE, /*108*/
+ MTK_PULL_PU_PD_TYPE, /*109*/
+ MTK_PULL_PU_PD_TYPE, /*110*/
+ MTK_PULL_PU_PD_TYPE, /*111*/
+ MTK_PULL_PU_PD_TYPE, /*112*/
+ MTK_PULL_PU_PD_TYPE, /*113*/
+ MTK_PULL_PU_PD_TYPE, /*114*/
+ MTK_PULL_PU_PD_TYPE, /*115*/
+ MTK_PULL_PU_PD_TYPE, /*116*/
+ MTK_PULL_PU_PD_TYPE, /*117*/
+ MTK_PULL_PU_PD_TYPE, /*118*/
+ MTK_PULL_PU_PD_TYPE, /*119*/
+ MTK_PULL_PU_PD_TYPE, /*120*/
+ MTK_PULL_PU_PD_TYPE, /*121*/
+ MTK_PULL_PU_PD_TYPE, /*122*/
+ MTK_PULL_PU_PD_TYPE, /*123*/
+ MTK_PULL_PU_PD_TYPE, /*124*/
+ MTK_PULL_PU_PD_TYPE, /*125*/
+ MTK_PULL_PU_PD_TYPE, /*126*/
+ MTK_PULL_PU_PD_TYPE, /*127*/
+ MTK_PULL_PU_PD_TYPE, /*128*/
+ MTK_PULL_PU_PD_TYPE, /*129*/
+ MTK_PULL_PU_PD_TYPE, /*130*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*131*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*132*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*133*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*134*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*135*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*136*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*137*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*138*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*139*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*140*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*141*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*142*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*143*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*144*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*145*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*146*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*147*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*148*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*149*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*150*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*151*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*152*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*153*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*154*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*155*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*156*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*157*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*158*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*159*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*160*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*161*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*162*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*163*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*164*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*165*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*166*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*167*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*168*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*169*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*170*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*171*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*172*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*173*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*174*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*175*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*176*/
+};
+
+static const struct mtk_pin_reg_calc mt8188_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8188_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8188_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8188_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8188_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8188_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8188_pin_ies_range),
+ [PINCTRL_PIN_REG_TDSEL] = MTK_RANGE(mt8188_pin_tdsel_range),
+ [PINCTRL_PIN_REG_RDSEL] = MTK_RANGE(mt8188_pin_rdsel_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8188_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8188_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8188_pin_r1_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8188_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8188_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8188_pin_drv_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8188_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8188_pin_rsel_range),
+};
+
+static const char * const mt8188_pinctrl_register_base_name[] = {
+ "iocfg0", "iocfg_rm", "iocfg_lt", "iocfg_lm", "iocfg_rt",
+};
+
+static const struct mtk_eint_hw mt8188_eint_hw = {
+ .port_mask = 0xf,
+ .ports = 7,
+ .ap_num = 225,
+ .db_cnt = 32,
+};
+
+static const struct mtk_pin_soc mt8188_data = {
+ .reg_cal = mt8188_reg_cals,
+ .pins = mtk_pins_mt8188,
+ .npins = ARRAY_SIZE(mtk_pins_mt8188),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8188),
+ .eint_hw = &mt8188_eint_hw,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt8188_pinctrl_register_base_name,
+ .nbase_names = ARRAY_SIZE(mt8188_pinctrl_register_base_name),
+ .pull_type = mt8188_pull_type,
+ .pin_rsel = mt8188_pin_rsel_val_range,
+ .npin_rsel = ARRAY_SIZE(mt8188_pin_rsel_val_range),
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+};
+
+static const struct of_device_id mt8188_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8188-pinctrl", .data = &mt8188_data },
+ { }
+};
+
+static struct platform_driver mt8188_pinctrl_driver = {
+ .driver = {
+ .name = "mt8188-pinctrl",
+ .of_match_table = mt8188_pinctrl_of_match,
+ .pm = &mtk_paris_pinctrl_pm_ops
+ },
+ .probe = mtk_paris_pinctrl_probe,
+};
+
+static int __init mt8188_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8188_pinctrl_driver);
+}
+
+arch_initcall(mt8188_pinctrl_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MediaTek MT8188 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8188.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8188.h
new file mode 100644
index 000000000000..a487323748e2
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8188.h
@@ -0,0 +1,2259 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Hui Liu <hui.liu@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT8188_H
+#define __PINCTRL_MTK_MT8188_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt8188[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO0"),
+ MTK_FUNCTION(1, "B0_TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "O_SPIM5_CSB"),
+ MTK_FUNCTION(3, "O_UTXD1"),
+ MTK_FUNCTION(4, "O_DMIC3_CLK"),
+ MTK_FUNCTION(5, "B0_I2SIN_MCK"),
+ MTK_FUNCTION(6, "O_I2SO2_MCK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A0")
+ ),
+
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO1"),
+ MTK_FUNCTION(1, "B0_TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "O_SPIM5_CLK"),
+ MTK_FUNCTION(3, "I1_URXD1"),
+ MTK_FUNCTION(4, "I0_DMIC3_DAT"),
+ MTK_FUNCTION(5, "B0_I2SIN_BCK"),
+ MTK_FUNCTION(6, "B0_I2SO2_BCK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A1")
+ ),
+
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO2"),
+ MTK_FUNCTION(1, "B0_TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "B0_SPIM5_MOSI"),
+ MTK_FUNCTION(3, "O_URTS1"),
+ MTK_FUNCTION(4, "I0_DMIC3_DAT_R"),
+ MTK_FUNCTION(5, "B0_I2SIN_WS"),
+ MTK_FUNCTION(6, "B0_I2SO2_WS"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A2")
+ ),
+
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO3"),
+ MTK_FUNCTION(1, "B0_TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "B0_SPIM5_MISO"),
+ MTK_FUNCTION(3, "I1_UCTS1"),
+ MTK_FUNCTION(4, "O_DMIC4_CLK"),
+ MTK_FUNCTION(5, "I0_I2SIN_D0"),
+ MTK_FUNCTION(6, "O_I2SO2_D0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A3")
+ ),
+
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO4"),
+ MTK_FUNCTION(1, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN2"),
+ MTK_FUNCTION(3, "O_I2SO1_MCK"),
+ MTK_FUNCTION(4, "I0_DMIC4_DAT"),
+ MTK_FUNCTION(5, "I0_I2SIN_D1"),
+ MTK_FUNCTION(6, "O_I2SO2_D1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A4")
+ ),
+
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO5"),
+ MTK_FUNCTION(1, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN1"),
+ MTK_FUNCTION(3, "O_I2SO1_BCK"),
+ MTK_FUNCTION(4, "I0_DMIC4_DAT_R"),
+ MTK_FUNCTION(5, "I0_I2SIN_D2"),
+ MTK_FUNCTION(6, "O_I2SO2_D2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A5")
+ ),
+
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO6"),
+ MTK_FUNCTION(1, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN0"),
+ MTK_FUNCTION(3, "O_I2SO1_WS"),
+ MTK_FUNCTION(4, "O_DMIC1_CLK"),
+ MTK_FUNCTION(5, "I0_I2SIN_D3"),
+ MTK_FUNCTION(6, "O_I2SO2_D3"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO0")
+ ),
+
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO7"),
+ MTK_FUNCTION(1, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(2, "O_SPIM3_CSB"),
+ MTK_FUNCTION(3, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(4, "I0_DMIC1_DAT"),
+ MTK_FUNCTION(5, "O_CMVREF0"),
+ MTK_FUNCTION(6, "O_CLKM0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A6")
+ ),
+
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO8"),
+ MTK_FUNCTION(1, "B0_TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "O_SPIM3_CLK"),
+ MTK_FUNCTION(3, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(4, "I0_DMIC1_DAT_R"),
+ MTK_FUNCTION(5, "O_CMVREF1"),
+ MTK_FUNCTION(6, "O_CLKM1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A7")
+ ),
+
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO9"),
+ MTK_FUNCTION(1, "B0_TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "B0_SPIM3_MOSI"),
+ MTK_FUNCTION(3, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(4, "O_DMIC2_CLK"),
+ MTK_FUNCTION(5, "O_CMFLASH0"),
+ MTK_FUNCTION(6, "O_PWM_0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A8")
+ ),
+
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO10"),
+ MTK_FUNCTION(1, "B0_TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "B0_SPIM3_MISO"),
+ MTK_FUNCTION(3, "I0_TDMIN_DI"),
+ MTK_FUNCTION(4, "I0_DMIC2_DAT"),
+ MTK_FUNCTION(5, "O_CMFLASH1"),
+ MTK_FUNCTION(6, "O_PWM_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A9")
+ ),
+
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO11"),
+ MTK_FUNCTION(1, "B0_TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "O_SPDIF_OUT"),
+ MTK_FUNCTION(3, "O_I2SO1_D0"),
+ MTK_FUNCTION(4, "I0_DMIC2_DAT_R"),
+ MTK_FUNCTION(5, "I0_DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(6, "O_CMVREF6"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A10")
+ ),
+
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO12"),
+ MTK_FUNCTION(1, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "O_SPIM4_CSB"),
+ MTK_FUNCTION(3, "B1_JTMS_SEL3"),
+ MTK_FUNCTION(4, "B1_APU_JTAG_TMS"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TMS"),
+ MTK_FUNCTION(6, "I0_IPU_JTAG_TMS"),
+ MTK_FUNCTION(7, "I0_HDMITX20_HTPLG")
+ ),
+
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO13"),
+ MTK_FUNCTION(1, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "O_SPIM4_CLK"),
+ MTK_FUNCTION(3, "I0_JTCK_SEL3"),
+ MTK_FUNCTION(4, "I0_APU_JTAG_TCK"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TCK"),
+ MTK_FUNCTION(6, "I0_IPU_JTAG_TCK"),
+ MTK_FUNCTION(7, "B1_HDMITX20_CEC")
+ ),
+
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO14"),
+ MTK_FUNCTION(1, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(2, "B0_SPIM4_MOSI"),
+ MTK_FUNCTION(3, "I1_JTDI_SEL3"),
+ MTK_FUNCTION(4, "I1_APU_JTAG_TDI"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TDI"),
+ MTK_FUNCTION(6, "I0_IPU_JTAG_TDI"),
+ MTK_FUNCTION(7, "B1_HDMITX20_SCL")
+ ),
+
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO15"),
+ MTK_FUNCTION(1, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(2, "B0_SPIM4_MISO"),
+ MTK_FUNCTION(3, "O_JTDO_SEL3"),
+ MTK_FUNCTION(4, "O_APU_JTAG_TDO"),
+ MTK_FUNCTION(5, "O_VPU_UDI_TDO"),
+ MTK_FUNCTION(6, "O_IPU_JTAG_TDO"),
+ MTK_FUNCTION(7, "B1_HDMITX20_SDA")
+ ),
+
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO16"),
+ MTK_FUNCTION(1, "B0_TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "O_UTXD3"),
+ MTK_FUNCTION(3, "I1_JTRSTn_SEL3"),
+ MTK_FUNCTION(4, "I0_APU_JTAG_TRST"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_NTRST"),
+ MTK_FUNCTION(6, "I0_IPU_JTAG_TRST"),
+ MTK_FUNCTION(7, "O_HDMITX20_PWR5V")
+ ),
+
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO17"),
+ MTK_FUNCTION(1, "B0_TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "I1_URXD3"),
+ MTK_FUNCTION(3, "O_CMFLASH2"),
+ MTK_FUNCTION(4, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(5, "I0_DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(6, "O_CMVREF7"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO1")
+ ),
+
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO18"),
+ MTK_FUNCTION(1, "B0_TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "O_CMFLASH0"),
+ MTK_FUNCTION(3, "O_CMVREF4"),
+ MTK_FUNCTION(4, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(5, "O_UTXD1"),
+ MTK_FUNCTION(6, "O_TP_UTXD1_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A11")
+ ),
+
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO19"),
+ MTK_FUNCTION(1, "B0_TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "O_CMFLASH1"),
+ MTK_FUNCTION(3, "O_CMVREF5"),
+ MTK_FUNCTION(4, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(5, "I1_URXD1"),
+ MTK_FUNCTION(6, "I1_TP_URXD1_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A12")
+ ),
+
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO20"),
+ MTK_FUNCTION(1, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "O_CMFLASH2"),
+ MTK_FUNCTION(3, "O_CLKM2"),
+ MTK_FUNCTION(4, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(5, "O_URTS1"),
+ MTK_FUNCTION(6, "O_TP_URTS1_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A13")
+ ),
+
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO21"),
+ MTK_FUNCTION(1, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "O_CMFLASH3"),
+ MTK_FUNCTION(3, "O_CLKM3"),
+ MTK_FUNCTION(4, "I0_TDMIN_DI"),
+ MTK_FUNCTION(5, "I1_UCTS1"),
+ MTK_FUNCTION(6, "I1_TP_UCTS1_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A14")
+ ),
+
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO22"),
+ MTK_FUNCTION(1, "O_CMMCLK0"),
+ MTK_FUNCTION(5, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A15")
+ ),
+
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO23"),
+ MTK_FUNCTION(1, "O_CMMCLK1"),
+ MTK_FUNCTION(3, "O_PWM_2"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SCL"),
+ MTK_FUNCTION(5, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "I0_DP_TX_HPD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A16")
+ ),
+
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO24"),
+ MTK_FUNCTION(1, "O_CMMCLK2"),
+ MTK_FUNCTION(3, "O_PWM_3"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SDA"),
+ MTK_FUNCTION(5, "I0_DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(6, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO2")
+ ),
+
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO25"),
+ MTK_FUNCTION(1, "O_LCM_RST"),
+ MTK_FUNCTION(2, "O_LCM1_RST"),
+ MTK_FUNCTION(3, "I0_DP_TX_HPD")
+ ),
+
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO26"),
+ MTK_FUNCTION(1, "I0_DSI_TE"),
+ MTK_FUNCTION(2, "I0_DSI1_TE"),
+ MTK_FUNCTION(3, "I0_EDP_TX_HPD")
+ ),
+
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO27"),
+ MTK_FUNCTION(1, "O_LCM1_RST"),
+ MTK_FUNCTION(2, "O_LCM_RST"),
+ MTK_FUNCTION(3, "I0_DP_TX_HPD"),
+ MTK_FUNCTION(4, "O_CMVREF2"),
+ MTK_FUNCTION(5, "O_mbistwriteen_trigger"),
+ MTK_FUNCTION(6, "O_PWM_2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A17")
+ ),
+
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO28"),
+ MTK_FUNCTION(1, "I0_DSI1_TE"),
+ MTK_FUNCTION(2, "I0_DSI_TE"),
+ MTK_FUNCTION(3, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(4, "O_CMVREF3"),
+ MTK_FUNCTION(5, "O_mbistreaden_trigger"),
+ MTK_FUNCTION(6, "O_PWM_3"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A18")
+ ),
+
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO29"),
+ MTK_FUNCTION(1, "O_DISP_PWM0"),
+ MTK_FUNCTION(2, "O_DISP_PWM1")
+ ),
+
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO30"),
+ MTK_FUNCTION(1, "O_DISP_PWM1"),
+ MTK_FUNCTION(2, "O_DISP_PWM0"),
+ MTK_FUNCTION(3, "O_CMFLASH3"),
+ MTK_FUNCTION(4, "O_PWM_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A19")
+ ),
+
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO31"),
+ MTK_FUNCTION(1, "O_UTXD0"),
+ MTK_FUNCTION(2, "O_TP_UTXD1_AO"),
+ MTK_FUNCTION(3, "O_ADSP_UTXD0"),
+ MTK_FUNCTION(4, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(5, "O_MD32_0_TXD"),
+ MTK_FUNCTION(6, "O_MD32_1_TXD"),
+ MTK_FUNCTION(7, "O_SSPM_UTXD_AO")
+ ),
+
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO32"),
+ MTK_FUNCTION(1, "I1_URXD0"),
+ MTK_FUNCTION(2, "I1_TP_URXD1_AO"),
+ MTK_FUNCTION(3, "I1_ADSP_URXD0"),
+ MTK_FUNCTION(4, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(5, "I1_MD32_0_RXD"),
+ MTK_FUNCTION(6, "I1_MD32_1_RXD"),
+ MTK_FUNCTION(7, "I1_SSPM_URXD_AO")
+ ),
+
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO33"),
+ MTK_FUNCTION(1, "O_UTXD1"),
+ MTK_FUNCTION(2, "O_URTS2"),
+ MTK_FUNCTION(3, "O_ADSP_UTXD0"),
+ MTK_FUNCTION(4, "O_TP_UTXD1_AO"),
+ MTK_FUNCTION(5, "O_mbistwriteen_trigger"),
+ MTK_FUNCTION(6, "O_MD32_0_TXD"),
+ MTK_FUNCTION(7, "O_SSPM_UTXD_AO")
+ ),
+
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO34"),
+ MTK_FUNCTION(1, "I1_URXD1"),
+ MTK_FUNCTION(2, "I1_UCTS2"),
+ MTK_FUNCTION(3, "I1_ADSP_URXD0"),
+ MTK_FUNCTION(4, "I1_TP_URXD1_AO"),
+ MTK_FUNCTION(5, "O_mbistreaden_trigger"),
+ MTK_FUNCTION(6, "I1_MD32_0_RXD"),
+ MTK_FUNCTION(7, "I1_SSPM_URXD_AO")
+ ),
+
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO35"),
+ MTK_FUNCTION(1, "O_UTXD2"),
+ MTK_FUNCTION(2, "O_URTS1"),
+ MTK_FUNCTION(3, "O_ADSP_UTXD0"),
+ MTK_FUNCTION(4, "O_TP_URTS1_AO"),
+ MTK_FUNCTION(5, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(6, "O_MD32_1_TXD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A20")
+ ),
+
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO36"),
+ MTK_FUNCTION(1, "I1_URXD2"),
+ MTK_FUNCTION(2, "I1_UCTS1"),
+ MTK_FUNCTION(3, "I1_ADSP_URXD0"),
+ MTK_FUNCTION(4, "I1_TP_UCTS1_AO"),
+ MTK_FUNCTION(5, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(6, "I1_MD32_1_RXD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A21")
+ ),
+
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO37"),
+ MTK_FUNCTION(1, "B1_JTMS_SEL1"),
+ MTK_FUNCTION(2, "I0_UDI_TMS"),
+ MTK_FUNCTION(3, "I1_SPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TMS"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TMS"),
+ MTK_FUNCTION(7, "I1_MCUPM_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO38"),
+ MTK_FUNCTION(1, "I0_JTCK_SEL1"),
+ MTK_FUNCTION(2, "I0_UDI_TCK"),
+ MTK_FUNCTION(3, "I1_SPM_JTAG_TCK"),
+ MTK_FUNCTION(4, "I0_ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TCK"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TCK"),
+ MTK_FUNCTION(7, "I1_MCUPM_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO39"),
+ MTK_FUNCTION(1, "I1_JTDI_SEL1"),
+ MTK_FUNCTION(2, "I0_UDI_TDI"),
+ MTK_FUNCTION(3, "I1_SPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TDI"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TDI"),
+ MTK_FUNCTION(7, "I1_MCUPM_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO40"),
+ MTK_FUNCTION(1, "O_JTDO_SEL1"),
+ MTK_FUNCTION(2, "O_UDI_TDO"),
+ MTK_FUNCTION(3, "O_SPM_JTAG_TDO"),
+ MTK_FUNCTION(4, "O_ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(5, "O_SCP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "O_CCU0_JTAG_TDO"),
+ MTK_FUNCTION(7, "O_MCUPM_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO41"),
+ MTK_FUNCTION(1, "I1_JTRSTn_SEL1"),
+ MTK_FUNCTION(2, "I0_UDI_NTRST"),
+ MTK_FUNCTION(3, "I0_SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(5, "I0_SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TRST"),
+ MTK_FUNCTION(7, "I0_MCUPM_JTAG_TRSTN")
+ ),
+
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO42"),
+ MTK_FUNCTION(1, "B1_KPCOL0")
+ ),
+
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO43"),
+ MTK_FUNCTION(1, "B1_KPCOL1"),
+ MTK_FUNCTION(2, "I0_DP_TX_HPD"),
+ MTK_FUNCTION(3, "O_CMFLASH2"),
+ MTK_FUNCTION(4, "I0_DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(7, "O_mbistwriteen_trigger")
+ ),
+
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO44"),
+ MTK_FUNCTION(1, "B1_KPROW0")
+ ),
+
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO45"),
+ MTK_FUNCTION(1, "B1_KPROW1"),
+ MTK_FUNCTION(2, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(3, "O_CMFLASH3"),
+ MTK_FUNCTION(4, "B0_I2SIN_MCK"),
+ MTK_FUNCTION(7, "O_mbistreaden_trigger")
+ ),
+
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO46"),
+ MTK_FUNCTION(1, "I0_DP_TX_HPD"),
+ MTK_FUNCTION(2, "O_PWM_0"),
+ MTK_FUNCTION(3, "I0_VBUSVALID_2P"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A22")
+ ),
+
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO47"),
+ MTK_FUNCTION(1, "I1_WAKEN"),
+ MTK_FUNCTION(6, "O_GDU_TROOPS_DET0")
+ ),
+
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO48"),
+ MTK_FUNCTION(1, "O_PERSTN"),
+ MTK_FUNCTION(6, "O_GDU_TROOPS_DET1")
+ ),
+
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO49"),
+ MTK_FUNCTION(1, "B1_CLKREQN"),
+ MTK_FUNCTION(6, "O_GDU_TROOPS_DET2")
+ ),
+
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO50"),
+ MTK_FUNCTION(1, "O_HDMITX20_PWR5V"),
+ MTK_FUNCTION(3, "I1_IDDIG_1P"),
+ MTK_FUNCTION(4, "I1_SCP_JTAG1_TMS"),
+ MTK_FUNCTION(5, "I1_SSPM_JTAG_TMS"),
+ MTK_FUNCTION(6, "I1_MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(7, "I1_MD32_1_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO51"),
+ MTK_FUNCTION(1, "I0_HDMITX20_HTPLG"),
+ MTK_FUNCTION(2, "I0_EDP_TX_HPD"),
+ MTK_FUNCTION(3, "O_USB_DRVVBUS_1P"),
+ MTK_FUNCTION(4, "I1_SCP_JTAG1_TCK"),
+ MTK_FUNCTION(5, "I1_SSPM_JTAG_TCK"),
+ MTK_FUNCTION(6, "I1_MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(7, "I1_MD32_1_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO52"),
+ MTK_FUNCTION(1, "B1_HDMITX20_CEC"),
+ MTK_FUNCTION(3, "I0_VBUSVALID_1P"),
+ MTK_FUNCTION(4, "I1_SCP_JTAG1_TDI"),
+ MTK_FUNCTION(5, "I1_SSPM_JTAG_TDI"),
+ MTK_FUNCTION(6, "I1_MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(7, "I1_MD32_1_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO53"),
+ MTK_FUNCTION(1, "B1_HDMITX20_SCL"),
+ MTK_FUNCTION(3, "I1_IDDIG_2P"),
+ MTK_FUNCTION(4, "O_SCP_JTAG1_TDO"),
+ MTK_FUNCTION(5, "O_SSPM_JTAG_TDO"),
+ MTK_FUNCTION(6, "O_MD32_0_JTAG_TDO"),
+ MTK_FUNCTION(7, "O_MD32_1_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO54"),
+ MTK_FUNCTION(1, "B1_HDMITX20_SDA"),
+ MTK_FUNCTION(3, "O_USB_DRVVBUS_2P"),
+ MTK_FUNCTION(4, "I0_SCP_JTAG1_TRSTN"),
+ MTK_FUNCTION(5, "I0_SSPM_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "I1_MD32_0_JTAG_TRST"),
+ MTK_FUNCTION(7, "I1_MD32_1_JTAG_TRST")
+ ),
+
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO55"),
+ MTK_FUNCTION(1, "B1_SCL0"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SCL")
+ ),
+
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO56"),
+ MTK_FUNCTION(1, "B1_SDA0"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SDA")
+ ),
+
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO57"),
+ MTK_FUNCTION(1, "B1_SCL1")
+ ),
+
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO58"),
+ MTK_FUNCTION(1, "B1_SDA1")
+ ),
+
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO59"),
+ MTK_FUNCTION(1, "B1_SCL2"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1")
+ ),
+
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO60"),
+ MTK_FUNCTION(1, "B1_SDA2"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1")
+ ),
+
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO61"),
+ MTK_FUNCTION(1, "B1_SCL3"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SCL")
+ ),
+
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO62"),
+ MTK_FUNCTION(1, "B1_SDA3"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SDA")
+ ),
+
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO63"),
+ MTK_FUNCTION(1, "B1_SCL4")
+ ),
+
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO64"),
+ MTK_FUNCTION(1, "B1_SDA4")
+ ),
+
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO65"),
+ MTK_FUNCTION(1, "B1_SCL5"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1")
+ ),
+
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO66"),
+ MTK_FUNCTION(1, "B1_SDA5"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1")
+ ),
+
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO67"),
+ MTK_FUNCTION(1, "B1_SCL6"),
+ MTK_FUNCTION(2, "B1_SCP_SCL0"),
+ MTK_FUNCTION(3, "B1_SCP_SCL1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SCL")
+ ),
+
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO68"),
+ MTK_FUNCTION(1, "B1_SDA6"),
+ MTK_FUNCTION(2, "B1_SCP_SDA0"),
+ MTK_FUNCTION(3, "B1_SCP_SDA1"),
+ MTK_FUNCTION(4, "B1_PCIE_PHY_I2C_SDA")
+ ),
+
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO69"),
+ MTK_FUNCTION(1, "O_SPIM0_CSB"),
+ MTK_FUNCTION(2, "O_SCP_SPI0_CS"),
+ MTK_FUNCTION(3, "O_DMIC3_CLK"),
+ MTK_FUNCTION(4, "B0_MD32_1_GPIO0"),
+ MTK_FUNCTION(5, "O_CMVREF0"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP0_0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A23")
+ ),
+
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO70"),
+ MTK_FUNCTION(1, "O_SPIM0_CLK"),
+ MTK_FUNCTION(2, "O_SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT"),
+ MTK_FUNCTION(4, "B0_MD32_1_GPIO1"),
+ MTK_FUNCTION(5, "O_CMVREF1"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP0_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A24")
+ ),
+
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO71"),
+ MTK_FUNCTION(1, "B0_SPIM0_MOSI"),
+ MTK_FUNCTION(2, "O_SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT_R"),
+ MTK_FUNCTION(4, "B0_MD32_1_GPIO2"),
+ MTK_FUNCTION(5, "O_CMVREF2"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP0_2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A25")
+ ),
+
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO72"),
+ MTK_FUNCTION(1, "B0_SPIM0_MISO"),
+ MTK_FUNCTION(2, "I0_SCP_SPI0_MI"),
+ MTK_FUNCTION(3, "O_DMIC4_CLK"),
+ MTK_FUNCTION(5, "O_CMVREF3"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP1_0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A26")
+ ),
+
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO73"),
+ MTK_FUNCTION(1, "B0_SPIM0_MIO2"),
+ MTK_FUNCTION(2, "O_UTXD3"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT"),
+ MTK_FUNCTION(4, "O_CLKM0"),
+ MTK_FUNCTION(5, "O_CMVREF4"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP1_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A27")
+ ),
+
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO74"),
+ MTK_FUNCTION(1, "B0_SPIM0_MIO3"),
+ MTK_FUNCTION(2, "I1_URXD3"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT_R"),
+ MTK_FUNCTION(4, "O_CLKM1"),
+ MTK_FUNCTION(5, "O_CMVREF5"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP1_2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A28")
+ ),
+
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO75"),
+ MTK_FUNCTION(1, "O_SPIM1_CSB"),
+ MTK_FUNCTION(2, "O_SCP_SPI1_A_CS"),
+ MTK_FUNCTION(3, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(4, "B1_SCP_SCL0"),
+ MTK_FUNCTION(5, "O_CMVREF6"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP2_0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A29")
+ ),
+
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO76"),
+ MTK_FUNCTION(1, "O_SPIM1_CLK"),
+ MTK_FUNCTION(2, "O_SCP_SPI1_A_CK"),
+ MTK_FUNCTION(3, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(4, "B1_SCP_SDA0"),
+ MTK_FUNCTION(5, "O_CMVREF7"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP2_1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A30")
+ ),
+
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO77"),
+ MTK_FUNCTION(1, "B0_SPIM1_MOSI"),
+ MTK_FUNCTION(2, "O_SCP_SPI1_A_MO"),
+ MTK_FUNCTION(3, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(4, "B1_SCP_SCL1"),
+ MTK_FUNCTION(6, "O_GDU_SUM_TROOP2_2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A31")
+ ),
+
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO78"),
+ MTK_FUNCTION(1, "B0_SPIM1_MISO"),
+ MTK_FUNCTION(2, "I0_SCP_SPI1_A_MI"),
+ MTK_FUNCTION(3, "I0_TDMIN_DI"),
+ MTK_FUNCTION(4, "B1_SCP_SDA1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_A32")
+ ),
+
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO79"),
+ MTK_FUNCTION(1, "O_SPIM2_CSB"),
+ MTK_FUNCTION(2, "O_SCP_SPI2_CS"),
+ MTK_FUNCTION(3, "O_I2SO1_MCK"),
+ MTK_FUNCTION(4, "O_UTXD2"),
+ MTK_FUNCTION(5, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(6, "B0_PCM_SYNC"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B0")
+ ),
+
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO80"),
+ MTK_FUNCTION(1, "O_SPIM2_CLK"),
+ MTK_FUNCTION(2, "O_SCP_SPI2_CK"),
+ MTK_FUNCTION(3, "O_I2SO1_BCK"),
+ MTK_FUNCTION(4, "I1_URXD2"),
+ MTK_FUNCTION(5, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(6, "B0_PCM_CLK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B1")
+ ),
+
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO81"),
+ MTK_FUNCTION(1, "B0_SPIM2_MOSI"),
+ MTK_FUNCTION(2, "O_SCP_SPI2_MO"),
+ MTK_FUNCTION(3, "O_I2SO1_WS"),
+ MTK_FUNCTION(4, "O_URTS2"),
+ MTK_FUNCTION(5, "O_TP_URTS2_AO"),
+ MTK_FUNCTION(6, "O_PCM_DO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B2")
+ ),
+
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO82"),
+ MTK_FUNCTION(1, "B0_SPIM2_MISO"),
+ MTK_FUNCTION(2, "I0_SCP_SPI2_MI"),
+ MTK_FUNCTION(3, "O_I2SO1_D0"),
+ MTK_FUNCTION(4, "I1_UCTS2"),
+ MTK_FUNCTION(5, "I1_TP_UCTS2_AO"),
+ MTK_FUNCTION(6, "I0_PCM_DI"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B3")
+ ),
+
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO83"),
+ MTK_FUNCTION(1, "I1_IDDIG")
+ ),
+
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO84"),
+ MTK_FUNCTION(1, "O_USB_DRVVBUS")
+ ),
+
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO85"),
+ MTK_FUNCTION(1, "I0_VBUSVALID")
+ ),
+
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO86"),
+ MTK_FUNCTION(1, "I1_IDDIG_1P"),
+ MTK_FUNCTION(2, "O_UTXD1"),
+ MTK_FUNCTION(3, "O_URTS2"),
+ MTK_FUNCTION(4, "O_PWM_2"),
+ MTK_FUNCTION(5, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(6, "O_AUXIF_ST0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B4")
+ ),
+
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO87"),
+ MTK_FUNCTION(1, "O_USB_DRVVBUS_1P"),
+ MTK_FUNCTION(2, "I1_URXD1"),
+ MTK_FUNCTION(3, "I1_UCTS2"),
+ MTK_FUNCTION(4, "O_PWM_3"),
+ MTK_FUNCTION(5, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(6, "O_AUXIF_CLK0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B5")
+ ),
+
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO88"),
+ MTK_FUNCTION(1, "I0_VBUSVALID_1P"),
+ MTK_FUNCTION(2, "O_UTXD2"),
+ MTK_FUNCTION(3, "O_URTS1"),
+ MTK_FUNCTION(4, "O_CLKM2"),
+ MTK_FUNCTION(5, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "O_AUXIF_ST1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B6")
+ ),
+
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO89"),
+ MTK_FUNCTION(1, "I1_IDDIG_2P"),
+ MTK_FUNCTION(2, "I1_URXD2"),
+ MTK_FUNCTION(3, "I1_UCTS1"),
+ MTK_FUNCTION(4, "O_CLKM3"),
+ MTK_FUNCTION(5, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "O_AUXIF_CLK1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B7")
+ ),
+
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO90"),
+ MTK_FUNCTION(1, "O_USB_DRVVBUS_2P"),
+ MTK_FUNCTION(2, "O_UTXD3"),
+ MTK_FUNCTION(3, "O_ADSP_UTXD0"),
+ MTK_FUNCTION(4, "O_SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "O_MD32_0_TXD"),
+ MTK_FUNCTION(6, "O_MD32_1_TXD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B8")
+ ),
+
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO91"),
+ MTK_FUNCTION(1, "I0_VBUSVALID_2P"),
+ MTK_FUNCTION(2, "I1_URXD3"),
+ MTK_FUNCTION(3, "I1_ADSP_URXD0"),
+ MTK_FUNCTION(4, "I1_SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "I1_MD32_0_RXD"),
+ MTK_FUNCTION(6, "I1_MD32_1_RXD"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B9")
+ ),
+
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO92"),
+ MTK_FUNCTION(1, "O_PWRAP_SPI0_CSN")
+ ),
+
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO93"),
+ MTK_FUNCTION(1, "O_PWRAP_SPI0_CK")
+ ),
+
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO94"),
+ MTK_FUNCTION(1, "B0_PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "B0_PWRAP_SPI0_MI")
+ ),
+
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO95"),
+ MTK_FUNCTION(1, "B0_PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "B0_PWRAP_SPI0_MO")
+ ),
+
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO96"),
+ MTK_FUNCTION(1, "O_SRCLKENA0")
+ ),
+
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO97"),
+ MTK_FUNCTION(1, "O_SRCLKENA1")
+ ),
+
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO98"),
+ MTK_FUNCTION(1, "O_SCP_VREQ_VAO"),
+ MTK_FUNCTION(2, "I0_DVFSRC_EXT_REQ")
+ ),
+
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO99"),
+ MTK_FUNCTION(1, "I0_RTC32K_CK")
+ ),
+
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO100"),
+ MTK_FUNCTION(1, "O_WATCHDOG")
+ ),
+
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO101"),
+ MTK_FUNCTION(1, "O_AUD_CLK_MOSI"),
+ MTK_FUNCTION(2, "O_I2SO1_MCK"),
+ MTK_FUNCTION(3, "B0_I2SIN_BCK")
+ ),
+
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO102"),
+ MTK_FUNCTION(1, "O_AUD_SYNC_MOSI"),
+ MTK_FUNCTION(2, "O_I2SO1_BCK"),
+ MTK_FUNCTION(3, "B0_I2SIN_WS")
+ ),
+
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO103"),
+ MTK_FUNCTION(1, "O_AUD_DAT_MOSI0"),
+ MTK_FUNCTION(2, "O_I2SO1_WS"),
+ MTK_FUNCTION(3, "I0_I2SIN_D0")
+ ),
+
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO104"),
+ MTK_FUNCTION(1, "O_AUD_DAT_MOSI1"),
+ MTK_FUNCTION(2, "O_I2SO1_D0"),
+ MTK_FUNCTION(3, "I0_I2SIN_D1")
+ ),
+
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO105"),
+ MTK_FUNCTION(1, "I0_AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "I0_VOW_DAT_MISO"),
+ MTK_FUNCTION(3, "I0_I2SIN_D2")
+ ),
+
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO106"),
+ MTK_FUNCTION(1, "I0_AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "I0_VOW_CLK_MISO"),
+ MTK_FUNCTION(3, "I0_I2SIN_D3")
+ ),
+
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO107"),
+ MTK_FUNCTION(1, "B0_I2SIN_MCK"),
+ MTK_FUNCTION(2, "I0_SPLIN_MCK"),
+ MTK_FUNCTION(3, "I0_SPDIF_IN0"),
+ MTK_FUNCTION(4, "O_CMVREF4"),
+ MTK_FUNCTION(5, "O_AUXIF_ST0"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR0")
+ ),
+
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO108"),
+ MTK_FUNCTION(1, "B0_I2SIN_BCK"),
+ MTK_FUNCTION(2, "I0_SPLIN_LRCK"),
+ MTK_FUNCTION(3, "O_DMIC4_CLK"),
+ MTK_FUNCTION(4, "O_CMVREF5"),
+ MTK_FUNCTION(5, "O_AUXIF_CLK0"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR1"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B10")
+ ),
+
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO109"),
+ MTK_FUNCTION(1, "B0_I2SIN_WS"),
+ MTK_FUNCTION(2, "I0_SPLIN_BCK"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT"),
+ MTK_FUNCTION(4, "O_CMVREF6"),
+ MTK_FUNCTION(5, "O_AUXIF_ST1"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR2"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B11")
+ ),
+
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO110"),
+ MTK_FUNCTION(1, "I0_I2SIN_D0"),
+ MTK_FUNCTION(2, "I0_SPLIN_D0"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT_R"),
+ MTK_FUNCTION(4, "O_CMVREF7"),
+ MTK_FUNCTION(5, "O_AUXIF_CLK1"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR3"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B12")
+ ),
+
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO111"),
+ MTK_FUNCTION(1, "I0_I2SIN_D1"),
+ MTK_FUNCTION(2, "I0_SPLIN_D1"),
+ MTK_FUNCTION(3, "O_DMIC3_CLK"),
+ MTK_FUNCTION(4, "O_SPDIF_OUT"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR4"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B13")
+ ),
+
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO112"),
+ MTK_FUNCTION(1, "I0_I2SIN_D2"),
+ MTK_FUNCTION(2, "I0_SPLIN_D2"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT"),
+ MTK_FUNCTION(4, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(5, "O_I2SO1_WS"),
+ MTK_FUNCTION(6, "O_PGD_LV_LSC_PWR5"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B14")
+ ),
+
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO113"),
+ MTK_FUNCTION(1, "I0_I2SIN_D3"),
+ MTK_FUNCTION(2, "I0_SPLIN_D3"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT_R"),
+ MTK_FUNCTION(4, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(5, "O_I2SO1_D0"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B15")
+ ),
+
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO114"),
+ MTK_FUNCTION(1, "O_I2SO2_MCK"),
+ MTK_FUNCTION(2, "B0_I2SIN_MCK"),
+ MTK_FUNCTION(3, "I1_MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "B1_APU_JTAG_TMS"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG1_TMS"),
+ MTK_FUNCTION(6, "I1_SPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B16")
+ ),
+
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO115"),
+ MTK_FUNCTION(1, "B0_I2SO2_BCK"),
+ MTK_FUNCTION(2, "B0_I2SIN_BCK"),
+ MTK_FUNCTION(3, "I1_MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(4, "I0_APU_JTAG_TCK"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG1_TCK"),
+ MTK_FUNCTION(6, "I1_SPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B17")
+ ),
+
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO116"),
+ MTK_FUNCTION(1, "B0_I2SO2_WS"),
+ MTK_FUNCTION(2, "B0_I2SIN_WS"),
+ MTK_FUNCTION(3, "I1_MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "I1_APU_JTAG_TDI"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG1_TDI"),
+ MTK_FUNCTION(6, "I1_SPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B18")
+ ),
+
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO117"),
+ MTK_FUNCTION(1, "O_I2SO2_D0"),
+ MTK_FUNCTION(2, "I0_I2SIN_D0"),
+ MTK_FUNCTION(3, "O_MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(4, "O_APU_JTAG_TDO"),
+ MTK_FUNCTION(5, "O_SCP_JTAG1_TDO"),
+ MTK_FUNCTION(6, "O_SPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B19")
+ ),
+
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO118"),
+ MTK_FUNCTION(1, "O_I2SO2_D1"),
+ MTK_FUNCTION(2, "I0_I2SIN_D1"),
+ MTK_FUNCTION(3, "I0_MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "I0_APU_JTAG_TRST"),
+ MTK_FUNCTION(5, "I0_SCP_JTAG1_TRSTN"),
+ MTK_FUNCTION(6, "I0_SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B20")
+ ),
+
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO119"),
+ MTK_FUNCTION(1, "O_I2SO2_D2"),
+ MTK_FUNCTION(2, "I0_I2SIN_D2"),
+ MTK_FUNCTION(3, "O_UTXD3"),
+ MTK_FUNCTION(4, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(5, "O_I2SO1_MCK"),
+ MTK_FUNCTION(6, "O_SSPM_UTXD_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B21")
+ ),
+
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO120"),
+ MTK_FUNCTION(1, "O_I2SO2_D3"),
+ MTK_FUNCTION(2, "I0_I2SIN_D3"),
+ MTK_FUNCTION(3, "I1_URXD3"),
+ MTK_FUNCTION(4, "I0_TDMIN_DI"),
+ MTK_FUNCTION(5, "O_I2SO1_BCK"),
+ MTK_FUNCTION(6, "I1_SSPM_URXD_AO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B22")
+ ),
+
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO121"),
+ MTK_FUNCTION(1, "B0_PCM_CLK"),
+ MTK_FUNCTION(2, "O_SPIM4_CSB"),
+ MTK_FUNCTION(3, "O_SCP_SPI1_B_CS"),
+ MTK_FUNCTION(4, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(5, "O_AUXIF_ST0"),
+ MTK_FUNCTION(6, "O_PGD_DA_EFUSE_RDY"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B23")
+ ),
+
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO122"),
+ MTK_FUNCTION(1, "B0_PCM_SYNC"),
+ MTK_FUNCTION(2, "O_SPIM4_CLK"),
+ MTK_FUNCTION(3, "O_SCP_SPI1_B_CK"),
+ MTK_FUNCTION(4, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(5, "O_AUXIF_CLK0"),
+ MTK_FUNCTION(6, "O_PGD_DA_EFUSE_RDY_PRE"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B24")
+ ),
+
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO123"),
+ MTK_FUNCTION(1, "O_PCM_DO"),
+ MTK_FUNCTION(2, "B0_SPIM4_MOSI"),
+ MTK_FUNCTION(3, "O_SCP_SPI1_B_MO"),
+ MTK_FUNCTION(4, "O_TP_URTS2_AO"),
+ MTK_FUNCTION(5, "O_AUXIF_ST1"),
+ MTK_FUNCTION(6, "O_PGD_DA_PWRGD_RESET"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B25")
+ ),
+
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO124"),
+ MTK_FUNCTION(1, "I0_PCM_DI"),
+ MTK_FUNCTION(2, "B0_SPIM4_MISO"),
+ MTK_FUNCTION(3, "I0_SCP_SPI1_B_MI"),
+ MTK_FUNCTION(4, "I1_TP_UCTS2_AO"),
+ MTK_FUNCTION(5, "O_AUXIF_CLK1"),
+ MTK_FUNCTION(6, "O_PGD_DA_PWRGD_ENB"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B26")
+ ),
+
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO125"),
+ MTK_FUNCTION(1, "O_DMIC1_CLK"),
+ MTK_FUNCTION(2, "O_SPINOR_CK"),
+ MTK_FUNCTION(3, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(6, "O_LVTS_FOUT"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B27")
+ ),
+
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO126"),
+ MTK_FUNCTION(1, "I0_DMIC1_DAT"),
+ MTK_FUNCTION(2, "O_SPINOR_CS"),
+ MTK_FUNCTION(3, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(6, "O_LVTS_SDO"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B28")
+ ),
+
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO127"),
+ MTK_FUNCTION(1, "I0_DMIC1_DAT_R"),
+ MTK_FUNCTION(2, "B0_SPINOR_IO0"),
+ MTK_FUNCTION(3, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(6, "I0_LVTS_26M"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B29")
+ ),
+
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO128"),
+ MTK_FUNCTION(1, "O_DMIC2_CLK"),
+ MTK_FUNCTION(2, "B0_SPINOR_IO1"),
+ MTK_FUNCTION(3, "I0_TDMIN_DI"),
+ MTK_FUNCTION(6, "I0_LVTS_SCF"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B30")
+ ),
+
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO129"),
+ MTK_FUNCTION(1, "I0_DMIC2_DAT"),
+ MTK_FUNCTION(2, "B0_SPINOR_IO2"),
+ MTK_FUNCTION(3, "I0_SPDIF_IN1"),
+ MTK_FUNCTION(6, "I0_LVTS_SCK"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B31")
+ ),
+
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO130"),
+ MTK_FUNCTION(1, "I0_DMIC2_DAT_R"),
+ MTK_FUNCTION(2, "B0_SPINOR_IO3"),
+ MTK_FUNCTION(3, "I0_SPDIF_IN2"),
+ MTK_FUNCTION(6, "I0_LVTS_SDI"),
+ MTK_FUNCTION(7, "B0_DBG_MON_B32")
+ ),
+
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO131"),
+ MTK_FUNCTION(1, "O_DPI_D0"),
+ MTK_FUNCTION(2, "O_GBE_TXD3"),
+ MTK_FUNCTION(3, "O_DMIC1_CLK"),
+ MTK_FUNCTION(4, "O_I2SO2_MCK"),
+ MTK_FUNCTION(5, "B0_TP_GPIO0_AO"),
+ MTK_FUNCTION(6, "O_SPIM5_CSB"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR0")
+ ),
+
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO132"),
+ MTK_FUNCTION(1, "O_DPI_D1"),
+ MTK_FUNCTION(2, "O_GBE_TXD2"),
+ MTK_FUNCTION(3, "I0_DMIC1_DAT"),
+ MTK_FUNCTION(4, "B0_I2SO2_BCK"),
+ MTK_FUNCTION(5, "B0_TP_GPIO1_AO"),
+ MTK_FUNCTION(6, "O_SPIM5_CLK"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR1")
+ ),
+
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO133"),
+ MTK_FUNCTION(1, "O_DPI_D2"),
+ MTK_FUNCTION(2, "O_GBE_TXD1"),
+ MTK_FUNCTION(3, "I0_DMIC1_DAT_R"),
+ MTK_FUNCTION(4, "B0_I2SO2_WS"),
+ MTK_FUNCTION(5, "B0_TP_GPIO2_AO"),
+ MTK_FUNCTION(6, "B0_SPIM5_MOSI"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR2")
+ ),
+
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO134"),
+ MTK_FUNCTION(1, "O_DPI_D3"),
+ MTK_FUNCTION(2, "O_GBE_TXD0"),
+ MTK_FUNCTION(3, "O_DMIC2_CLK"),
+ MTK_FUNCTION(4, "O_I2SO2_D0"),
+ MTK_FUNCTION(5, "B0_TP_GPIO3_AO"),
+ MTK_FUNCTION(6, "B0_SPIM5_MISO"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR3")
+ ),
+
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO135"),
+ MTK_FUNCTION(1, "O_DPI_D4"),
+ MTK_FUNCTION(2, "I0_GBE_RXD3"),
+ MTK_FUNCTION(3, "I0_DMIC2_DAT"),
+ MTK_FUNCTION(4, "O_I2SO2_D1"),
+ MTK_FUNCTION(5, "B0_TP_GPIO4_AO"),
+ MTK_FUNCTION(6, "I1_WAKEN"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR4")
+ ),
+
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO136"),
+ MTK_FUNCTION(1, "O_DPI_D5"),
+ MTK_FUNCTION(2, "I0_GBE_RXD2"),
+ MTK_FUNCTION(3, "I0_DMIC2_DAT_R"),
+ MTK_FUNCTION(4, "O_I2SO2_D2"),
+ MTK_FUNCTION(5, "B0_TP_GPIO5_AO"),
+ MTK_FUNCTION(6, "O_PERSTN"),
+ MTK_FUNCTION(7, "O_PGD_LV_HSC_PWR5")
+ ),
+
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO137"),
+ MTK_FUNCTION(1, "O_DPI_D6"),
+ MTK_FUNCTION(2, "I0_GBE_RXD1"),
+ MTK_FUNCTION(3, "O_DMIC3_CLK"),
+ MTK_FUNCTION(4, "O_I2SO2_D3"),
+ MTK_FUNCTION(5, "B0_TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "B1_CLKREQN"),
+ MTK_FUNCTION(7, "O_PWM_0")
+ ),
+
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO138"),
+ MTK_FUNCTION(1, "O_DPI_D7"),
+ MTK_FUNCTION(2, "I0_GBE_RXD0"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT"),
+ MTK_FUNCTION(4, "O_CLKM2"),
+ MTK_FUNCTION(5, "B0_TP_GPIO7_AO"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO0")
+ ),
+
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO139"),
+ MTK_FUNCTION(1, "O_DPI_D8"),
+ MTK_FUNCTION(2, "B0_GBE_TXC"),
+ MTK_FUNCTION(3, "I0_DMIC3_DAT_R"),
+ MTK_FUNCTION(4, "O_CLKM3"),
+ MTK_FUNCTION(5, "O_TP_UTXD2_AO"),
+ MTK_FUNCTION(6, "O_UTXD2"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO1")
+ ),
+
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO140"),
+ MTK_FUNCTION(1, "O_DPI_D9"),
+ MTK_FUNCTION(2, "I0_GBE_RXC"),
+ MTK_FUNCTION(3, "O_DMIC4_CLK"),
+ MTK_FUNCTION(4, "O_PWM_2"),
+ MTK_FUNCTION(5, "I1_TP_URXD2_AO"),
+ MTK_FUNCTION(6, "I1_URXD2"),
+ MTK_FUNCTION(7, "B0_MD32_0_GPIO2")
+ ),
+
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO141"),
+ MTK_FUNCTION(1, "O_DPI_D10"),
+ MTK_FUNCTION(2, "I0_GBE_RXDV"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT"),
+ MTK_FUNCTION(4, "O_PWM_3"),
+ MTK_FUNCTION(5, "O_TP_URTS2_AO"),
+ MTK_FUNCTION(6, "O_URTS2"),
+ MTK_FUNCTION(7, "B0_MD32_1_GPIO0")
+ ),
+
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO142"),
+ MTK_FUNCTION(1, "O_DPI_D11"),
+ MTK_FUNCTION(2, "O_GBE_TXEN"),
+ MTK_FUNCTION(3, "I0_DMIC4_DAT_R"),
+ MTK_FUNCTION(4, "O_PWM_1"),
+ MTK_FUNCTION(5, "I1_TP_UCTS2_AO"),
+ MTK_FUNCTION(6, "I1_UCTS2"),
+ MTK_FUNCTION(7, "B0_MD32_1_GPIO1")
+ ),
+
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO143"),
+ MTK_FUNCTION(1, "O_DPI_D12"),
+ MTK_FUNCTION(2, "O_GBE_MDC"),
+ MTK_FUNCTION(3, "B0_MD32_0_GPIO0"),
+ MTK_FUNCTION(4, "O_CLKM0"),
+ MTK_FUNCTION(5, "O_SPIM3_CSB"),
+ MTK_FUNCTION(6, "O_UTXD1"),
+ MTK_FUNCTION(7, "B0_MD32_1_GPIO2")
+ ),
+
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO144"),
+ MTK_FUNCTION(1, "O_DPI_D13"),
+ MTK_FUNCTION(2, "B1_GBE_MDIO"),
+ MTK_FUNCTION(3, "B0_MD32_0_GPIO1"),
+ MTK_FUNCTION(4, "O_CLKM1"),
+ MTK_FUNCTION(5, "O_SPIM3_CLK"),
+ MTK_FUNCTION(6, "I1_URXD1"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR0")
+ ),
+
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO145"),
+ MTK_FUNCTION(1, "O_DPI_D14"),
+ MTK_FUNCTION(2, "O_GBE_TXER"),
+ MTK_FUNCTION(3, "B0_MD32_1_GPIO0"),
+ MTK_FUNCTION(4, "O_CMFLASH0"),
+ MTK_FUNCTION(5, "B0_SPIM3_MOSI"),
+ MTK_FUNCTION(6, "B0_GBE_AUX_PPS2"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR1")
+ ),
+
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO146"),
+ MTK_FUNCTION(1, "O_DPI_D15"),
+ MTK_FUNCTION(2, "I0_GBE_RXER"),
+ MTK_FUNCTION(3, "B0_MD32_1_GPIO1"),
+ MTK_FUNCTION(4, "O_CMFLASH1"),
+ MTK_FUNCTION(5, "B0_SPIM3_MISO"),
+ MTK_FUNCTION(6, "B0_GBE_AUX_PPS3"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR2")
+ ),
+
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO147"),
+ MTK_FUNCTION(1, "O_DPI_HSYNC"),
+ MTK_FUNCTION(2, "I0_GBE_COL"),
+ MTK_FUNCTION(3, "O_I2SO1_MCK"),
+ MTK_FUNCTION(4, "O_CMVREF0"),
+ MTK_FUNCTION(5, "O_SPDIF_OUT"),
+ MTK_FUNCTION(6, "O_URTS1"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR3")
+ ),
+
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO148"),
+ MTK_FUNCTION(1, "O_DPI_VSYNC"),
+ MTK_FUNCTION(2, "I0_GBE_INTR"),
+ MTK_FUNCTION(3, "O_I2SO1_BCK"),
+ MTK_FUNCTION(4, "O_CMVREF1"),
+ MTK_FUNCTION(5, "I0_SPDIF_IN0"),
+ MTK_FUNCTION(6, "I1_UCTS1"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR4")
+ ),
+
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO149"),
+ MTK_FUNCTION(1, "O_DPI_DE"),
+ MTK_FUNCTION(2, "B0_GBE_AUX_PPS0"),
+ MTK_FUNCTION(3, "O_I2SO1_WS"),
+ MTK_FUNCTION(4, "O_CMVREF2"),
+ MTK_FUNCTION(5, "I0_SPDIF_IN1"),
+ MTK_FUNCTION(6, "O_UTXD3"),
+ MTK_FUNCTION(7, "O_PGD_HV_HSC_PWR5")
+ ),
+
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO150"),
+ MTK_FUNCTION(1, "O_DPI_CK"),
+ MTK_FUNCTION(2, "B0_GBE_AUX_PPS1"),
+ MTK_FUNCTION(3, "O_I2SO1_D0"),
+ MTK_FUNCTION(4, "O_CMVREF3"),
+ MTK_FUNCTION(5, "I0_SPDIF_IN2"),
+ MTK_FUNCTION(6, "I1_URXD3")
+ ),
+
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO151"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT7")
+ ),
+
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO152"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT6")
+ ),
+
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO153"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT5")
+ ),
+
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO154"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT4")
+ ),
+
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO155"),
+ MTK_FUNCTION(1, "O_MSDC0_RSTB")
+ ),
+
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO156"),
+ MTK_FUNCTION(1, "B1_MSDC0_CMD")
+ ),
+
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO157"),
+ MTK_FUNCTION(1, "B1_MSDC0_CLK")
+ ),
+
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO158"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT3")
+ ),
+
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO159"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT2")
+ ),
+
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO160"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT1")
+ ),
+
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO161"),
+ MTK_FUNCTION(1, "B1_MSDC0_DAT0")
+ ),
+
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO162"),
+ MTK_FUNCTION(1, "B0_MSDC0_DSL")
+ ),
+
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO163"),
+ MTK_FUNCTION(1, "B1_MSDC1_CMD"),
+ MTK_FUNCTION(2, "O_SPDIF_OUT"),
+ MTK_FUNCTION(3, "I1_MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TMS"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TMS"),
+ MTK_FUNCTION(7, "I0_IPU_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO164"),
+ MTK_FUNCTION(1, "B1_MSDC1_CLK"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN0"),
+ MTK_FUNCTION(3, "I1_MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(4, "I0_ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TCK"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TCK"),
+ MTK_FUNCTION(7, "I0_IPU_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO165"),
+ MTK_FUNCTION(1, "B1_MSDC1_DAT0"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN1"),
+ MTK_FUNCTION(3, "I1_MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "I1_SCP_JTAG0_TDI"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TDI"),
+ MTK_FUNCTION(7, "I0_IPU_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO166"),
+ MTK_FUNCTION(1, "B1_MSDC1_DAT1"),
+ MTK_FUNCTION(2, "I0_SPDIF_IN2"),
+ MTK_FUNCTION(3, "O_MD32_0_JTAG_TDO"),
+ MTK_FUNCTION(4, "O_ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(5, "O_SCP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "O_CCU0_JTAG_TDO"),
+ MTK_FUNCTION(7, "O_IPU_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO167"),
+ MTK_FUNCTION(1, "B1_MSDC1_DAT2"),
+ MTK_FUNCTION(2, "O_PWM_0"),
+ MTK_FUNCTION(3, "I1_MD32_0_JTAG_TRST"),
+ MTK_FUNCTION(4, "I1_ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(5, "I0_SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "I1_CCU0_JTAG_TRST"),
+ MTK_FUNCTION(7, "I0_IPU_JTAG_TRST")
+ ),
+
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO168"),
+ MTK_FUNCTION(1, "B1_MSDC1_DAT3"),
+ MTK_FUNCTION(2, "O_PWM_1"),
+ MTK_FUNCTION(3, "O_CLKM0")
+ ),
+
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO169"),
+ MTK_FUNCTION(1, "B1_MSDC2_CMD"),
+ MTK_FUNCTION(2, "O_LVTS_FOUT"),
+ MTK_FUNCTION(3, "I1_MD32_1_JTAG_TMS"),
+ MTK_FUNCTION(4, "I0_UDI_TMS"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TMS"),
+ MTK_FUNCTION(6, "B0_TDMIN_MCK"),
+ MTK_FUNCTION(7, "I1_SSPM_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO170"),
+ MTK_FUNCTION(1, "B1_MSDC2_CLK"),
+ MTK_FUNCTION(2, "O_LVTS_SDO"),
+ MTK_FUNCTION(3, "I1_MD32_1_JTAG_TCK"),
+ MTK_FUNCTION(4, "I0_UDI_TCK"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TCK"),
+ MTK_FUNCTION(6, "B0_TDMIN_BCK"),
+ MTK_FUNCTION(7, "I1_SSPM_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO171"),
+ MTK_FUNCTION(1, "B1_MSDC2_DAT0"),
+ MTK_FUNCTION(2, "I0_LVTS_26M"),
+ MTK_FUNCTION(3, "I1_MD32_1_JTAG_TDI"),
+ MTK_FUNCTION(4, "I0_UDI_TDI"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_TDI"),
+ MTK_FUNCTION(6, "B0_TDMIN_LRCK"),
+ MTK_FUNCTION(7, "I1_SSPM_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO172"),
+ MTK_FUNCTION(1, "B1_MSDC2_DAT1"),
+ MTK_FUNCTION(2, "I0_LVTS_SCF"),
+ MTK_FUNCTION(3, "O_MD32_1_JTAG_TDO"),
+ MTK_FUNCTION(4, "O_UDI_TDO"),
+ MTK_FUNCTION(5, "O_VPU_UDI_TDO"),
+ MTK_FUNCTION(6, "I0_TDMIN_DI"),
+ MTK_FUNCTION(7, "O_SSPM_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO173"),
+ MTK_FUNCTION(1, "B1_MSDC2_DAT2"),
+ MTK_FUNCTION(2, "I0_LVTS_SCK"),
+ MTK_FUNCTION(3, "I1_MD32_1_JTAG_TRST"),
+ MTK_FUNCTION(4, "I0_UDI_NTRST"),
+ MTK_FUNCTION(5, "I0_VPU_UDI_NTRST"),
+ MTK_FUNCTION(7, "I0_SSPM_JTAG_TRSTN")
+ ),
+
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO174"),
+ MTK_FUNCTION(1, "B1_MSDC2_DAT3"),
+ MTK_FUNCTION(2, "I0_LVTS_SDI")
+ ),
+
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO175"),
+ MTK_FUNCTION(1, "B0_SPMI_M_SCL")
+ ),
+
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, 176),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "B_GPIO176"),
+ MTK_FUNCTION(1, "B0_SPMI_M_SDA")
+ ),
+
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, 212),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 213),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 214),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 215),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 217),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 218),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 219),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 220),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 221),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 222),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(0, 223),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(0, 224),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ )
+};
+
+#endif /* __PINCTRL__MTK_MT8188_H */
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index cc2cd73ff8f9..530f3f934e19 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -608,6 +608,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.label = pc->data->name;
pc->chip.parent = pc->dev;
+ pc->chip.fwnode = pc->fwnode;
pc->chip.request = gpiochip_generic_request;
pc->chip.free = gpiochip_generic_free;
pc->chip.set_config = gpiochip_generic_config;
@@ -619,8 +620,6 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
- pc->chip.of_node = pc->of_node;
- pc->chip.of_gpio_n_cells = 2;
ret = gpiochip_add_data(&pc->chip, pc);
if (ret) {
@@ -678,8 +677,8 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc)
return -EINVAL;
}
- gpio_np = to_of_node(gpiochip_node_get_first(pc->dev));
- pc->of_node = gpio_np;
+ pc->fwnode = gpiochip_node_get_first(pc->dev);
+ gpio_np = to_of_node(pc->fwnode);
pc->reg_mux = meson_map_resource(pc, gpio_np, "mux");
if (IS_ERR_OR_NULL(pc->reg_mux)) {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index b197827027bd..34fc4e8612e4 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -12,6 +12,8 @@
#include <linux/types.h>
#include <linux/module.h>
+struct fwnode_handle;
+
struct meson_pinctrl;
/**
@@ -131,7 +133,7 @@ struct meson_pinctrl {
struct regmap *reg_gpio;
struct regmap *reg_ds;
struct gpio_chip chip;
- struct device_node *of_node;
+ struct fwnode_handle *fwnode;
};
#define FUNCTION(fn) \
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index bcde042d29dc..261b46841b9f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -112,14 +112,14 @@ struct armada_37xx_pinctrl {
struct armada_37xx_pm_state pm;
};
-#define PIN_GRP(_name, _start, _nr, _mask, _func1, _func2) \
+#define PIN_GRP_GPIO_0(_name, _start, _nr) \
{ \
.name = _name, \
.start_pin = _start, \
.npins = _nr, \
- .reg_mask = _mask, \
- .val = {0, _mask}, \
- .funcs = {_func1, _func2} \
+ .reg_mask = 0, \
+ .val = {0}, \
+ .funcs = {"gpio"} \
}
#define PIN_GRP_GPIO(_name, _start, _nr, _mask, _func1) \
@@ -179,6 +179,7 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
"pwm", "led"),
PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
+ PIN_GRP_GPIO_0("gpio1_5", 5, 1),
PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
PIN_GRP_GPIO("i2c1", 0, 2, BIT(10), "i2c"),
PIN_GRP_GPIO("spi_cs1", 17, 1, BIT(12), "spi"),
@@ -195,15 +196,18 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
+ PIN_GRP_GPIO_0("gpio2_2", 2, 1),
PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
PIN_GRP_GPIO("smi", 18, 2, BIT(4), "smi"),
PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"), /* this actually controls "pcie1_reset" */
PIN_GRP_GPIO("pcie1_clkreq", 4, 1, BIT(9), "pcie"),
PIN_GRP_GPIO("pcie1_wakeup", 5, 1, BIT(10), "pcie"),
- PIN_GRP_GPIO("ptp", 20, 3, BIT(11) | BIT(12) | BIT(13), "ptp"),
- PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
- PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
+ PIN_GRP_GPIO("ptp", 20, 1, BIT(11), "ptp"),
+ PIN_GRP_GPIO_3("ptp_clk", 21, 1, BIT(6) | BIT(12), 0, BIT(6), BIT(12),
+ "ptp", "mii"),
+ PIN_GRP_GPIO_3("ptp_trig", 22, 1, BIT(7) | BIT(13), 0, BIT(7), BIT(13),
+ "ptp", "mii"),
PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
"mii", "mii_err"),
};
@@ -486,11 +490,15 @@ static int armada_37xx_gpio_request_enable(struct pinctrl_dev *pctldev,
struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct armada_37xx_pin_group *group;
int grp = 0;
+ int ret;
dev_dbg(info->dev, "requesting gpio %d\n", offset);
- while ((group = armada_37xx_find_next_grp_by_pin(info, offset, &grp)))
- armada_37xx_pmx_set_by_name(pctldev, "gpio", group);
+ while ((group = armada_37xx_find_next_grp_by_pin(info, offset, &grp))) {
+ ret = armada_37xx_pmx_set_by_name(pctldev, "gpio", group);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index ac3d4d91266d..758d21f0a850 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -674,163 +674,160 @@ static const unsigned hwobs_oc4_1_pins[] = { DB8500_PIN_D17, DB8500_PIN_D16,
DB8500_PIN_D21, DB8500_PIN_D20, DB8500_PIN_C20, DB8500_PIN_B21,
DB8500_PIN_C21, DB8500_PIN_A22, DB8500_PIN_B24, DB8500_PIN_C22 };
-#define DB8500_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
- .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
-
static const struct nmk_pingroup nmk_db8500_groups[] = {
/* Altfunction A column */
- DB8500_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(u1rxtx_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(u1ctsrts_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ipi2c_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ipi2c_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp0txrx_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc0_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcdb_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcdvsi0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcdvsi1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcd_d0_d7_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcd_d8_d11_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcd_d12_d15_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(lcd_d12_d23_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc2_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ipgpio0_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(ipgpio1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(modem_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(kp_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp2sck_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(msp2_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc4_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc1_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(mc1dir_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(clkout1_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(clkout1_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(clkout2_a_1, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(clkout2_a_2, NMK_GPIO_ALT_A),
- DB8500_PIN_GROUP(usb_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u1rxtx_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u1ctsrts_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ipi2c_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ipi2c_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp0txrx_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc0_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcdb_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcdvsi0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcdvsi1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcd_d0_d7_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcd_d8_d11_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcd_d12_d15_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(lcd_d12_d23_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc2_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ipgpio0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(ipgpio1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(modem_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(kp_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp2sck_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(msp2_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc4_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc1_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mc1dir_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(clkout1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(clkout1_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(clkout2_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(clkout2_a_2, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(usb_a_1, NMK_GPIO_ALT_A),
/* Altfunction B column */
- DB8500_PIN_GROUP(trig_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c4_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c1_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c2_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c2_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(msp0txrx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(i2c1_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(u2rxtx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(uartmodtx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(msp0sck_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(uartmodrx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(stmmod_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(uartmodrx_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(ipgpio7_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(ipgpio2_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(ipgpio3_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(lcdaclk_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(lcda_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(lcd_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(lcd_d16_d23_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(ddrtrig_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(pwl_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(spi1_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(mc3_b_1, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(pwl_b_2, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(pwl_b_3, NMK_GPIO_ALT_B),
- DB8500_PIN_GROUP(pwl_b_4, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(trig_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c4_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c1_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c2_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c2_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(msp0txrx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2c1_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(u2rxtx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(uartmodtx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(msp0sck_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(uartmodrx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(stmmod_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(uartmodrx_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(ipgpio7_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(ipgpio2_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(ipgpio3_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(lcdaclk_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(lcda_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(lcd_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(lcd_d16_d23_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(ddrtrig_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(pwl_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(spi1_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(mc3_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(pwl_b_2, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(pwl_b_3, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(pwl_b_4, NMK_GPIO_ALT_B),
/* Altfunction C column */
- DB8500_PIN_GROUP(ipjtag_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio6_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio1_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio3_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio2_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(slim0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ms_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(iptrigout_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u2rxtx_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u2ctsrts_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio4_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio5_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio6_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio7_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(smcleale_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(stmape_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u2rxtx_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio2_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio3_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio4_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio5_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(mc5_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(mc2rstn_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(kp_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(smps0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(smps1_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(u2rxtx_c_3, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(stmape_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(uartmodrx_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(uartmodtx_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(stmmod_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(usbsim_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(mc4rstn_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(clkout1_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(clkout2_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(i2c3_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(usbsim_c_2, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(i2c3_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipjtag_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio6_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio1_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio3_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio2_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(slim0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ms_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(iptrigout_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u2rxtx_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u2ctsrts_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio4_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio5_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio6_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio7_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(smcleale_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(stmape_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u2rxtx_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio2_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio3_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio4_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(ipgpio5_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(mc5_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(mc2rstn_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(kp_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(smps0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(smps1_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u2rxtx_c_3, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(stmape_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(uartmodrx_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(uartmodtx_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(stmmod_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(usbsim_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(mc4rstn_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(clkout1_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(clkout2_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(i2c3_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(usbsim_c_2, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(i2c3_c_2, NMK_GPIO_ALT_C),
/* Other alt C1 column */
- DB8500_PIN_GROUP(u2rx_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(stmape_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(remap0_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(remap1_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(ptma9_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(rf_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(hxclk_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(uartmodrx_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(uartmodtx_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(stmmod_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(hxgpio_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(rf_oc1_2, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(spi2_oc1_1, NMK_GPIO_ALT_C1),
- DB8500_PIN_GROUP(spi2_oc1_2, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(u2rx_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(stmape_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(remap0_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(remap1_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(ptma9_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(rf_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(hxclk_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(uartmodrx_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(uartmodtx_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(stmmod_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(hxgpio_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(rf_oc1_2, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(spi2_oc1_1, NMK_GPIO_ALT_C1),
+ NMK_PIN_GROUP(spi2_oc1_2, NMK_GPIO_ALT_C1),
/* Other alt C2 column */
- DB8500_PIN_GROUP(sbag_oc2_1, NMK_GPIO_ALT_C2),
- DB8500_PIN_GROUP(etmr4_oc2_1, NMK_GPIO_ALT_C2),
- DB8500_PIN_GROUP(ptma9_oc2_1, NMK_GPIO_ALT_C2),
+ NMK_PIN_GROUP(sbag_oc2_1, NMK_GPIO_ALT_C2),
+ NMK_PIN_GROUP(etmr4_oc2_1, NMK_GPIO_ALT_C2),
+ NMK_PIN_GROUP(ptma9_oc2_1, NMK_GPIO_ALT_C2),
/* Other alt C3 column */
- DB8500_PIN_GROUP(stmmod_oc3_1, NMK_GPIO_ALT_C3),
- DB8500_PIN_GROUP(stmmod_oc3_2, NMK_GPIO_ALT_C3),
- DB8500_PIN_GROUP(uartmodrx_oc3_1, NMK_GPIO_ALT_C3),
- DB8500_PIN_GROUP(uartmodtx_oc3_1, NMK_GPIO_ALT_C3),
- DB8500_PIN_GROUP(etmr4_oc3_1, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(stmmod_oc3_1, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(stmmod_oc3_2, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(uartmodrx_oc3_1, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(uartmodtx_oc3_1, NMK_GPIO_ALT_C3),
+ NMK_PIN_GROUP(etmr4_oc3_1, NMK_GPIO_ALT_C3),
/* Other alt C4 column */
- DB8500_PIN_GROUP(sbag_oc4_1, NMK_GPIO_ALT_C4),
- DB8500_PIN_GROUP(hwobs_oc4_1, NMK_GPIO_ALT_C4),
+ NMK_PIN_GROUP(sbag_oc4_1, NMK_GPIO_ALT_C4),
+ NMK_PIN_GROUP(hwobs_oc4_1, NMK_GPIO_ALT_C4),
};
/* We use this macro to define the groups applicable to a function */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 8d944bb3a036..c0d7c86d0939 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -303,23 +303,20 @@ static const unsigned usbhs_c_1_pins[] = { STN8815_PIN_E21, STN8815_PIN_E20,
STN8815_PIN_C16, STN8815_PIN_A15,
STN8815_PIN_D17, STN8815_PIN_C17 };
-#define STN8815_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
- .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
-
static const struct nmk_pingroup nmk_stn8815_groups[] = {
- STN8815_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(u0ctsrts_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(u0modem_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(i2c1_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
- STN8815_PIN_GROUP(u1_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(i2cusb_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(clcd_16_23_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(usbfs_b_1, NMK_GPIO_ALT_B),
- STN8815_PIN_GROUP(usbhs_c_1, NMK_GPIO_ALT_C),
+ NMK_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u0ctsrts_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u0modem_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(i2c1_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
+ NMK_PIN_GROUP(u1_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(i2cusb_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(clcd_16_23_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(usbfs_b_1, NMK_GPIO_ALT_B),
+ NMK_PIN_GROUP(usbhs_c_1, NMK_GPIO_ALT_C),
};
/* We use this macro to define the groups applicable to a function */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index f5014d09d81a..f7d02513d8cc 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -244,7 +244,6 @@ enum nmk_gpio_slpm {
struct nmk_gpio_chip {
struct gpio_chip chip;
- struct irq_chip irqchip;
void __iomem *addr;
struct clk *clk;
unsigned int bank;
@@ -608,8 +607,8 @@ static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev,
static void nmk_gpio_irq_ack(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
clk_enable(nmk_chip->clk);
writel(BIT(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
@@ -675,15 +674,11 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
__nmk_gpio_irq_modify(nmk_chip, offset, WAKE, on);
}
-static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
+static void nmk_gpio_irq_maskunmask(struct nmk_gpio_chip *nmk_chip,
+ struct irq_data *d, bool enable)
{
- struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- nmk_chip = irq_data_get_irq_chip_data(d);
- if (!nmk_chip)
- return -EINVAL;
-
clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
@@ -696,29 +691,32 @@ static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
spin_unlock(&nmk_chip->lock);
spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
clk_disable(nmk_chip->clk);
-
- return 0;
}
static void nmk_gpio_irq_mask(struct irq_data *d)
{
- nmk_gpio_irq_maskunmask(d, false);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ nmk_gpio_irq_maskunmask(nmk_chip, d, false);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void nmk_gpio_irq_unmask(struct irq_data *d)
{
- nmk_gpio_irq_maskunmask(d, true);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+ nmk_gpio_irq_maskunmask(nmk_chip, d, true);
}
static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
- struct nmk_gpio_chip *nmk_chip;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
unsigned long flags;
- nmk_chip = irq_data_get_irq_chip_data(d);
- if (!nmk_chip)
- return -EINVAL;
-
clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
@@ -740,14 +738,12 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
bool enabled = !irqd_irq_disabled(d);
bool wake = irqd_is_wakeup_set(d);
- struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- nmk_chip = irq_data_get_irq_chip_data(d);
- if (!nmk_chip)
- return -EINVAL;
if (type & IRQ_TYPE_LEVEL_HIGH)
return -EINVAL;
if (type & IRQ_TYPE_LEVEL_LOW)
@@ -784,7 +780,8 @@ static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
{
- struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
clk_enable(nmk_chip->clk);
nmk_gpio_irq_unmask(d);
@@ -793,7 +790,8 @@ static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
static void nmk_gpio_irq_shutdown(struct irq_data *d)
{
- struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
nmk_gpio_irq_mask(d);
clk_disable(nmk_chip->clk);
@@ -1078,13 +1076,34 @@ static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
return nmk_chip;
}
+static void nmk_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ seq_printf(p, "nmk%u-%u-%u", nmk_chip->bank,
+ gc->base, gc->base + gc->ngpio - 1);
+}
+
+static const struct irq_chip nmk_irq_chip = {
+ .irq_ack = nmk_gpio_irq_ack,
+ .irq_mask = nmk_gpio_irq_mask,
+ .irq_unmask = nmk_gpio_irq_unmask,
+ .irq_set_type = nmk_gpio_irq_set_type,
+ .irq_set_wake = nmk_gpio_irq_set_wake,
+ .irq_startup = nmk_gpio_irq_startup,
+ .irq_shutdown = nmk_gpio_irq_shutdown,
+ .irq_print_chip = nmk_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int nmk_gpio_probe(struct platform_device *dev)
{
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
struct gpio_irq_chip *girq;
- struct irq_chip *irqchip;
bool supports_sleepmode;
int irq;
int ret;
@@ -1125,22 +1144,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
chip->can_sleep = false;
chip->owner = THIS_MODULE;
- irqchip = &nmk_chip->irqchip;
- irqchip->irq_ack = nmk_gpio_irq_ack;
- irqchip->irq_mask = nmk_gpio_irq_mask;
- irqchip->irq_unmask = nmk_gpio_irq_unmask;
- irqchip->irq_set_type = nmk_gpio_irq_set_type;
- irqchip->irq_set_wake = nmk_gpio_irq_set_wake;
- irqchip->irq_startup = nmk_gpio_irq_startup;
- irqchip->irq_shutdown = nmk_gpio_irq_shutdown;
- irqchip->flags = IRQCHIP_MASK_ON_SUSPEND;
- irqchip->name = kasprintf(GFP_KERNEL, "nmk%u-%u-%u",
- dev->id,
- chip->base,
- chip->base + chip->ngpio - 1);
-
girq = &chip->irq;
- girq->chip = irqchip;
+ gpio_irq_chip_set_chip(girq, &nmk_irq_chip);
girq->parent_handler = nmk_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&dev->dev, 1,
@@ -1179,17 +1184,17 @@ static const char *nmk_get_group_name(struct pinctrl_dev *pctldev,
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
- return npct->soc->groups[selector].name;
+ return npct->soc->groups[selector].grp.name;
}
static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
const unsigned **pins,
- unsigned *num_pins)
+ unsigned *npins)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
- *pins = npct->soc->groups[selector].pins;
- *num_pins = npct->soc->groups[selector].npins;
+ *pins = npct->soc->groups[selector].grp.pins;
+ *npins = npct->soc->groups[selector].grp.npins;
return 0;
}
@@ -1531,7 +1536,7 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
if (g->altsetting < 0)
return -EINVAL;
- dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
+ dev_dbg(npct->dev, "enable group %s, %u pins\n", g->grp.name, g->grp.npins);
/*
* If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
@@ -1566,26 +1571,26 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
* Then mask the pins that need to be sleeping now when we're
* switching to the ALT C function.
*/
- for (i = 0; i < g->npins; i++)
- slpm[g->pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->pins[i]);
+ for (i = 0; i < g->grp.npins; i++)
+ slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]);
nmk_gpio_glitch_slpm_init(slpm);
}
- for (i = 0; i < g->npins; i++) {
+ for (i = 0; i < g->grp.npins; i++) {
struct nmk_gpio_chip *nmk_chip;
unsigned bit;
- nmk_chip = find_nmk_gpio_from_pin(g->pins[i]);
+ nmk_chip = find_nmk_gpio_from_pin(g->grp.pins[i]);
if (!nmk_chip) {
dev_err(npct->dev,
"invalid pin offset %d in group %s at index %d\n",
- g->pins[i], g->name, i);
+ g->grp.pins[i], g->grp.name, i);
goto out_glitch;
}
- dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->pins[i], g->altsetting);
+ dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->grp.pins[i], g->altsetting);
clk_enable(nmk_chip->clk);
- bit = g->pins[i] % NMK_GPIO_PER_CHIP;
+ bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP;
/*
* If the pin is switching to altfunc, and there was an
* interrupt installed on it which has been lazy disabled,
@@ -1608,7 +1613,7 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
* then some bits in PRCM GPIOCR registers must be cleared.
*/
if ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C)
- nmk_prcm_altcx_set_mode(npct, g->pins[i],
+ nmk_prcm_altcx_set_mode(npct, g->grp.pins[i],
g->altsetting >> NMK_GPIO_ALT_CX_SHIFT);
}
@@ -1802,10 +1807,6 @@ static const struct of_device_id nmk_pinctrl_match[] = {
.compatible = "stericsson,db8500-pinctrl",
.data = (void *)PINCTRL_NMK_DB8500,
},
- {
- .compatible = "stericsson,db8540-pinctrl",
- .data = (void *)PINCTRL_NMK_DB8540,
- },
{},
};
@@ -1856,8 +1857,6 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
nmk_pinctrl_stn8815_init(&npct->soc);
if (version == PINCTRL_NMK_DB8500)
nmk_pinctrl_db8500_init(&npct->soc);
- if (version == PINCTRL_NMK_DB8540)
- nmk_pinctrl_db8540_init(&npct->soc);
/*
* Since we depend on the GPIO chips to provide clock and register base
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
index ae0bac06639f..84e297757335 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
@@ -5,7 +5,6 @@
/* Package definitions */
#define PINCTRL_NMK_STN8815 0
#define PINCTRL_NMK_DB8500 1
-#define PINCTRL_NMK_DB8540 2
/* Alternate functions: function C is set in hw by setting both A and B */
#define NMK_GPIO_ALT_GPIO 0
@@ -105,21 +104,21 @@ struct nmk_function {
/**
* struct nmk_pingroup - describes a Nomadik pin group
- * @name: the name of this specific pin group
- * @pins: an array of discrete physical pins used in this group, taken
- * from the driver-local pin enumeration space
- * @num_pins: the number of pins in this group array, i.e. the number of
- * elements in .pins so we can iterate over that array
+ * @grp: Generic data of the pin group (name and pins)
* @altsetting: the altsetting to apply to all pins in this group to
* configure them to be used by a function
*/
struct nmk_pingroup {
- const char *name;
- const unsigned int *pins;
- const unsigned npins;
+ struct pingroup grp;
int altsetting;
};
+#define NMK_PIN_GROUP(a, b) \
+ { \
+ .grp = PINCTRL_PINGROUP(#a, a##_pins, ARRAY_SIZE(a##_pins)), \
+ .altsetting = b, \
+ }
+
/**
* struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
* @pins: An array describing all pins the pin controller affects.
@@ -173,17 +172,4 @@ nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
#endif
-#ifdef CONFIG_PINCTRL_DB8540
-
-void nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc);
-
-#else
-
-static inline void
-nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
-{
-}
-
-#endif
-
#endif /* PINCTRL_PINCTRL_NOMADIK_H */
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 64d8a568b3db..1c4e89b046de 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -81,11 +81,11 @@ struct npcm7xx_gpio {
int irq;
struct irq_chip irq_chip;
u32 pinctrl_id;
- int (*direction_input)(struct gpio_chip *chip, unsigned offset);
- int (*direction_output)(struct gpio_chip *chip, unsigned offset,
+ int (*direction_input)(struct gpio_chip *chip, unsigned int offset);
+ int (*direction_output)(struct gpio_chip *chip, unsigned int offset,
int value);
- int (*request)(struct gpio_chip *chip, unsigned offset);
- void (*free)(struct gpio_chip *chip, unsigned offset);
+ int (*request)(struct gpio_chip *chip, unsigned int offset);
+ void (*free)(struct gpio_chip *chip, unsigned int offset);
};
struct npcm7xx_pinctrl {
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 0dbeb91f0bf2..8193b92da403 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -1081,10 +1081,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
girq->num_parents = 0;
for (i = 0; i < WPCM450_NUM_GPIO_IRQS; i++) {
- int irq = fwnode_irq_get(child, i);
+ int irq;
+ irq = fwnode_irq_get(child, i);
if (irq < 0)
break;
+ if (!irq)
+ continue;
girq->parents[i] = irq;
girq->num_parents++;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 4691a33bc374..6be896871718 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -246,7 +246,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
}
seq_printf(s, "GPIO bank%d\n", bank);
for (; i < pin_num; i++) {
- seq_printf(s, "📌%d\t", i);
+ seq_printf(s, "#%d\t", i);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -278,32 +278,32 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
}
if (pin_reg & BIT(INTERRUPT_MASK_OFF))
- interrupt_mask = "-";
+ interrupt_mask = "😛";
else
- interrupt_mask = "+";
- seq_printf(s, "int %s (🎭 %s)| active-%s| %s-🔫| ",
+ interrupt_mask = "😷";
+ seq_printf(s, "int %s (%s)| active-%s| %s-⚡| ",
interrupt_enable,
interrupt_mask,
active_level,
level_trig);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
- wake_cntrl0 = "+";
+ wake_cntrl0 = "⏰";
else
- wake_cntrl0 = "∅";
- seq_printf(s, "S0i3 🌅 %s| ", wake_cntrl0);
+ wake_cntrl0 = " ∅";
+ seq_printf(s, "S0i3 %s| ", wake_cntrl0);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
- wake_cntrl1 = "+";
+ wake_cntrl1 = "⏰";
else
- wake_cntrl1 = "∅";
- seq_printf(s, "S3 🌅 %s| ", wake_cntrl1);
+ wake_cntrl1 = " ∅";
+ seq_printf(s, "S3 %s| ", wake_cntrl1);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
- wake_cntrl2 = "+";
+ wake_cntrl2 = "⏰";
else
- wake_cntrl2 = "∅";
- seq_printf(s, "S4/S5 🌅 %s| ", wake_cntrl2);
+ wake_cntrl2 = " ∅";
+ seq_printf(s, "S4/S5 %s| ", wake_cntrl2);
if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
pull_up_enable = "+";
@@ -367,7 +367,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
debounce_enable = " ∅";
}
snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
- seq_printf(s, "debounce %s (⏰ %sus)| ", debounce_enable, debounce_value);
+ seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
seq_printf(s, " 0x%x\n", pin_reg);
}
}
@@ -639,7 +639,7 @@ static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
if (!(regval & PIN_IRQ_PENDING) ||
!(regval & BIT(INTERRUPT_MASK_OFF)))
continue;
- generic_handle_domain_irq(gc->irq.domain, irqnr + i);
+ generic_handle_domain_irq_safe(gc->irq.domain, irqnr + i);
/* Clear interrupt.
* We must read the pin register again, in case the
@@ -1051,13 +1051,13 @@ static void amd_get_iomux_res(struct amd_gpio *gpio_dev)
index = device_property_match_string(dev, "pinctrl-resource-names", "iomux");
if (index < 0) {
- dev_warn(dev, "failed to get iomux index\n");
+ dev_dbg(dev, "iomux not supported\n");
goto out_no_pinmux;
}
gpio_dev->iomux_base = devm_platform_ioremap_resource(gpio_dev->pdev, index);
if (IS_ERR(gpio_dev->iomux_base)) {
- dev_warn(dev, "Failed to get iomux %d io resource\n", index);
+ dev_dbg(dev, "iomux not supported %d io resource\n", index);
goto out_no_pinmux;
}
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 5634fa063ebf..81dbffab621f 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -22,8 +22,7 @@
#include <linux/pinctrl/pinmux.h>
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
-
-#include <soc/at91/pm.h>
+#include <linux/pm.h>
#include "pinctrl-at91.h"
#include "core.h"
@@ -33,16 +32,34 @@
struct at91_pinctrl_mux_ops;
+/**
+ * struct at91_gpio_chip: at91 gpio chip
+ * @chip: gpio chip
+ * @range: gpio range
+ * @next: bank sharing same clock
+ * @pioc_hwirq: PIO bank interrupt identifier on AIC
+ * @pioc_virq: PIO bank Linux virtual interrupt
+ * @pioc_idx: PIO bank index
+ * @regbase: PIO bank virtual address
+ * @clock: associated clock
+ * @ops: at91 pinctrl mux ops
+ * @wakeups: wakeup interrupts
+ * @backups: interrupts disabled in suspend
+ * @id: gpio chip identifier
+ */
struct at91_gpio_chip {
struct gpio_chip chip;
struct pinctrl_gpio_range range;
- struct at91_gpio_chip *next; /* Bank sharing same clock */
- int pioc_hwirq; /* PIO bank interrupt identifier on AIC */
- int pioc_virq; /* PIO bank Linux virtual interrupt */
- int pioc_idx; /* PIO bank index */
- void __iomem *regbase; /* PIO bank virtual address */
- struct clk *clock; /* associated clock */
- const struct at91_pinctrl_mux_ops *ops; /* ops */
+ struct at91_gpio_chip *next;
+ int pioc_hwirq;
+ int pioc_virq;
+ int pioc_idx;
+ void __iomem *regbase;
+ struct clk *clock;
+ const struct at91_pinctrl_mux_ops *ops;
+ u32 wakeups;
+ u32 backups;
+ u32 id;
};
static struct at91_gpio_chip *gpio_chips[MAX_GPIO_BANKS];
@@ -1615,70 +1632,51 @@ static void gpio_irq_ack(struct irq_data *d)
/* the interrupt is already cleared before by reading ISR */
}
-static u32 wakeups[MAX_GPIO_BANKS];
-static u32 backups[MAX_GPIO_BANKS];
-
static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
{
struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
- unsigned bank = at91_gpio->pioc_idx;
unsigned mask = 1 << d->hwirq;
- if (unlikely(bank >= MAX_GPIO_BANKS))
- return -EINVAL;
-
if (state)
- wakeups[bank] |= mask;
+ at91_gpio->wakeups |= mask;
else
- wakeups[bank] &= ~mask;
+ at91_gpio->wakeups &= ~mask;
irq_set_irq_wake(at91_gpio->pioc_virq, state);
return 0;
}
-void at91_pinctrl_gpio_suspend(void)
+static int at91_gpio_suspend(struct device *dev)
{
- int i;
-
- for (i = 0; i < gpio_banks; i++) {
- void __iomem *pio;
+ struct at91_gpio_chip *at91_chip = dev_get_drvdata(dev);
+ void __iomem *pio = at91_chip->regbase;
- if (!gpio_chips[i])
- continue;
-
- pio = gpio_chips[i]->regbase;
+ at91_chip->backups = readl_relaxed(pio + PIO_IMR);
+ writel_relaxed(at91_chip->backups, pio + PIO_IDR);
+ writel_relaxed(at91_chip->wakeups, pio + PIO_IER);
- backups[i] = readl_relaxed(pio + PIO_IMR);
- writel_relaxed(backups[i], pio + PIO_IDR);
- writel_relaxed(wakeups[i], pio + PIO_IER);
+ if (!at91_chip->wakeups)
+ clk_disable_unprepare(at91_chip->clock);
+ else
+ dev_dbg(dev, "GPIO-%c may wake for %08x\n",
+ 'A' + at91_chip->id, at91_chip->wakeups);
- if (!wakeups[i])
- clk_disable_unprepare(gpio_chips[i]->clock);
- else
- printk(KERN_DEBUG "GPIO-%c may wake for %08x\n",
- 'A'+i, wakeups[i]);
- }
+ return 0;
}
-void at91_pinctrl_gpio_resume(void)
+static int at91_gpio_resume(struct device *dev)
{
- int i;
-
- for (i = 0; i < gpio_banks; i++) {
- void __iomem *pio;
+ struct at91_gpio_chip *at91_chip = dev_get_drvdata(dev);
+ void __iomem *pio = at91_chip->regbase;
- if (!gpio_chips[i])
- continue;
-
- pio = gpio_chips[i]->regbase;
+ if (!at91_chip->wakeups)
+ clk_prepare_enable(at91_chip->clock);
- if (!wakeups[i])
- clk_prepare_enable(gpio_chips[i]->clock);
+ writel_relaxed(at91_chip->wakeups, pio + PIO_IDR);
+ writel_relaxed(at91_chip->backups, pio + PIO_IER);
- writel_relaxed(wakeups[i], pio + PIO_IDR);
- writel_relaxed(backups[i], pio + PIO_IER);
- }
+ return 0;
}
static void gpio_irq_handler(struct irq_desc *desc)
@@ -1860,6 +1858,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
}
at91_chip->chip = at91_gpio_template;
+ at91_chip->id = alias_idx;
chip = &at91_chip->chip;
chip->label = dev_name(&pdev->dev);
@@ -1905,6 +1904,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
goto gpiochip_add_err;
gpio_chips[alias_idx] = at91_chip;
+ platform_set_drvdata(pdev, at91_chip);
gpio_banks = max(gpio_banks, alias_idx + 1);
dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase);
@@ -1920,10 +1920,15 @@ err:
return ret;
}
+static const struct dev_pm_ops at91_gpio_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(at91_gpio_suspend, at91_gpio_resume)
+};
+
static struct platform_driver at91_gpio_driver = {
.driver = {
.name = "gpio-at91",
.of_match_table = at91_gpio_of_match,
+ .pm = pm_ptr(&at91_gpio_pm_ops),
},
.probe = at91_gpio_probe,
};
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
new file mode 100644
index 000000000000..68509a2301b8
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -0,0 +1,1419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CY8C95X0 20/40/60 pin I2C GPIO port expander with interrupt support
+ *
+ * Copyright (C) 2022 9elements GmbH
+ * Authors: Patrick Rudolph <patrick.rudolph@9elements.com>
+ * Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/dmi.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+
+/* Fast access registers */
+#define CY8C95X0_INPUT 0x00
+#define CY8C95X0_OUTPUT 0x08
+#define CY8C95X0_INTSTATUS 0x10
+
+#define CY8C95X0_INPUT_(x) (CY8C95X0_INPUT + (x))
+#define CY8C95X0_OUTPUT_(x) (CY8C95X0_OUTPUT + (x))
+#define CY8C95X0_INTSTATUS_(x) (CY8C95X0_INTSTATUS + (x))
+
+/* Port Select configures the port */
+#define CY8C95X0_PORTSEL 0x18
+/* Port settings, write PORTSEL first */
+#define CY8C95X0_INTMASK 0x19
+#define CY8C95X0_PWMSEL 0x1A
+#define CY8C95X0_INVERT 0x1B
+#define CY8C95X0_DIRECTION 0x1C
+/* Drive mode register change state on writing '1' */
+#define CY8C95X0_DRV_PU 0x1D
+#define CY8C95X0_DRV_PD 0x1E
+#define CY8C95X0_DRV_ODH 0x1F
+#define CY8C95X0_DRV_ODL 0x20
+#define CY8C95X0_DRV_PP_FAST 0x21
+#define CY8C95X0_DRV_PP_SLOW 0x22
+#define CY8C95X0_DRV_HIZ 0x23
+#define CY8C95X0_DEVID 0x2E
+#define CY8C95X0_WATCHDOG 0x2F
+#define CY8C95X0_COMMAND 0x30
+
+#define CY8C95X0_PIN_TO_OFFSET(x) (((x) >= 20) ? ((x) + 4) : (x))
+
+static const struct i2c_device_id cy8c95x0_id[] = {
+ { "cy8c9520", 20, },
+ { "cy8c9540", 40, },
+ { "cy8c9560", 60, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cy8c95x0_id);
+
+#define OF_CY8C95X(__nrgpio) ((void *)(__nrgpio))
+
+static const struct of_device_id cy8c95x0_dt_ids[] = {
+ { .compatible = "cypress,cy8c9520", .data = OF_CY8C95X(20), },
+ { .compatible = "cypress,cy8c9540", .data = OF_CY8C95X(40), },
+ { .compatible = "cypress,cy8c9560", .data = OF_CY8C95X(60), },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cy8c95x0_dt_ids);
+
+static const struct acpi_gpio_params cy8c95x0_irq_gpios = { 0, 0, true };
+
+static const struct acpi_gpio_mapping cy8c95x0_acpi_irq_gpios[] = {
+ { "irq-gpios", &cy8c95x0_irq_gpios, 1, ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER },
+ { }
+};
+
+static int cy8c95x0_acpi_get_irq(struct device *dev)
+{
+ int ret;
+
+ ret = devm_acpi_dev_add_driver_gpios(dev, cy8c95x0_acpi_irq_gpios);
+ if (ret)
+ dev_warn(dev, "can't add GPIO ACPI mapping\n");
+
+ ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "ACPI interrupt quirk (IRQ %d)\n", ret);
+ return ret;
+}
+
+static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
+ {
+ /*
+ * On Intel Galileo Gen 1 board the IRQ pin is provided
+ * as an absolute number instead of being relative.
+ * Since first controller (gpio-sch.c) and second
+ * (gpio-dwapb.c) are at the fixed bases, we may safely
+ * refer to the number in the global space to get an IRQ
+ * out of it.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
+ },
+ },
+ {}
+};
+
+#define MAX_BANK 8
+#define BANK_SZ 8
+#define MAX_LINE (MAX_BANK * BANK_SZ)
+
+#define CY8C95X0_GPIO_MASK GENMASK(7, 0)
+
+/**
+ * struct cy8c95x0_pinctrl - driver data
+ * @regmap: Device's regmap
+ * @irq_lock: IRQ bus lock
+ * @i2c_lock: Mutex for the device internal mux register
+ * @irq_mask: I/O bits affected by interrupts
+ * @irq_trig_raise: I/O bits affected by raising voltage level
+ * @irq_trig_fall: I/O bits affected by falling voltage level
+ * @irq_trig_low: I/O bits affected by a low voltage level
+ * @irq_trig_high: I/O bits affected by a high voltage level
+ * @push_pull: I/O bits configured as push pull driver
+ * @shiftmask: Mask used to compensate for Gport2 width
+ * @nport: Number of Gports in this chip
+ * @gpio_chip: gpiolib chip
+ * @driver_data: private driver data
+ * @regulator: Pointer to the regulator for the IC
+ * @dev: struct device
+ * @pctldev: pin controller device
+ * @pinctrl_desc: pin controller description
+ * @name: Chip controller name
+ * @tpin: Total number of pins
+ */
+struct cy8c95x0_pinctrl {
+ struct regmap *regmap;
+ struct mutex irq_lock;
+ struct mutex i2c_lock;
+ DECLARE_BITMAP(irq_mask, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_raise, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_fall, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_low, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_high, MAX_LINE);
+ DECLARE_BITMAP(push_pull, MAX_LINE);
+ DECLARE_BITMAP(shiftmask, MAX_LINE);
+ int nport;
+ struct gpio_chip gpio_chip;
+ unsigned long driver_data;
+ struct regulator *regulator;
+ struct device *dev;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pinctrl_desc;
+ char name[32];
+ unsigned int tpin;
+};
+
+static const struct pinctrl_pin_desc cy8c9560_pins[] = {
+ PINCTRL_PIN(0, "gp00"),
+ PINCTRL_PIN(1, "gp01"),
+ PINCTRL_PIN(2, "gp02"),
+ PINCTRL_PIN(3, "gp03"),
+ PINCTRL_PIN(4, "gp04"),
+ PINCTRL_PIN(5, "gp05"),
+ PINCTRL_PIN(6, "gp06"),
+ PINCTRL_PIN(7, "gp07"),
+
+ PINCTRL_PIN(8, "gp10"),
+ PINCTRL_PIN(9, "gp11"),
+ PINCTRL_PIN(10, "gp12"),
+ PINCTRL_PIN(11, "gp13"),
+ PINCTRL_PIN(12, "gp14"),
+ PINCTRL_PIN(13, "gp15"),
+ PINCTRL_PIN(14, "gp16"),
+ PINCTRL_PIN(15, "gp17"),
+
+ PINCTRL_PIN(16, "gp20"),
+ PINCTRL_PIN(17, "gp21"),
+ PINCTRL_PIN(18, "gp22"),
+ PINCTRL_PIN(19, "gp23"),
+
+ PINCTRL_PIN(20, "gp30"),
+ PINCTRL_PIN(21, "gp31"),
+ PINCTRL_PIN(22, "gp32"),
+ PINCTRL_PIN(23, "gp33"),
+ PINCTRL_PIN(24, "gp34"),
+ PINCTRL_PIN(25, "gp35"),
+ PINCTRL_PIN(26, "gp36"),
+ PINCTRL_PIN(27, "gp37"),
+
+ PINCTRL_PIN(28, "gp40"),
+ PINCTRL_PIN(29, "gp41"),
+ PINCTRL_PIN(30, "gp42"),
+ PINCTRL_PIN(31, "gp43"),
+ PINCTRL_PIN(32, "gp44"),
+ PINCTRL_PIN(33, "gp45"),
+ PINCTRL_PIN(34, "gp46"),
+ PINCTRL_PIN(35, "gp47"),
+
+ PINCTRL_PIN(36, "gp50"),
+ PINCTRL_PIN(37, "gp51"),
+ PINCTRL_PIN(38, "gp52"),
+ PINCTRL_PIN(39, "gp53"),
+ PINCTRL_PIN(40, "gp54"),
+ PINCTRL_PIN(41, "gp55"),
+ PINCTRL_PIN(42, "gp56"),
+ PINCTRL_PIN(43, "gp57"),
+
+ PINCTRL_PIN(44, "gp60"),
+ PINCTRL_PIN(45, "gp61"),
+ PINCTRL_PIN(46, "gp62"),
+ PINCTRL_PIN(47, "gp63"),
+ PINCTRL_PIN(48, "gp64"),
+ PINCTRL_PIN(49, "gp65"),
+ PINCTRL_PIN(50, "gp66"),
+ PINCTRL_PIN(51, "gp67"),
+
+ PINCTRL_PIN(52, "gp70"),
+ PINCTRL_PIN(53, "gp71"),
+ PINCTRL_PIN(54, "gp72"),
+ PINCTRL_PIN(55, "gp73"),
+ PINCTRL_PIN(56, "gp74"),
+ PINCTRL_PIN(57, "gp75"),
+ PINCTRL_PIN(58, "gp76"),
+ PINCTRL_PIN(59, "gp77"),
+};
+
+static const char * const cy8c95x0_groups[] = {
+ "gp00",
+ "gp01",
+ "gp02",
+ "gp03",
+ "gp04",
+ "gp05",
+ "gp06",
+ "gp07",
+
+ "gp10",
+ "gp11",
+ "gp12",
+ "gp13",
+ "gp14",
+ "gp15",
+ "gp16",
+ "gp17",
+
+ "gp20",
+ "gp21",
+ "gp22",
+ "gp23",
+
+ "gp30",
+ "gp31",
+ "gp32",
+ "gp33",
+ "gp34",
+ "gp35",
+ "gp36",
+ "gp37",
+
+ "gp40",
+ "gp41",
+ "gp42",
+ "gp43",
+ "gp44",
+ "gp45",
+ "gp46",
+ "gp47",
+
+ "gp50",
+ "gp51",
+ "gp52",
+ "gp53",
+ "gp54",
+ "gp55",
+ "gp56",
+ "gp57",
+
+ "gp60",
+ "gp61",
+ "gp62",
+ "gp63",
+ "gp64",
+ "gp65",
+ "gp66",
+ "gp67",
+
+ "gp70",
+ "gp71",
+ "gp72",
+ "gp73",
+ "gp74",
+ "gp75",
+ "gp76",
+ "gp77",
+};
+
+static inline u8 cypress_get_port(struct cy8c95x0_pinctrl *chip, unsigned int pin)
+{
+ /* Account for GPORT2 which only has 4 bits */
+ return CY8C95X0_PIN_TO_OFFSET(pin) / BANK_SZ;
+}
+
+static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
+{
+ /* Account for GPORT2 which only has 4 bits */
+ return BIT(CY8C95X0_PIN_TO_OFFSET(pin) % BANK_SZ);
+}
+
+static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x24 ... 0x27:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+ return false;
+ case CY8C95X0_DEVID:
+ return false;
+ case 0x24 ... 0x27:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool cy8c95x0_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+ case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
+ case CY8C95X0_INTMASK:
+ case CY8C95X0_INVERT:
+ case CY8C95X0_PWMSEL:
+ case CY8C95X0_DIRECTION:
+ case CY8C95X0_DRV_PU:
+ case CY8C95X0_DRV_PD:
+ case CY8C95X0_DRV_ODH:
+ case CY8C95X0_DRV_ODL:
+ case CY8C95X0_DRV_PP_FAST:
+ case CY8C95X0_DRV_PP_SLOW:
+ case CY8C95X0_DRV_HIZ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cy8c95x0_precious_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default cy8c95x0_reg_defaults[] = {
+ { CY8C95X0_OUTPUT_(0), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(1), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(2), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(3), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(4), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(5), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(6), GENMASK(7, 0) },
+ { CY8C95X0_OUTPUT_(7), GENMASK(7, 0) },
+ { CY8C95X0_PORTSEL, 0 },
+ { CY8C95X0_PWMSEL, 0 },
+};
+
+static const struct regmap_config cy8c95x0_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .reg_defaults = cy8c95x0_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(cy8c95x0_reg_defaults),
+
+ .readable_reg = cy8c95x0_readable_register,
+ .writeable_reg = cy8c95x0_writeable_register,
+ .volatile_reg = cy8c95x0_volatile_register,
+ .precious_reg = cy8c95x0_precious_register,
+
+ .cache_type = REGCACHE_FLAT,
+ .max_register = CY8C95X0_COMMAND,
+};
+
+static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
+ unsigned long *val, unsigned long *mask)
+{
+ DECLARE_BITMAP(tmask, MAX_LINE);
+ DECLARE_BITMAP(tval, MAX_LINE);
+ int write_val;
+ int ret = 0;
+ int i, off = 0;
+ u8 bits;
+
+ /* Add the 4 bit gap of Gport2 */
+ bitmap_andnot(tmask, mask, chip->shiftmask, MAX_LINE);
+ bitmap_shift_left(tmask, tmask, 4, MAX_LINE);
+ bitmap_replace(tmask, tmask, mask, chip->shiftmask, BANK_SZ * 3);
+
+ bitmap_andnot(tval, val, chip->shiftmask, MAX_LINE);
+ bitmap_shift_left(tval, tval, 4, MAX_LINE);
+ bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
+
+ mutex_lock(&chip->i2c_lock);
+ for (i = 0; i < chip->nport; i++) {
+ /* Skip over unused banks */
+ bits = bitmap_get_value8(tmask, i * BANK_SZ);
+ if (!bits)
+ continue;
+
+ switch (reg) {
+ /* Muxed registers */
+ case CY8C95X0_INTMASK:
+ case CY8C95X0_PWMSEL:
+ case CY8C95X0_INVERT:
+ case CY8C95X0_DIRECTION:
+ case CY8C95X0_DRV_PU:
+ case CY8C95X0_DRV_PD:
+ case CY8C95X0_DRV_ODH:
+ case CY8C95X0_DRV_ODL:
+ case CY8C95X0_DRV_PP_FAST:
+ case CY8C95X0_DRV_PP_SLOW:
+ case CY8C95X0_DRV_HIZ:
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, i);
+ if (ret < 0)
+ goto out;
+ off = reg;
+ break;
+ /* Direct access registers */
+ case CY8C95X0_INPUT:
+ case CY8C95X0_OUTPUT:
+ case CY8C95X0_INTSTATUS:
+ off = reg + i;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ write_val = bitmap_get_value8(tval, i * BANK_SZ);
+
+ ret = regmap_update_bits(chip->regmap, off, bits, write_val);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ if (ret < 0)
+ dev_err(chip->dev, "failed writing register %d: err %d\n", off, ret);
+
+ return ret;
+}
+
+static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
+ unsigned long *val, unsigned long *mask)
+{
+ DECLARE_BITMAP(tmask, MAX_LINE);
+ DECLARE_BITMAP(tval, MAX_LINE);
+ DECLARE_BITMAP(tmp, MAX_LINE);
+ int read_val;
+ int ret = 0;
+ int i, off = 0;
+ u8 bits;
+
+ /* Add the 4 bit gap of Gport2 */
+ bitmap_andnot(tmask, mask, chip->shiftmask, MAX_LINE);
+ bitmap_shift_left(tmask, tmask, 4, MAX_LINE);
+ bitmap_replace(tmask, tmask, mask, chip->shiftmask, BANK_SZ * 3);
+
+ bitmap_andnot(tval, val, chip->shiftmask, MAX_LINE);
+ bitmap_shift_left(tval, tval, 4, MAX_LINE);
+ bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
+
+ mutex_lock(&chip->i2c_lock);
+ for (i = 0; i < chip->nport; i++) {
+ /* Skip over unused banks */
+ bits = bitmap_get_value8(tmask, i * BANK_SZ);
+ if (!bits)
+ continue;
+
+ switch (reg) {
+ /* Muxed registers */
+ case CY8C95X0_INTMASK:
+ case CY8C95X0_PWMSEL:
+ case CY8C95X0_INVERT:
+ case CY8C95X0_DIRECTION:
+ case CY8C95X0_DRV_PU:
+ case CY8C95X0_DRV_PD:
+ case CY8C95X0_DRV_ODH:
+ case CY8C95X0_DRV_ODL:
+ case CY8C95X0_DRV_PP_FAST:
+ case CY8C95X0_DRV_PP_SLOW:
+ case CY8C95X0_DRV_HIZ:
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, i);
+ if (ret < 0)
+ goto out;
+ off = reg;
+ break;
+ /* Direct access registers */
+ case CY8C95X0_INPUT:
+ case CY8C95X0_OUTPUT:
+ case CY8C95X0_INTSTATUS:
+ off = reg + i;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = regmap_read(chip->regmap, off, &read_val);
+ if (ret < 0)
+ goto out;
+
+ read_val &= bits;
+ read_val |= bitmap_get_value8(tval, i * BANK_SZ) & ~bits;
+ bitmap_set_value8(tval, read_val, i * BANK_SZ);
+ }
+
+ /* Fill the 4 bit gap of Gport2 */
+ bitmap_shift_right(tmp, tval, 4, MAX_LINE);
+ bitmap_replace(val, tmp, tval, chip->shiftmask, MAX_LINE);
+
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ if (ret < 0)
+ dev_err(chip->dev, "failed reading register %d: err %d\n", off, ret);
+
+ return ret;
+}
+
+static int cy8c95x0_gpio_direction_input(struct gpio_chip *gc, unsigned int off)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret)
+ goto out;
+
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, bit);
+ if (ret)
+ goto out;
+
+ if (test_bit(off, chip->push_pull)) {
+ /*
+ * Disable driving the pin by forcing it to HighZ. Only setting the
+ * direction register isn't sufficient in Push-Pull mode.
+ */
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_DRV_HIZ, bit, bit);
+ if (ret)
+ goto out;
+
+ __clear_bit(off, chip->push_pull);
+ }
+
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int off, int val)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 port = cypress_get_port(chip, off);
+ u8 outreg = CY8C95X0_OUTPUT_(port);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ int ret;
+
+ /* Set output level */
+ ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ if (ret)
+ return ret;
+
+ mutex_lock(&chip->i2c_lock);
+ /* Select port... */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret)
+ goto out;
+
+ /* ...then direction */
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, 0);
+
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 inreg = CY8C95X0_INPUT_(cypress_get_port(chip, off));
+ u8 bit = cypress_get_pin_mask(chip, off);
+ u32 reg_val;
+ int ret;
+
+ ret = regmap_read(chip->regmap, inreg, &reg_val);
+ if (ret < 0) {
+ /*
+ * NOTE:
+ * Diagnostic already emitted; that's all we should
+ * do unless gpio_*_value_cansleep() calls become different
+ * from their nonsleeping siblings (and report faults).
+ */
+ return 0;
+ }
+
+ return !!(reg_val & bit);
+}
+
+static void cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 outreg = CY8C95X0_OUTPUT_(cypress_get_port(chip, off));
+ u8 bit = cypress_get_pin_mask(chip, off);
+
+ regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+}
+
+static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ u32 reg_val;
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(chip->regmap, CY8C95X0_DIRECTION, &reg_val);
+ if (ret < 0)
+ goto out;
+
+ mutex_unlock(&chip->i2c_lock);
+
+ if (reg_val & bit)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+out:
+ mutex_unlock(&chip->i2c_lock);
+ return ret;
+}
+
+static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
+ unsigned int off,
+ unsigned long *config)
+{
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ unsigned int reg;
+ u32 reg_val;
+ u16 arg = 0;
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+
+ /* Select port */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ goto out;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ reg = CY8C95X0_DRV_PU;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ reg = CY8C95X0_DRV_PD;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ reg = CY8C95X0_DRV_HIZ;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ reg = CY8C95X0_DRV_ODL;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ reg = CY8C95X0_DRV_ODH;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ reg = CY8C95X0_DRV_PP_FAST;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ reg = CY8C95X0_DIRECTION;
+ break;
+ case PIN_CONFIG_MODE_PWM:
+ reg = CY8C95X0_PWMSEL;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ reg = CY8C95X0_OUTPUT_(port);
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ reg = CY8C95X0_DIRECTION;
+ break;
+
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ case PIN_CONFIG_INPUT_SCHMITT:
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ case PIN_CONFIG_MODE_LOW_POWER:
+ case PIN_CONFIG_PERSIST_STATE:
+ case PIN_CONFIG_POWER_SOURCE:
+ case PIN_CONFIG_SKEW_DELAY:
+ case PIN_CONFIG_SLEEP_HARDWARE_STATE:
+ case PIN_CONFIG_SLEW_RATE:
+ default:
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ /*
+ * Writing 1 to one of the drive mode registers will automatically
+ * clear conflicting set bits in the other drive mode registers.
+ */
+	ret = regmap_read(chip->regmap, reg, &reg_val);
+	if (ret < 0)
+		goto out;
+
+	if (reg_val & bit)
+		arg = 1;
+
+ *config = pinconf_to_config_packed(param, (u16)arg);
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
+ unsigned int off,
+ unsigned long config)
+{
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ unsigned long param = pinconf_to_config_param(config);
+ unsigned int reg;
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+
+ /* Select port */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ goto out;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_PU;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_PD;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_HIZ;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_ODL;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ __clear_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_ODH;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ __set_bit(off, chip->push_pull);
+ reg = CY8C95X0_DRV_PP_FAST;
+ break;
+ case PIN_CONFIG_MODE_PWM:
+ reg = CY8C95X0_PWMSEL;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ /*
+ * Writing 1 to one of the drive mode registers will automatically
+ * clear conflicting set bits in the other drive mode registers.
+ */
+ ret = regmap_write_bits(chip->regmap, reg, bit, bit);
+
+out:
+ mutex_unlock(&chip->i2c_lock);
+ return ret;
+}
+
+static int cy8c95x0_gpio_get_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+
+ return cy8c95x0_read_regs_mask(chip, CY8C95X0_INPUT, bits, mask);
+}
+
+static void cy8c95x0_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+
+ cy8c95x0_write_regs_mask(chip, CY8C95X0_OUTPUT, bits, mask);
+}
+
+static int cy8c95x0_add_pin_ranges(struct gpio_chip *gc)
+{
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ struct device *dev = chip->dev;
+ int ret;
+
+ ret = gpiochip_add_pin_range(gc, dev_name(dev), 0, 0, chip->tpin);
+ if (ret)
+ dev_err(dev, "failed to add GPIO pin range\n");
+
+ return ret;
+}
+
+static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
+{
+ struct gpio_chip *gc = &chip->gpio_chip;
+
+ gc->direction_input = cy8c95x0_gpio_direction_input;
+ gc->direction_output = cy8c95x0_gpio_direction_output;
+ gc->get = cy8c95x0_gpio_get_value;
+ gc->set = cy8c95x0_gpio_set_value;
+ gc->get_direction = cy8c95x0_gpio_get_direction;
+ gc->get_multiple = cy8c95x0_gpio_get_multiple;
+ gc->set_multiple = cy8c95x0_gpio_set_multiple;
+	gc->set_config = gpiochip_generic_config;
+ gc->can_sleep = true;
+ gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
+
+ gc->base = -1;
+ gc->ngpio = chip->tpin;
+
+ gc->parent = chip->dev;
+ gc->owner = THIS_MODULE;
+ gc->names = NULL;
+
+ gc->label = dev_name(chip->dev);
+
+ return devm_gpiochip_add_data(chip->dev, gc, chip);
+}
+
+static void cy8c95x0_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
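+	/* The cached mask is written to the INTMASK registers in irq_bus_sync_unlock() */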
+ set_bit(hwirq, chip->irq_mask);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void cy8c95x0_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ clear_bit(hwirq, chip->irq_mask);
+}
+
+static void cy8c95x0_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+static void cy8c95x0_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ DECLARE_BITMAP(ones, MAX_LINE);
+ DECLARE_BITMAP(irq_mask, MAX_LINE);
+ DECLARE_BITMAP(reg_direction, MAX_LINE);
+
+ bitmap_fill(ones, MAX_LINE);
+
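+	/* Push the cached interrupt mask out to the chip's INTMASK registers */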
+ cy8c95x0_write_regs_mask(chip, CY8C95X0_INTMASK, chip->irq_mask, ones);
+
+ /* Switch direction to input if needed */
+ cy8c95x0_read_regs_mask(chip, CY8C95X0_DIRECTION, reg_direction, chip->irq_mask);
+ bitmap_or(irq_mask, chip->irq_mask, reg_direction, MAX_LINE);
+ bitmap_complement(irq_mask, irq_mask, MAX_LINE);
+
+ /* Look for any newly setup interrupt */
+ cy8c95x0_write_regs_mask(chip, CY8C95X0_DIRECTION, ones, irq_mask);
+
+ mutex_unlock(&chip->irq_lock);
+}
+
+static int cy8c95x0_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned int trig_type;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ trig_type = type;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ trig_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ trig_type = IRQ_TYPE_EDGE_FALLING;
+ break;
+ default:
+ dev_err(chip->dev, "irq %d: unsupported type %d\n", d->irq, type);
+ return -EINVAL;
+ }
+
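+	/*
+	 * Level triggers are emulated: the handler keeps re-dispatching the
+	 * nested interrupt for as long as the line stays at the active level.
+	 */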
+ assign_bit(hwirq, chip->irq_trig_fall, trig_type & IRQ_TYPE_EDGE_FALLING);
+ assign_bit(hwirq, chip->irq_trig_raise, trig_type & IRQ_TYPE_EDGE_RISING);
+ assign_bit(hwirq, chip->irq_trig_low, type == IRQ_TYPE_LEVEL_LOW);
+ assign_bit(hwirq, chip->irq_trig_high, type == IRQ_TYPE_LEVEL_HIGH);
+
+ return 0;
+}
+
+static void cy8c95x0_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ clear_bit(hwirq, chip->irq_trig_raise);
+ clear_bit(hwirq, chip->irq_trig_fall);
+ clear_bit(hwirq, chip->irq_trig_low);
+ clear_bit(hwirq, chip->irq_trig_high);
+}
+
+static const struct irq_chip cy8c95x0_irqchip = {
+ .name = "cy8c95x0-irq",
+ .irq_mask = cy8c95x0_irq_mask,
+ .irq_unmask = cy8c95x0_irq_unmask,
+ .irq_bus_lock = cy8c95x0_irq_bus_lock,
+ .irq_bus_sync_unlock = cy8c95x0_irq_bus_sync_unlock,
+ .irq_set_type = cy8c95x0_irq_set_type,
+ .irq_shutdown = cy8c95x0_irq_shutdown,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static bool cy8c95x0_irq_pending(struct cy8c95x0_pinctrl *chip, unsigned long *pending)
+{
+ DECLARE_BITMAP(ones, MAX_LINE);
+ DECLARE_BITMAP(cur_stat, MAX_LINE);
+ DECLARE_BITMAP(new_stat, MAX_LINE);
+ DECLARE_BITMAP(trigger, MAX_LINE);
+
+ bitmap_fill(ones, MAX_LINE);
+
+ /* Read the current interrupt status from the device */
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_INTSTATUS, trigger, ones))
+ return false;
+
+ /* Check latched inputs */
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_INPUT, cur_stat, trigger))
+ return false;
+
+ /* Apply filter for rising/falling edge selection */
+ bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise,
+ cur_stat, MAX_LINE);
+
+ bitmap_and(pending, new_stat, trigger, MAX_LINE);
+
+ return !bitmap_empty(pending, MAX_LINE);
+}
+
+static irqreturn_t cy8c95x0_irq_handler(int irq, void *devid)
+{
+ struct cy8c95x0_pinctrl *chip = devid;
+ struct gpio_chip *gc = &chip->gpio_chip;
+ DECLARE_BITMAP(pending, MAX_LINE);
+ int nested_irq, level;
+ bool ret;
+
+ ret = cy8c95x0_irq_pending(chip, pending);
+ if (!ret)
+ return IRQ_RETVAL(0);
+
+ ret = 0;
+ for_each_set_bit(level, pending, MAX_LINE) {
+		/* The 4 bit gap of Gport2 is already accounted for */
+ nested_irq = irq_find_mapping(gc->irq.domain, level);
+
+ if (unlikely(nested_irq <= 0)) {
+ dev_warn_ratelimited(gc->parent, "unmapped interrupt %d\n", level);
+ continue;
+ }
+
+ if (test_bit(level, chip->irq_trig_low))
+ while (!cy8c95x0_gpio_get_value(gc, level))
+ handle_nested_irq(nested_irq);
+ else if (test_bit(level, chip->irq_trig_high))
+ while (cy8c95x0_gpio_get_value(gc, level))
+ handle_nested_irq(nested_irq);
+ else
+ handle_nested_irq(nested_irq);
+
+ ret = 1;
+ }
+
+ return IRQ_RETVAL(ret);
+}
+
+static int cy8c95x0_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+
+ return chip->tpin;
+}
+
+static const char *cy8c95x0_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return cy8c95x0_groups[group];
+}
+
+static int cy8c95x0_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &cy8c9560_pins[group].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static const char *cy8c95x0_get_fname(unsigned int selector)
+{
+ if (selector == 0)
+ return "gpio";
+ else
+ return "pwm";
+}
+
+static void cy8c95x0_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int pin)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+ DECLARE_BITMAP(mask, MAX_LINE);
+ DECLARE_BITMAP(pwm, MAX_LINE);
+
+ bitmap_zero(mask, MAX_LINE);
+ __set_bit(pin, mask);
+
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_PWMSEL, pwm, mask)) {
+ seq_puts(s, "not available");
+ return;
+ }
+
+ seq_printf(s, "MODE:%s", cy8c95x0_get_fname(test_bit(pin, pwm)));
+}
+
+static const struct pinctrl_ops cy8c95x0_pinctrl_ops = {
+ .get_groups_count = cy8c95x0_pinctrl_get_groups_count,
+ .get_group_name = cy8c95x0_pinctrl_get_group_name,
+ .get_group_pins = cy8c95x0_pinctrl_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+#endif
+ .pin_dbg_show = cy8c95x0_pin_dbg_show,
+};
+
+static const char *cy8c95x0_get_function_name(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ return cy8c95x0_get_fname(selector);
+}
+
+static int cy8c95x0_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return 2;
+}
+
+static int cy8c95x0_get_function_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = cy8c95x0_groups;
+ *num_groups = chip->tpin;
+ return 0;
+}
+
+static int cy8c95x0_pinmux_cfg(struct cy8c95x0_pinctrl *chip,
+ unsigned int val,
+ unsigned long off)
+{
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ int ret;
+
+ /* Select port */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_PWMSEL, bit, val ? bit : 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set direction to output & set output to 1 so that PWM can work */
+ ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, bit);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write_bits(chip->regmap, CY8C95X0_OUTPUT_(port), bit, bit);
+}
+
+static int cy8c95x0_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+ ret = cy8c95x0_pinmux_cfg(chip, selector, group);
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static const struct pinmux_ops cy8c95x0_pmxops = {
+ .get_functions_count = cy8c95x0_get_functions_count,
+ .get_function_name = cy8c95x0_get_function_name,
+ .get_function_groups = cy8c95x0_get_function_groups,
+ .set_mux = cy8c95x0_set_mux,
+ .strict = true,
+};
+
+static int cy8c95x0_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+
+ return cy8c95x0_gpio_get_pincfg(chip, pin, config);
+}
+
+static int cy8c95x0_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ ret = cy8c95x0_gpio_set_pincfg(chip, pin, configs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct pinconf_ops cy8c95x0_pinconf_ops = {
+ .pin_config_get = cy8c95x0_pinconf_get,
+ .pin_config_set = cy8c95x0_pinconf_set,
+ .is_generic = true,
+};
+
+static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
+{
+ struct gpio_irq_chip *girq = &chip->gpio_chip.irq;
+ DECLARE_BITMAP(pending_irqs, MAX_LINE);
+ int ret;
+
+ mutex_init(&chip->irq_lock);
+
+ bitmap_zero(pending_irqs, MAX_LINE);
+
+ /* Read IRQ status register to clear all pending interrupts */
+ ret = cy8c95x0_irq_pending(chip, pending_irqs);
+ if (ret) {
+ dev_err(chip->dev, "failed to clear irq status register\n");
+ return ret;
+ }
+
+ /* Mask all interrupts */
+ bitmap_fill(chip->irq_mask, MAX_LINE);
+
+ gpio_irq_chip_set_chip(girq, &cy8c95x0_irqchip);
+
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
+ ret = devm_request_threaded_irq(chip->dev, irq,
+ NULL, cy8c95x0_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
+ dev_name(chip->dev), chip);
+ if (ret) {
+ dev_err(chip->dev, "failed to request irq %d\n", irq);
+ return ret;
+ }
+ dev_info(chip->dev, "Registered threaded IRQ\n");
+
+ return 0;
+}
+
+static int cy8c95x0_setup_pinctrl(struct cy8c95x0_pinctrl *chip)
+{
+ struct pinctrl_desc *pd = &chip->pinctrl_desc;
+
+ pd->pctlops = &cy8c95x0_pinctrl_ops;
+ pd->confops = &cy8c95x0_pinconf_ops;
+ pd->pmxops = &cy8c95x0_pmxops;
+ pd->name = dev_name(chip->dev);
+ pd->pins = cy8c9560_pins;
+ pd->npins = chip->tpin;
+ pd->owner = THIS_MODULE;
+
+ chip->pctldev = devm_pinctrl_register(chip->dev, pd, chip);
+ if (IS_ERR(chip->pctldev))
+ return dev_err_probe(chip->dev, PTR_ERR(chip->pctldev),
+ "can't register controller\n");
+
+ return 0;
+}
+
+static int cy8c95x0_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int ret;
+ const char *name;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(client, CY8C95X0_DEVID);
+ if (ret < 0)
+ return ret;
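+	/* The upper nibble of the device ID identifies the chip variant */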
+ switch (ret & GENMASK(7, 4)) {
+ case 0x20:
+ name = cy8c95x0_id[0].name;
+ break;
+ case 0x40:
+ name = cy8c95x0_id[1].name;
+ break;
+ case 0x60:
+ name = cy8c95x0_id[2].name;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "Found a %s chip at 0x%02x.\n", name, client->addr);
+ strscpy(info->type, name, I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int cy8c95x0_probe(struct i2c_client *client)
+{
+ struct cy8c95x0_pinctrl *chip;
+ struct regulator *reg;
+ int ret;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &client->dev;
+
+ /* Set the device type */
+ chip->driver_data = (unsigned long)device_get_match_data(&client->dev);
+ if (!chip->driver_data)
+ chip->driver_data = i2c_match_id(cy8c95x0_id, client)->driver_data;
+ if (!chip->driver_data)
+ return -ENODEV;
+
+ i2c_set_clientdata(client, chip);
+
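+	/* Total number of GPIO pins and the number of 8 bit ports they span */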
+ chip->tpin = chip->driver_data & CY8C95X0_GPIO_MASK;
+ chip->nport = DIV_ROUND_UP(CY8C95X0_PIN_TO_OFFSET(chip->tpin), BANK_SZ);
+
+ switch (chip->tpin) {
+ case 20:
+ strscpy(chip->name, cy8c95x0_id[0].name, I2C_NAME_SIZE);
+ break;
+ case 40:
+ strscpy(chip->name, cy8c95x0_id[1].name, I2C_NAME_SIZE);
+ break;
+ case 60:
+ strscpy(chip->name, cy8c95x0_id[2].name, I2C_NAME_SIZE);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(reg)) {
+ if (PTR_ERR(reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ } else {
+ ret = regulator_enable(reg);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable regulator vdd: %d\n", ret);
+ return ret;
+ }
+ chip->regulator = reg;
+ }
+
+ chip->regmap = devm_regmap_init_i2c(client, &cy8c95x0_i2c_regmap);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ goto err_exit;
+ }
+
+ bitmap_zero(chip->push_pull, MAX_LINE);
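+	/*
+	 * shiftmask marks pins 0..19; higher pins get shifted past the four
+	 * unused register bits of Gport2 when mapping to the port registers.
+	 */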
+ bitmap_zero(chip->shiftmask, MAX_LINE);
+ bitmap_set(chip->shiftmask, 0, 20);
+ mutex_init(&chip->i2c_lock);
+
+ if (dmi_first_match(cy8c95x0_dmi_acpi_irq_info)) {
+ ret = cy8c95x0_acpi_get_irq(&client->dev);
+ if (ret > 0)
+ client->irq = ret;
+ }
+
+ if (client->irq) {
+ ret = cy8c95x0_irq_setup(chip, client->irq);
+ if (ret)
+ goto err_exit;
+ }
+
+ ret = cy8c95x0_setup_pinctrl(chip);
+ if (ret)
+ goto err_exit;
+
+ ret = cy8c95x0_setup_gpiochip(chip);
+ if (ret)
+ goto err_exit;
+
+ return 0;
+
+err_exit:
+ if (!IS_ERR_OR_NULL(chip->regulator))
+ regulator_disable(chip->regulator);
+ return ret;
+}
+
+static void cy8c95x0_remove(struct i2c_client *client)
+{
+ struct cy8c95x0_pinctrl *chip = i2c_get_clientdata(client);
+
+ if (!IS_ERR_OR_NULL(chip->regulator))
+ regulator_disable(chip->regulator);
+}
+
+static const struct acpi_device_id cy8c95x0_acpi_ids[] = {
+ { "INT3490", 40, },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cy8c95x0_acpi_ids);
+
+static struct i2c_driver cy8c95x0_driver = {
+ .driver = {
+ .name = "cy8c95x0-pinctrl",
+ .of_match_table = cy8c95x0_dt_ids,
+ .acpi_match_table = cy8c95x0_acpi_ids,
+ },
+ .probe_new = cy8c95x0_probe,
+ .remove = cy8c95x0_remove,
+ .id_table = cy8c95x0_id,
+ .detect = cy8c95x0_detect,
+};
+module_i2c_driver(cy8c95x0_driver);
+
+MODULE_AUTHOR("Patrick Rudolph <patrick.rudolph@9elements.com>");
+MODULE_AUTHOR("Naresh Solanki <naresh.solanki@9elements.com>");
+MODULE_DESCRIPTION("Pinctrl driver for CY8C95X0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 3a9ee9c8af11..7e732076dedf 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -12,14 +12,14 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -4152,7 +4152,7 @@ static const struct of_device_id ingenic_gpio_of_matches[] __initconst = {
};
static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
- struct device_node *node)
+ struct fwnode_handle *fwnode)
{
struct ingenic_gpio_chip *jzgc;
struct device *dev = jzpc->dev;
@@ -4160,7 +4160,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
unsigned int bank;
int err;
- err = of_property_read_u32(node, "reg", &bank);
+ err = fwnode_property_read_u32(fwnode, "reg", &bank);
if (err) {
dev_err(dev, "Cannot read \"reg\" property: %i\n", err);
return err;
@@ -4185,7 +4185,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.ngpio = 32;
jzgc->gc.parent = dev;
- jzgc->gc.of_node = node;
+ jzgc->gc.fwnode = fwnode;
jzgc->gc.owner = THIS_MODULE;
jzgc->gc.set = ingenic_gpio_set;
@@ -4196,9 +4196,12 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.request = gpiochip_generic_request;
jzgc->gc.free = gpiochip_generic_free;
- jzgc->irq = irq_of_parse_and_map(node, 0);
- if (!jzgc->irq)
+ err = fwnode_irq_get(fwnode, 0);
+ if (err < 0)
+ return err;
+ if (!err)
return -EINVAL;
+ jzgc->irq = err;
girq = &jzgc->gc.irq;
gpio_irq_chip_set_chip(girq, &ingenic_gpio_irqchip);
@@ -4227,12 +4230,12 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
struct pinctrl_desc *pctl_desc;
void __iomem *base;
const struct ingenic_chip_info *chip_info;
- struct device_node *node;
struct regmap_config regmap_config;
+ struct fwnode_handle *fwnode;
unsigned int i;
int err;
- chip_info = of_device_get_match_data(dev);
+ chip_info = device_get_match_data(dev);
if (!chip_info) {
dev_err(dev, "Unsupported SoC\n");
return -EINVAL;
@@ -4319,11 +4322,11 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
dev_set_drvdata(dev, jzpc->map);
- for_each_child_of_node(dev->of_node, node) {
- if (of_match_node(ingenic_gpio_of_matches, node)) {
- err = ingenic_gpio_probe(jzpc, node);
+ device_for_each_child_node(dev, fwnode) {
+ if (of_match_node(ingenic_gpio_of_matches, to_of_node(fwnode))) {
+ err = ingenic_gpio_probe(jzpc, fwnode);
if (err) {
- of_node_put(node);
+ fwnode_handle_put(fwnode);
return err;
}
}
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 695236636d05..5f356edfd0fd 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -549,9 +549,6 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.get = mcp23s08_get;
mcp->chip.direction_output = mcp23s08_direction_output;
mcp->chip.set = mcp23s08_set;
-#ifdef CONFIG_OF_GPIO
- mcp->chip.of_gpio_n_cells = 2;
-#endif
mcp->chip.base = base;
mcp->chip.can_sleep = true;
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 2b4167a09b3b..af27b72c8958 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -865,9 +865,10 @@ static int microchip_sgpio_register_bank(struct device *dev,
gc->can_sleep = !bank->is_input;
if (bank->is_input && priv->properties->flags & SGPIO_FLAGS_HAS_IRQ) {
- int irq = fwnode_irq_get(fwnode, 0);
+ int irq;
- if (irq) {
+ irq = fwnode_irq_get(fwnode, 0);
+ if (irq > 0) {
struct gpio_irq_chip *girq = &gc->irq;
gpio_irq_chip_set_chip(girq, &microchip_sgpio_irqchip);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 83464e0bf4e6..62ce3957abe4 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -2129,4 +2129,6 @@ static struct platform_driver ocelot_pinctrl_driver = {
.remove = ocelot_pinctrl_remove,
};
module_platform_driver(ocelot_pinctrl_driver);
+
+MODULE_DESCRIPTION("Ocelot Chip Pinctrl Driver");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 5de691c630b4..7ca4ecb6eb8d 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -10,13 +10,13 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -1347,46 +1347,51 @@ static struct pistachio_gpio_bank pistachio_gpio_banks[] = {
static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
{
- struct device_node *node = pctl->dev->of_node;
struct pistachio_gpio_bank *bank;
unsigned int i;
int irq, ret = 0;
for (i = 0; i < pctl->nbanks; i++) {
char child_name[sizeof("gpioXX")];
- struct device_node *child;
+ struct fwnode_handle *child;
struct gpio_irq_chip *girq;
snprintf(child_name, sizeof(child_name), "gpio%d", i);
- child = of_get_child_by_name(node, child_name);
+ child = device_get_named_child_node(pctl->dev, child_name);
if (!child) {
dev_err(pctl->dev, "No node for bank %u\n", i);
ret = -ENODEV;
goto err;
}
- if (!of_find_property(child, "gpio-controller", NULL)) {
+ if (!fwnode_property_present(child, "gpio-controller")) {
+ fwnode_handle_put(child);
dev_err(pctl->dev,
"No gpio-controller property for bank %u\n", i);
- of_node_put(child);
ret = -ENODEV;
goto err;
}
- irq = irq_of_parse_and_map(child, 0);
- if (!irq) {
+ ret = fwnode_irq_get(child, 0);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ dev_err(pctl->dev, "Failed to retrieve IRQ for bank %u\n", i);
+ goto err;
+ }
+ if (!ret) {
+ fwnode_handle_put(child);
dev_err(pctl->dev, "No IRQ for bank %u\n", i);
- of_node_put(child);
ret = -EINVAL;
goto err;
}
+ irq = ret;
bank = &pctl->gpio_banks[i];
bank->pctl = pctl;
bank->base = pctl->base + GPIO_BANK_BASE(i);
bank->gpio_chip.parent = pctl->dev;
- bank->gpio_chip.of_node = child;
+ bank->gpio_chip.fwnode = child;
girq = &bank->gpio_chip.irq;
girq->chip = &bank->irq_chip;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 32e41395fc76..53bdfc40f055 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -57,6 +57,7 @@
#define IOMUX_UNROUTED BIT(3)
#define IOMUX_WIDTH_3BIT BIT(4)
#define IOMUX_WIDTH_2BIT BIT(5)
+#define IOMUX_L_SOURCE_PMU BIT(6)
#define PIN_BANK(id, pins, label) \
{ \
@@ -147,6 +148,21 @@
.pull_type[3] = pull3, \
}
+#define PIN_BANK_IOMUX_FLAGS_OFFSET(id, pins, label, iom0, iom1, iom2, \
+ iom3, offset0, offset1, offset2, \
+ offset3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .type = iom0, .offset = offset0 }, \
+ { .type = iom1, .offset = offset1 }, \
+ { .type = iom2, .offset = offset2 }, \
+ { .type = iom3, .offset = offset3 }, \
+ }, \
+ }
+
#define PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(id, pins, label, iom0, iom1, \
iom2, iom3, drv0, drv1, drv2, \
drv3, offset0, offset1, \
@@ -443,6 +459,37 @@ static struct rockchip_mux_recalced_data rv1108_mux_recalced_data[] = {
},
};
+static struct rockchip_mux_recalced_data rv1126_mux_recalced_data[] = {
+ {
+ .num = 0,
+ .pin = 20,
+ .reg = 0x10000,
+ .bit = 0,
+ .mask = 0xf
+ },
+ {
+ .num = 0,
+ .pin = 21,
+ .reg = 0x10000,
+ .bit = 4,
+ .mask = 0xf
+ },
+ {
+ .num = 0,
+ .pin = 22,
+ .reg = 0x10000,
+ .bit = 8,
+ .mask = 0xf
+ },
+ {
+ .num = 0,
+ .pin = 23,
+ .reg = 0x10000,
+ .bit = 12,
+ .mask = 0xf
+ },
+};
+
static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
{
.num = 2,
@@ -642,6 +689,103 @@ static struct rockchip_mux_route_data px30_mux_route_data[] = {
RK_MUXROUTE_SAME(1, RK_PB7, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rxm1 */
};
+static struct rockchip_mux_route_data rv1126_mux_route_data[] = {
+ RK_MUXROUTE_GRF(3, RK_PD2, 1, 0x10260, WRITE_MASK_VAL(0, 0, 0)), /* I2S0_MCLK_M0 */
+ RK_MUXROUTE_GRF(3, RK_PB0, 3, 0x10260, WRITE_MASK_VAL(0, 0, 1)), /* I2S0_MCLK_M1 */
+
+ RK_MUXROUTE_GRF(0, RK_PD4, 4, 0x10260, WRITE_MASK_VAL(3, 2, 0)), /* I2S1_MCLK_M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x10260, WRITE_MASK_VAL(3, 2, 1)), /* I2S1_MCLK_M1 */
+ RK_MUXROUTE_GRF(2, RK_PC7, 6, 0x10260, WRITE_MASK_VAL(3, 2, 2)), /* I2S1_MCLK_M2 */
+
+ RK_MUXROUTE_GRF(1, RK_PD0, 1, 0x10260, WRITE_MASK_VAL(4, 4, 0)), /* I2S2_MCLK_M0 */
+ RK_MUXROUTE_GRF(2, RK_PB3, 2, 0x10260, WRITE_MASK_VAL(4, 4, 1)), /* I2S2_MCLK_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PD4, 2, 0x10260, WRITE_MASK_VAL(12, 12, 0)), /* PDM_CLK0_M0 */
+ RK_MUXROUTE_GRF(3, RK_PC0, 3, 0x10260, WRITE_MASK_VAL(12, 12, 1)), /* PDM_CLK0_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PC6, 1, 0x10264, WRITE_MASK_VAL(0, 0, 0)), /* CIF_CLKOUT_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD1, 3, 0x10264, WRITE_MASK_VAL(0, 0, 1)), /* CIF_CLKOUT_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA4, 5, 0x10264, WRITE_MASK_VAL(5, 4, 0)), /* I2C3_SCL_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD4, 7, 0x10264, WRITE_MASK_VAL(5, 4, 1)), /* I2C3_SCL_M1 */
+ RK_MUXROUTE_GRF(1, RK_PD6, 3, 0x10264, WRITE_MASK_VAL(5, 4, 2)), /* I2C3_SCL_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PA0, 7, 0x10264, WRITE_MASK_VAL(6, 6, 0)), /* I2C4_SCL_M0 */
+ RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x10264, WRITE_MASK_VAL(6, 6, 1)), /* I2C4_SCL_M1 */
+
+ RK_MUXROUTE_GRF(2, RK_PA5, 7, 0x10264, WRITE_MASK_VAL(9, 8, 0)), /* I2C5_SCL_M0 */
+ RK_MUXROUTE_GRF(3, RK_PB0, 5, 0x10264, WRITE_MASK_VAL(9, 8, 1)), /* I2C5_SCL_M1 */
+ RK_MUXROUTE_GRF(1, RK_PD0, 4, 0x10264, WRITE_MASK_VAL(9, 8, 2)), /* I2C5_SCL_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PC0, 5, 0x10264, WRITE_MASK_VAL(11, 10, 0)), /* SPI1_CLK_M0 */
+ RK_MUXROUTE_GRF(1, RK_PC6, 3, 0x10264, WRITE_MASK_VAL(11, 10, 1)), /* SPI1_CLK_M1 */
+ RK_MUXROUTE_GRF(2, RK_PD5, 6, 0x10264, WRITE_MASK_VAL(11, 10, 2)), /* SPI1_CLK_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PC0, 2, 0x10264, WRITE_MASK_VAL(12, 12, 0)), /* RGMII_CLK_M0 */
+ RK_MUXROUTE_GRF(2, RK_PB7, 2, 0x10264, WRITE_MASK_VAL(12, 12, 1)), /* RGMII_CLK_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA1, 3, 0x10264, WRITE_MASK_VAL(13, 13, 0)), /* CAN_TXD_M0 */
+ RK_MUXROUTE_GRF(3, RK_PA7, 5, 0x10264, WRITE_MASK_VAL(13, 13, 1)), /* CAN_TXD_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA4, 6, 0x10268, WRITE_MASK_VAL(0, 0, 0)), /* PWM8_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD7, 5, 0x10268, WRITE_MASK_VAL(0, 0, 1)), /* PWM8_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA5, 6, 0x10268, WRITE_MASK_VAL(2, 2, 0)), /* PWM9_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD6, 5, 0x10268, WRITE_MASK_VAL(2, 2, 1)), /* PWM9_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA6, 6, 0x10268, WRITE_MASK_VAL(4, 4, 0)), /* PWM10_M0 */
+ RK_MUXROUTE_GRF(2, RK_PD5, 5, 0x10268, WRITE_MASK_VAL(4, 4, 1)), /* PWM10_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PA7, 6, 0x10268, WRITE_MASK_VAL(6, 6, 0)), /* PWM11_IR_M0 */
+ RK_MUXROUTE_GRF(3, RK_PA1, 5, 0x10268, WRITE_MASK_VAL(6, 6, 1)), /* PWM11_IR_M1 */
+
+ RK_MUXROUTE_GRF(1, RK_PA5, 3, 0x10268, WRITE_MASK_VAL(8, 8, 0)), /* UART2_TX_M0 */
+ RK_MUXROUTE_GRF(3, RK_PA2, 1, 0x10268, WRITE_MASK_VAL(8, 8, 1)), /* UART2_TX_M1 */
+
+ RK_MUXROUTE_GRF(3, RK_PC6, 3, 0x10268, WRITE_MASK_VAL(11, 10, 0)), /* UART3_TX_M0 */
+ RK_MUXROUTE_GRF(1, RK_PA7, 2, 0x10268, WRITE_MASK_VAL(11, 10, 1)), /* UART3_TX_M1 */
+ RK_MUXROUTE_GRF(3, RK_PA0, 4, 0x10268, WRITE_MASK_VAL(11, 10, 2)), /* UART3_TX_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PA4, 4, 0x10268, WRITE_MASK_VAL(13, 12, 0)), /* UART4_TX_M0 */
+ RK_MUXROUTE_GRF(2, RK_PA6, 4, 0x10268, WRITE_MASK_VAL(13, 12, 1)), /* UART4_TX_M1 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 3, 0x10268, WRITE_MASK_VAL(13, 12, 2)), /* UART4_TX_M2 */
+
+ RK_MUXROUTE_GRF(3, RK_PA6, 4, 0x10268, WRITE_MASK_VAL(15, 14, 0)), /* UART5_TX_M0 */
+ RK_MUXROUTE_GRF(2, RK_PB0, 4, 0x10268, WRITE_MASK_VAL(15, 14, 1)), /* UART5_TX_M1 */
+ RK_MUXROUTE_GRF(2, RK_PA0, 3, 0x10268, WRITE_MASK_VAL(15, 14, 2)), /* UART5_TX_M2 */
+
+ RK_MUXROUTE_PMU(0, RK_PB6, 3, 0x0114, WRITE_MASK_VAL(0, 0, 0)), /* PWM0_M0 */
+ RK_MUXROUTE_PMU(2, RK_PB3, 5, 0x0114, WRITE_MASK_VAL(0, 0, 1)), /* PWM0_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PB7, 3, 0x0114, WRITE_MASK_VAL(2, 2, 0)), /* PWM1_M0 */
+ RK_MUXROUTE_PMU(2, RK_PB2, 5, 0x0114, WRITE_MASK_VAL(2, 2, 1)), /* PWM1_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PC0, 3, 0x0114, WRITE_MASK_VAL(4, 4, 0)), /* PWM2_M0 */
+ RK_MUXROUTE_PMU(2, RK_PB1, 5, 0x0114, WRITE_MASK_VAL(4, 4, 1)), /* PWM2_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PC1, 3, 0x0114, WRITE_MASK_VAL(6, 6, 0)), /* PWM3_IR_M0 */
+ RK_MUXROUTE_PMU(2, RK_PB0, 5, 0x0114, WRITE_MASK_VAL(6, 6, 1)), /* PWM3_IR_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PC2, 3, 0x0114, WRITE_MASK_VAL(8, 8, 0)), /* PWM4_M0 */
+ RK_MUXROUTE_PMU(2, RK_PA7, 5, 0x0114, WRITE_MASK_VAL(8, 8, 1)), /* PWM4_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PC3, 3, 0x0114, WRITE_MASK_VAL(10, 10, 0)), /* PWM5_M0 */
+ RK_MUXROUTE_PMU(2, RK_PA6, 5, 0x0114, WRITE_MASK_VAL(10, 10, 1)), /* PWM5_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PB2, 3, 0x0114, WRITE_MASK_VAL(12, 12, 0)), /* PWM6_M0 */
+ RK_MUXROUTE_PMU(2, RK_PD4, 5, 0x0114, WRITE_MASK_VAL(12, 12, 1)), /* PWM6_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PB1, 3, 0x0114, WRITE_MASK_VAL(14, 14, 0)), /* PWM7_IR_M0 */
+ RK_MUXROUTE_PMU(3, RK_PA0, 5, 0x0114, WRITE_MASK_VAL(14, 14, 1)), /* PWM7_IR_M1 */
+
+ RK_MUXROUTE_PMU(0, RK_PB0, 1, 0x0118, WRITE_MASK_VAL(1, 0, 0)), /* SPI0_CLK_M0 */
+ RK_MUXROUTE_PMU(2, RK_PA1, 1, 0x0118, WRITE_MASK_VAL(1, 0, 1)), /* SPI0_CLK_M1 */
+ RK_MUXROUTE_PMU(2, RK_PB2, 6, 0x0118, WRITE_MASK_VAL(1, 0, 2)), /* SPI0_CLK_M2 */
+
+ RK_MUXROUTE_PMU(0, RK_PB6, 2, 0x0118, WRITE_MASK_VAL(2, 2, 0)), /* UART1_TX_M0 */
+ RK_MUXROUTE_PMU(1, RK_PD0, 5, 0x0118, WRITE_MASK_VAL(2, 2, 1)), /* UART1_TX_M1 */
+};
+
static struct rockchip_mux_route_data rk3128_mux_route_data[] = {
RK_MUXROUTE_SAME(1, RK_PB2, 1, 0x144, BIT(16 + 3) | BIT(16 + 4)), /* spi-0 */
RK_MUXROUTE_SAME(1, RK_PD3, 3, 0x144, BIT(16 + 3) | BIT(16 + 4) | BIT(3)), /* spi-1 */
@@ -877,8 +1021,12 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY)
return RK_FUNC_GPIO;
- regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
- ? info->regmap_pmu : info->regmap_base;
+ if (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
+ regmap = info->regmap_pmu;
+ else if (bank->iomux[iomux_num].type & IOMUX_L_SOURCE_PMU)
+ regmap = (pin % 8 < 4) ? info->regmap_pmu : info->regmap_base;
+ else
+ regmap = info->regmap_base;
/* get basic quadrupel of mux registers and the correct reg inside */
mux_type = bank->iomux[iomux_num].type;
@@ -987,8 +1135,12 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
dev_dbg(dev, "setting mux of GPIO%d-%d to %d\n", bank->bank_num, pin, mux);
- regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
- ? info->regmap_pmu : info->regmap_base;
+ if (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
+ regmap = info->regmap_pmu;
+ else if (bank->iomux[iomux_num].type & IOMUX_L_SOURCE_PMU)
+ regmap = (pin % 8 < 4) ? info->regmap_pmu : info->regmap_base;
+ else
+ regmap = info->regmap_base;
/* get basic quadrupel of mux registers and the correct reg inside */
mux_type = bank->iomux[iomux_num].type;
@@ -1268,6 +1420,119 @@ static int rv1108_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RV1126_PULL_PMU_OFFSET 0x40
+#define RV1126_PULL_GRF_GPIO1A0_OFFSET 0x10108
+#define RV1126_PULL_PINS_PER_REG 8
+#define RV1126_PULL_BITS_PER_PIN 2
+#define RV1126_PULL_BANK_STRIDE 16
+#define RV1126_GPIO_C4_D7(p)		((p) >= 20 && (p) <= 31) /* GPIO0_C4 ~ GPIO0_D7 */
+
+static int rv1126_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 24 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ if (RV1126_GPIO_C4_D7(pin_num)) {
+ *regmap = info->regmap_base;
+ *reg = RV1126_PULL_GRF_GPIO1A0_OFFSET;
+ *reg -= (((31 - pin_num) / RV1126_PULL_PINS_PER_REG + 1) * 4);
+ *bit = pin_num % RV1126_PULL_PINS_PER_REG;
+ *bit *= RV1126_PULL_BITS_PER_PIN;
+ return 0;
+ }
+ *regmap = info->regmap_pmu;
+ *reg = RV1126_PULL_PMU_OFFSET;
+ } else {
+ *reg = RV1126_PULL_GRF_GPIO1A0_OFFSET;
+ *regmap = info->regmap_base;
+ *reg += (bank->bank_num - 1) * RV1126_PULL_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / RV1126_PULL_PINS_PER_REG) * 4);
+ *bit = (pin_num % RV1126_PULL_PINS_PER_REG);
+ *bit *= RV1126_PULL_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RV1126_DRV_PMU_OFFSET 0x20
+#define RV1126_DRV_GRF_GPIO1A0_OFFSET 0x10090
+#define RV1126_DRV_BITS_PER_PIN 4
+#define RV1126_DRV_PINS_PER_REG 4
+#define RV1126_DRV_BANK_STRIDE 32
+
+static int rv1126_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 24 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ if (RV1126_GPIO_C4_D7(pin_num)) {
+ *regmap = info->regmap_base;
+ *reg = RV1126_DRV_GRF_GPIO1A0_OFFSET;
+ *reg -= (((31 - pin_num) / RV1126_DRV_PINS_PER_REG + 1) * 4);
+ *reg -= 0x4;
+ *bit = pin_num % RV1126_DRV_PINS_PER_REG;
+ *bit *= RV1126_DRV_BITS_PER_PIN;
+ return 0;
+ }
+ *regmap = info->regmap_pmu;
+ *reg = RV1126_DRV_PMU_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RV1126_DRV_GRF_GPIO1A0_OFFSET;
+ *reg += (bank->bank_num - 1) * RV1126_DRV_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / RV1126_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RV1126_DRV_PINS_PER_REG;
+ *bit *= RV1126_DRV_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RV1126_SCHMITT_PMU_OFFSET 0x60
+#define RV1126_SCHMITT_GRF_GPIO1A0_OFFSET 0x10188
+#define RV1126_SCHMITT_BANK_STRIDE 16
+#define RV1126_SCHMITT_PINS_PER_GRF_REG 8
+#define RV1126_SCHMITT_PINS_PER_PMU_REG 8
+
+static int rv1126_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ int pins_per_reg;
+
+ if (bank->bank_num == 0) {
+ if (RV1126_GPIO_C4_D7(pin_num)) {
+ *regmap = info->regmap_base;
+ *reg = RV1126_SCHMITT_GRF_GPIO1A0_OFFSET;
+ *reg -= (((31 - pin_num) / RV1126_SCHMITT_PINS_PER_GRF_REG + 1) * 4);
+ *bit = pin_num % RV1126_SCHMITT_PINS_PER_GRF_REG;
+ return 0;
+ }
+ *regmap = info->regmap_pmu;
+ *reg = RV1126_SCHMITT_PMU_OFFSET;
+ pins_per_reg = RV1126_SCHMITT_PINS_PER_PMU_REG;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RV1126_SCHMITT_GRF_GPIO1A0_OFFSET;
+ pins_per_reg = RV1126_SCHMITT_PINS_PER_GRF_REG;
+ *reg += (bank->bank_num - 1) * RV1126_SCHMITT_BANK_STRIDE;
+ }
+ *reg += ((pin_num / pins_per_reg) * 4);
+ *bit = pin_num % pins_per_reg;
+
+ return 0;
+}
+
#define RK3308_SCHMITT_PINS_PER_REG 8
#define RK3308_SCHMITT_BANK_STRIDE 16
#define RK3308_SCHMITT_GRF_OFFSET 0x1a0
@@ -1998,6 +2263,12 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
goto config;
}
+ if (ctrl->type == RV1126) {
+ rmask_bits = RV1126_DRV_BITS_PER_PIN;
+ ret = strength;
+ goto config;
+ }
+
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list[drv_type]); i++) {
if (rockchip_perpin_drv_list[drv_type][i] == strength) {
@@ -2168,6 +2439,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
break;
case PX30:
case RV1108:
+ case RV1126:
case RK3188:
case RK3288:
case RK3308:
@@ -2393,11 +2665,24 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
+static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset,
+ bool input)
+{
+ struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct rockchip_pin_bank *bank;
+
+ bank = pin_to_bank(info, offset);
+ return rockchip_set_mux(bank, offset - bank->pin_base, RK_FUNC_GPIO);
+}
+
static const struct pinmux_ops rockchip_pmx_ops = {
.get_functions_count = rockchip_pmx_get_funcs_count,
.get_function_name = rockchip_pmx_get_func_name,
.get_function_groups = rockchip_pmx_get_groups,
.set_mux = rockchip_pmx_set,
+ .gpio_set_direction = rockchip_pmx_gpio_set_direction,
};
/*
@@ -2416,6 +2701,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
return pull ? false : true;
case PX30:
case RV1108:
+ case RV1126:
case RK3188:
case RK3288:
case RK3308:
@@ -2889,12 +3175,14 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
/* preset iomux offset value, set new start value */
if (iom->offset >= 0) {
- if (iom->type & IOMUX_SOURCE_PMU)
+ if ((iom->type & IOMUX_SOURCE_PMU) ||
+ (iom->type & IOMUX_L_SOURCE_PMU))
pmu_offs = iom->offset;
else
grf_offs = iom->offset;
} else { /* set current iomux offset */
- iom->offset = (iom->type & IOMUX_SOURCE_PMU) ?
+ iom->offset = ((iom->type & IOMUX_SOURCE_PMU) ||
+ (iom->type & IOMUX_L_SOURCE_PMU)) ?
pmu_offs : grf_offs;
}
@@ -2919,7 +3207,7 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
inc = (iom->type & (IOMUX_WIDTH_4BIT |
IOMUX_WIDTH_3BIT |
IOMUX_WIDTH_2BIT)) ? 8 : 4;
- if (iom->type & IOMUX_SOURCE_PMU)
+ if ((iom->type & IOMUX_SOURCE_PMU) || (iom->type & IOMUX_L_SOURCE_PMU))
pmu_offs += inc;
else
grf_offs += inc;
@@ -3178,6 +3466,48 @@ static struct rockchip_pin_ctrl rv1108_pin_ctrl = {
.schmitt_calc_reg = rv1108_calc_schmitt_reg_and_bit,
};
+static struct rockchip_pin_bank rv1126_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0",
+ IOMUX_WIDTH_4BIT | IOMUX_SOURCE_PMU,
+ IOMUX_WIDTH_4BIT | IOMUX_SOURCE_PMU,
+ IOMUX_WIDTH_4BIT | IOMUX_L_SOURCE_PMU,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(1, 32, "gpio1",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x10010, 0x10018, 0x10020, 0x10028),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 2, "gpio4",
+ IOMUX_WIDTH_4BIT, 0, 0, 0),
+};
+
+static struct rockchip_pin_ctrl rv1126_pin_ctrl = {
+ .pin_banks = rv1126_pin_banks,
+ .nr_banks = ARRAY_SIZE(rv1126_pin_banks),
+ .label = "RV1126-GPIO",
+ .type = RV1126,
+ .grf_mux_offset = 0x10004, /* mux offset from GPIO0_D0 */
+ .pmu_mux_offset = 0x0,
+ .iomux_routes = rv1126_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rv1126_mux_route_data),
+ .iomux_recalced = rv1126_mux_recalced_data,
+ .niomux_recalced = ARRAY_SIZE(rv1126_mux_recalced_data),
+ .pull_calc_reg = rv1126_calc_pull_reg_and_bit,
+ .drv_calc_reg = rv1126_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rv1126_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk2928_pin_banks[] = {
PIN_BANK(0, 32, "gpio0"),
PIN_BANK(1, 32, "gpio1"),
@@ -3568,6 +3898,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &px30_pin_ctrl },
{ .compatible = "rockchip,rv1108-pinctrl",
.data = &rv1108_pin_ctrl },
+ { .compatible = "rockchip,rv1126-pinctrl",
+ .data = &rv1126_pin_ctrl },
{ .compatible = "rockchip,rk2928-pinctrl",
.data = &rk2928_pin_ctrl },
{ .compatible = "rockchip,rk3036-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index ec46f8815ac9..4759f336941e 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -186,6 +186,7 @@
enum rockchip_pinctrl_type {
PX30,
RV1108,
+ RV1126,
RK2928,
RK3066B,
RK3128,
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 0fea71fd9a00..cf7f9cbe6044 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -12,7 +12,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h> /* of_get_named_gpio() */
#include <linux/of_address.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>
@@ -1162,6 +1161,31 @@ static void st_parse_syscfgs(struct st_pinctrl *info, int bank,
return;
}
+static int st_pctl_dt_calculate_pin(struct st_pinctrl *info,
+ phandle bank, unsigned int offset)
+{
+ struct device_node *np;
+ struct gpio_chip *chip;
+ int retval = -EINVAL;
+ int i;
+
+ np = of_find_node_by_phandle(bank);
+ if (!np)
+ return -EINVAL;
+
+ for (i = 0; i < info->nbanks; i++) {
+ chip = &info->banks[i].gpio_chip;
+ if (chip->of_node == np) {
+ if (offset < chip->ngpio)
+ retval = chip->base + offset;
+ break;
+ }
+ }
+
+ of_node_put(np);
+ return retval;
+}
+
/*
* Each pin is represented in of the below forms.
* <bank offset mux direction rt_type rt_delay rt_clk>
@@ -1175,6 +1199,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
struct device *dev = info->dev;
struct st_pinconf *conf;
struct device_node *pins;
+ phandle bank;
+ unsigned int offset;
int i = 0, npins = 0, nr_props, ret = 0;
pins = of_get_child_by_name(np, "st,pins");
@@ -1214,9 +1240,9 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
conf = &grp->pin_conf[i];
/* bank & offset */
- be32_to_cpup(list++);
- be32_to_cpup(list++);
- conf->pin = of_get_named_gpio(pins, pp->name, 0);
+ bank = be32_to_cpup(list++);
+ offset = be32_to_cpup(list++);
+ conf->pin = st_pctl_dt_calculate_pin(info, bank, offset);
conf->name = pp->name;
grp->pins[i] = conf->pin;
/* mux */
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index f415c13caae0..9dc2d803a586 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -15,6 +15,7 @@ config PINCTRL_MSM
config PINCTRL_APQ8064
tristate "Qualcomm APQ8064 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -23,6 +24,7 @@ config PINCTRL_APQ8064
config PINCTRL_APQ8084
tristate "Qualcomm APQ8084 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -31,6 +33,7 @@ config PINCTRL_APQ8084
config PINCTRL_IPQ4019
tristate "Qualcomm IPQ4019 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -39,6 +42,7 @@ config PINCTRL_IPQ4019
config PINCTRL_IPQ8064
tristate "Qualcomm IPQ8064 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -47,6 +51,7 @@ config PINCTRL_IPQ8064
config PINCTRL_IPQ8074
tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -57,6 +62,7 @@ config PINCTRL_IPQ8074
config PINCTRL_IPQ6018
tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -67,6 +73,7 @@ config PINCTRL_IPQ6018
config PINCTRL_MSM8226
tristate "Qualcomm 8226 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -76,6 +83,7 @@ config PINCTRL_MSM8226
config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -84,6 +92,7 @@ config PINCTRL_MSM8660
config PINCTRL_MSM8960
tristate "Qualcomm 8960 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -100,6 +109,7 @@ config PINCTRL_MDM9607
config PINCTRL_MDM9615
tristate "Qualcomm 9615 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -108,6 +118,7 @@ config PINCTRL_MDM9615
config PINCTRL_MSM8X74
tristate "Qualcomm 8x74 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -116,6 +127,7 @@ config PINCTRL_MSM8X74
config PINCTRL_MSM8909
tristate "Qualcomm 8909 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -132,6 +144,7 @@ config PINCTRL_MSM8916
config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -142,6 +155,7 @@ config PINCTRL_MSM8953
config PINCTRL_MSM8976
tristate "Qualcomm 8976 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -152,6 +166,7 @@ config PINCTRL_MSM8976
config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -161,6 +176,7 @@ config PINCTRL_MSM8994
config PINCTRL_MSM8996
tristate "Qualcomm MSM8996 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -169,6 +185,7 @@ config PINCTRL_MSM8996
config PINCTRL_MSM8998
tristate "Qualcomm MSM8998 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -177,6 +194,7 @@ config PINCTRL_MSM8998
config PINCTRL_QCM2290
tristate "Qualcomm QCM2290 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -185,6 +203,7 @@ config PINCTRL_QCM2290
config PINCTRL_QCS404
tristate "Qualcomm QCS404 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -232,6 +251,7 @@ config PINCTRL_QCOM_SSBI_PMIC
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -241,6 +261,7 @@ config PINCTRL_SC7180
config PINCTRL_SC7280
tristate "Qualcomm Technologies Inc SC7280 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -250,6 +271,7 @@ config PINCTRL_SC7280
config PINCTRL_SC7280_LPASS_LPI
tristate "Qualcomm Technologies Inc SC7280 LPASS LPI pin controller driver"
depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_LPASS_LPI
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -259,6 +281,7 @@ config PINCTRL_SC7280_LPASS_LPI
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
depends on (OF || ACPI)
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -268,6 +291,7 @@ config PINCTRL_SC8180X
config PINCTRL_SC8280XP
tristate "Qualcomm Technologies Inc SC8280xp pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -277,6 +301,7 @@ config PINCTRL_SC8280XP
config PINCTRL_SDM660
tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -286,6 +311,7 @@ config PINCTRL_SDM660
config PINCTRL_SDM845
tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
depends on (OF || ACPI)
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -295,6 +321,7 @@ config PINCTRL_SDM845
config PINCTRL_SDX55
tristate "Qualcomm Technologies Inc SDX55 pin controller driver"
depends on OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -304,6 +331,7 @@ config PINCTRL_SDX55
config PINCTRL_SM6115
tristate "Qualcomm Technologies Inc SM6115,SM4250 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -313,6 +341,7 @@ config PINCTRL_SM6115
config PINCTRL_SM6125
tristate "Qualcomm Technologies Inc SM6125 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -322,6 +351,7 @@ config PINCTRL_SM6125
config PINCTRL_SM6350
tristate "Qualcomm Technologies Inc SM6350 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -331,6 +361,7 @@ config PINCTRL_SM6350
config PINCTRL_SM6375
tristate "Qualcomm Technologies Inc SM6375 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -340,6 +371,7 @@ config PINCTRL_SM6375
config PINCTRL_SDX65
tristate "Qualcomm Technologies Inc SDX65 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -349,6 +381,7 @@ config PINCTRL_SDX65
config PINCTRL_SM8150
tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -358,6 +391,7 @@ config PINCTRL_SM8150
config PINCTRL_SM8250
tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
depends on OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -367,6 +401,7 @@ config PINCTRL_SM8250
config PINCTRL_SM8250_LPASS_LPI
tristate "Qualcomm Technologies Inc SM8250 LPASS LPI pin controller driver"
depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_LPASS_LPI
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -375,6 +410,7 @@ config PINCTRL_SM8250_LPASS_LPI
config PINCTRL_SM8350
tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -384,12 +420,33 @@ config PINCTRL_SM8350
config PINCTRL_SM8450
tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8450 platform.
+config PINCTRL_SM8450_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SM8450 LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SM8450 platform.
+
+config PINCTRL_SC8280XP_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SC8280XP LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SC8280XP platform.
+
config PINCTRL_LPASS_LPI
tristate "Qualcomm Technologies Inc LPASS LPI pin controller driver"
select PINMUX
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index fbd64853a24d..8269a1db8794 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -45,4 +45,6 @@ obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
obj-$(CONFIG_PINCTRL_SM8250_LPASS_LPI) += pinctrl-sm8250-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o
obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
+obj-$(CONFIG_PINCTRL_SM8450_LPASS_LPI) += pinctrl-sm8450-lpass-lpi.o
+obj-$(CONFIG_PINCTRL_SC8280XP_LPASS_LPI) += pinctrl-sc8280xp-lpass-lpi.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
new file mode 100644
index 000000000000..4b9c0beac32e
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_dmic4_clk,
+ LPI_MUX_dmic4_data,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_i2s3_clk,
+ LPI_MUX_i2s3_data,
+ LPI_MUX_i2s3_ws,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_wsa2_swr_clk,
+ LPI_MUX_wsa2_swr_data,
+ LPI_MUX_ext_mclk1_a,
+ LPI_MUX_ext_mclk1_b,
+ LPI_MUX_ext_mclk1_c,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static int gpio0_pins[] = { 0 };
+static int gpio1_pins[] = { 1 };
+static int gpio2_pins[] = { 2 };
+static int gpio3_pins[] = { 3 };
+static int gpio4_pins[] = { 4 };
+static int gpio5_pins[] = { 5 };
+static int gpio6_pins[] = { 6 };
+static int gpio7_pins[] = { 7 };
+static int gpio8_pins[] = { 8 };
+static int gpio9_pins[] = { 9 };
+static int gpio10_pins[] = { 10 };
+static int gpio11_pins[] = { 11 };
+static int gpio12_pins[] = { 12 };
+static int gpio13_pins[] = { 13 };
+static int gpio14_pins[] = { 14 };
+static int gpio15_pins[] = { 15 };
+static int gpio16_pins[] = { 16 };
+static int gpio17_pins[] = { 17 };
+static int gpio18_pins[] = { 18 };
+
+static const struct pinctrl_pin_desc sc8280xp_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+};
+
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const dmic4_clk_groups[] = { "gpio17" };
+static const char * const dmic4_data_groups[] = { "gpio18" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4", "gpio5" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const wsa2_swr_clk_groups[] = { "gpio15" };
+static const char * const wsa2_swr_data_groups[] = { "gpio16" };
+static const char * const i2s2_data_groups[] = { "gpio15", "gpio16" };
+static const char * const i2s3_clk_groups[] = { "gpio12" };
+static const char * const i2s3_ws_groups[] = { "gpio13" };
+static const char * const i2s3_data_groups[] = { "gpio17", "gpio18" };
+static const char * const ext_mclk1_c_groups[] = { "gpio5" };
+static const char * const ext_mclk1_b_groups[] = { "gpio9" };
+static const char * const ext_mclk1_a_groups[] = { "gpio13" };
+
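+/*
+ * Each LPI_PINGROUP() entry below lists the pin number, the pin's bit
+ * offset in the slew-rate control register (LPI_NO_SLEW when the pin has
+ * no slew control), and up to four alternate functions, as defined by the
+ * shared pinctrl-lpass-lpi.h helpers.
+ */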
+static const struct lpi_pingroup sc8280xp_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_rx_data, ext_mclk1_c, qua_mi2s_data, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, ext_mclk1_b, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s3_clk, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s3_ws, ext_mclk1_a, _),
+ LPI_PINGROUP(14, 6, swr_tx_data, _, _, _),
+ LPI_PINGROUP(15, 20, i2s2_data, wsa2_swr_clk, _, _),
+ LPI_PINGROUP(16, 22, i2s2_data, wsa2_swr_data, _, _),
+ LPI_PINGROUP(17, LPI_NO_SLEW, dmic4_clk, i2s3_data, _, _),
+ LPI_PINGROUP(18, LPI_NO_SLEW, dmic4_data, i2s3_data, _, _),
+};
+
+static const struct lpi_function sc8280xp_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(dmic4_clk),
+ LPI_FUNCTION(dmic4_data),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(i2s3_clk),
+ LPI_FUNCTION(i2s3_data),
+ LPI_FUNCTION(i2s3_ws),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+ LPI_FUNCTION(wsa2_swr_clk),
+ LPI_FUNCTION(wsa2_swr_data),
+ LPI_FUNCTION(ext_mclk1_a),
+ LPI_FUNCTION(ext_mclk1_b),
+ LPI_FUNCTION(ext_mclk1_c),
+};
+
+static const struct lpi_pinctrl_variant_data sc8280xp_lpi_data = {
+ .pins = sc8280xp_lpi_pins,
+ .npins = ARRAY_SIZE(sc8280xp_lpi_pins),
+ .groups = sc8280xp_groups,
+ .ngroups = ARRAY_SIZE(sc8280xp_groups),
+ .functions = sc8280xp_functions,
+ .nfunctions = ARRAY_SIZE(sc8280xp_functions),
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sc8280xp-lpass-lpi-pinctrl",
+ .data = &sc8280xp_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sc8280xp-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI SC8280XP LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
new file mode 100644
index 000000000000..c3c8c34148f1
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_dmic4_clk,
+ LPI_MUX_dmic4_data,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_i2s3_clk,
+ LPI_MUX_i2s3_data,
+ LPI_MUX_i2s3_ws,
+ LPI_MUX_i2s4_clk,
+ LPI_MUX_i2s4_data,
+ LPI_MUX_i2s4_ws,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_wsa2_swr_clk,
+ LPI_MUX_wsa2_swr_data,
+ LPI_MUX_slimbus_clk,
+ LPI_MUX_slimbus_data,
+ LPI_MUX_ext_mclk1_a,
+ LPI_MUX_ext_mclk1_b,
+ LPI_MUX_ext_mclk1_c,
+ LPI_MUX_ext_mclk1_d,
+ LPI_MUX_ext_mclk1_e,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static int gpio0_pins[] = { 0 };
+static int gpio1_pins[] = { 1 };
+static int gpio2_pins[] = { 2 };
+static int gpio3_pins[] = { 3 };
+static int gpio4_pins[] = { 4 };
+static int gpio5_pins[] = { 5 };
+static int gpio6_pins[] = { 6 };
+static int gpio7_pins[] = { 7 };
+static int gpio8_pins[] = { 8 };
+static int gpio9_pins[] = { 9 };
+static int gpio10_pins[] = { 10 };
+static int gpio11_pins[] = { 11 };
+static int gpio12_pins[] = { 12 };
+static int gpio13_pins[] = { 13 };
+static int gpio14_pins[] = { 14 };
+static int gpio15_pins[] = { 15 };
+static int gpio16_pins[] = { 16 };
+static int gpio17_pins[] = { 17 };
+static int gpio18_pins[] = { 18 };
+static int gpio19_pins[] = { 19 };
+static int gpio20_pins[] = { 20 };
+static int gpio21_pins[] = { 21 };
+static int gpio22_pins[] = { 22 };
+
+static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+};
+
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const dmic4_clk_groups[] = { "gpio17" };
+static const char * const dmic4_data_groups[] = { "gpio18" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4", "gpio5" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const wsa2_swr_clk_groups[] = { "gpio15" };
+static const char * const wsa2_swr_data_groups[] = { "gpio16" };
+static const char * const i2s2_data_groups[] = { "gpio15", "gpio16" };
+static const char * const i2s4_ws_groups[] = { "gpio13" };
+static const char * const i2s4_clk_groups[] = { "gpio12" };
+static const char * const i2s4_data_groups[] = { "gpio17", "gpio18" };
+static const char * const slimbus_clk_groups[] = { "gpio19" };
+static const char * const i2s3_clk_groups[] = { "gpio19" };
+static const char * const i2s3_ws_groups[] = { "gpio20" };
+static const char * const i2s3_data_groups[] = { "gpio21", "gpio22" };
+static const char * const slimbus_data_groups[] = { "gpio20" };
+static const char * const ext_mclk1_c_groups[] = { "gpio5" };
+static const char * const ext_mclk1_b_groups[] = { "gpio9" };
+static const char * const ext_mclk1_a_groups[] = { "gpio13" };
+static const char * const ext_mclk1_d_groups[] = { "gpio14" };
+static const char * const ext_mclk1_e_groups[] = { "gpio22" };
+
+static const struct lpi_pingroup sm8450_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_rx_data, ext_mclk1_c, qua_mi2s_data, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, ext_mclk1_b, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s4_clk, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s4_ws, ext_mclk1_a, _),
+ LPI_PINGROUP(14, 6, swr_tx_data, ext_mclk1_d, _, _),
+ LPI_PINGROUP(15, 20, i2s2_data, wsa2_swr_clk, _, _),
+ LPI_PINGROUP(16, 22, i2s2_data, wsa2_swr_data, _, _),
+ LPI_PINGROUP(17, LPI_NO_SLEW, dmic4_clk, i2s4_data, _, _),
+ LPI_PINGROUP(18, LPI_NO_SLEW, dmic4_data, i2s4_data, _, _),
+ LPI_PINGROUP(19, LPI_NO_SLEW, i2s3_clk, slimbus_clk, _, _),
+ LPI_PINGROUP(20, LPI_NO_SLEW, i2s3_ws, slimbus_data, _, _),
+ LPI_PINGROUP(21, LPI_NO_SLEW, i2s3_data, _, _, _),
+ LPI_PINGROUP(22, LPI_NO_SLEW, i2s3_data, ext_mclk1_e, _, _),
+};
+
+static const struct lpi_function sm8450_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(dmic4_clk),
+ LPI_FUNCTION(dmic4_data),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(i2s3_clk),
+ LPI_FUNCTION(i2s3_data),
+ LPI_FUNCTION(i2s3_ws),
+ LPI_FUNCTION(i2s4_clk),
+ LPI_FUNCTION(i2s4_data),
+ LPI_FUNCTION(i2s4_ws),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(slimbus_clk),
+ LPI_FUNCTION(slimbus_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+ LPI_FUNCTION(wsa2_swr_clk),
+ LPI_FUNCTION(wsa2_swr_data),
+ LPI_FUNCTION(ext_mclk1_a),
+ LPI_FUNCTION(ext_mclk1_b),
+ LPI_FUNCTION(ext_mclk1_c),
+ LPI_FUNCTION(ext_mclk1_d),
+ LPI_FUNCTION(ext_mclk1_e),
+};
+
+static const struct lpi_pinctrl_variant_data sm8450_lpi_data = {
+ .pins = sm8450_lpi_pins,
+ .npins = ARRAY_SIZE(sm8450_lpi_pins),
+ .groups = sm8450_groups,
+ .ngroups = ARRAY_SIZE(sm8450_groups),
+ .functions = sm8450_functions,
+ .nfunctions = ARRAY_SIZE(sm8450_functions),
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sm8450-lpass-lpi-pinctrl",
+ .data = &sm8450_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sm8450-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI SM8450 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index ccaf40a9c0e6..8c31a8f6b7e4 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/gpio/driver.h>
@@ -36,6 +37,8 @@
#define PMIC_GPIO_SUBTYPE_GPIOC_8CH 0xd
#define PMIC_GPIO_SUBTYPE_GPIO_LV 0x10
#define PMIC_GPIO_SUBTYPE_GPIO_MV 0x11
+#define PMIC_GPIO_SUBTYPE_GPIO_LV_VIN2 0x12
+#define PMIC_GPIO_SUBTYPE_GPIO_MV_VIN3 0x13
#define PMIC_MPP_REG_RT_STS 0x10
#define PMIC_MPP_REG_RT_STS_VAL_MASK 0x1
@@ -98,6 +101,9 @@
#define PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS 1
#define PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS 2
+#define PMIC_GPIO_OUT_STRENGTH_LOW 1
+#define PMIC_GPIO_OUT_STRENGTH_HIGH 3
+
/* PMIC_GPIO_REG_EN_CTL */
#define PMIC_GPIO_REG_MASTER_EN_SHIFT 7
@@ -171,7 +177,6 @@ struct pmic_gpio_state {
struct regmap *map;
struct pinctrl_dev *ctrl;
struct gpio_chip chip;
- struct irq_chip irq;
u8 usid;
u8 pid_base;
};
@@ -437,7 +442,17 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
arg = pad->pullup;
break;
case PMIC_GPIO_CONF_STRENGTH:
- arg = pad->strength;
+ switch (pad->strength) {
+ case PMIC_GPIO_OUT_STRENGTH_HIGH:
+ arg = PMIC_GPIO_STRENGTH_HIGH;
+ break;
+ case PMIC_GPIO_OUT_STRENGTH_LOW:
+ arg = PMIC_GPIO_STRENGTH_LOW;
+ break;
+ default:
+ arg = pad->strength;
+ break;
+ }
break;
case PMIC_GPIO_CONF_ATEST:
arg = pad->atest;
@@ -524,7 +539,17 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PMIC_GPIO_CONF_STRENGTH:
if (arg > PMIC_GPIO_STRENGTH_LOW)
return -EINVAL;
- pad->strength = arg;
+ switch (arg) {
+ case PMIC_GPIO_STRENGTH_HIGH:
+ pad->strength = PMIC_GPIO_OUT_STRENGTH_HIGH;
+ break;
+ case PMIC_GPIO_STRENGTH_LOW:
+ pad->strength = PMIC_GPIO_OUT_STRENGTH_LOW;
+ break;
+ default:
+ pad->strength = arg;
+ break;
+ }
break;
case PMIC_GPIO_CONF_ATEST:
if (!pad->lv_mv_type || arg > 4)
@@ -823,6 +848,16 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
pad->have_buffer = true;
pad->lv_mv_type = true;
break;
+ case PMIC_GPIO_SUBTYPE_GPIO_LV_VIN2:
+ pad->num_sources = 2;
+ pad->have_buffer = true;
+ pad->lv_mv_type = true;
+ break;
+ case PMIC_GPIO_SUBTYPE_GPIO_MV_VIN3:
+ pad->num_sources = 3;
+ pad->have_buffer = true;
+ pad->lv_mv_type = true;
+ break;
default:
dev_err(state->dev, "unknown GPIO type 0x%x\n", subtype);
return -ENODEV;
@@ -985,6 +1020,33 @@ static int pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
return 0;
}
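+/*
+ * With an immutable irq_chip the driver must call gpiochip_disable_irq()
+ * and gpiochip_enable_irq() itself, so wrap the parent chip's mask/unmask
+ * operations.
+ */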
+static void pmic_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ irq_chip_mask_parent(data);
+ gpiochip_disable_irq(gc, data->hwirq);
+}
+
+static void pmic_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ gpiochip_enable_irq(gc, data->hwirq);
+ irq_chip_unmask_parent(data);
+}
+
+static const struct irq_chip spmi_gpio_irq_chip = {
+ .name = "spmi-gpio",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pmic_gpio_irq_mask,
+ .irq_unmask = pmic_gpio_irq_unmask,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_wake = irq_chip_set_wake_parent,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int pmic_gpio_probe(struct platform_device *pdev)
{
struct irq_domain *parent_domain;
@@ -1078,16 +1140,8 @@ static int pmic_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;
- state->irq.name = "spmi-gpio",
- state->irq.irq_ack = irq_chip_ack_parent,
- state->irq.irq_mask = irq_chip_mask_parent,
- state->irq.irq_unmask = irq_chip_unmask_parent,
- state->irq.irq_set_type = irq_chip_set_type_parent,
- state->irq.irq_set_wake = irq_chip_set_wake_parent,
- state->irq.flags = IRQCHIP_MASK_ON_SUSPEND,
-
girq = &state->chip.irq;
- girq->chip = &state->irq;
+ gpio_irq_chip_set_chip(girq, &spmi_gpio_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
@@ -1147,6 +1201,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm6350-gpio", .data = (void *) 9 },
+ { .compatible = "qcom,pm7250b-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 4837bceb767b..bd13b5ef246d 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1166,15 +1166,15 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
if (ret)
goto err_put_banks;
- ret = samsung_gpiolib_register(pdev, drvdata);
- if (ret)
- goto err_unregister;
-
if (ctrl->eint_gpio_init)
ctrl->eint_gpio_init(drvdata);
if (ctrl->eint_wkup_init)
ctrl->eint_wkup_init(drvdata);
+ ret = samsung_gpiolib_register(pdev, drvdata);
+ if (ret)
+ goto err_unregister;
+
platform_set_drvdata(pdev, drvdata);
return 0;
diff --git a/drivers/pinctrl/starfive/Kconfig b/drivers/pinctrl/starfive/Kconfig
new file mode 100644
index 000000000000..55c514e622f9
--- /dev/null
+++ b/drivers/pinctrl/starfive/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config PINCTRL_STARFIVE_JH7100
+ tristate "Pinctrl and GPIO driver for the StarFive JH7100 SoC"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ depends on OF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select OF_GPIO
+ default SOC_STARFIVE
+ help
+ Say yes here to support pin control on the StarFive JH7100 SoC.
+ This also provides an interface to the GPIO pins not used by other
+ peripherals, supporting inputs, outputs, pull-up/pull-down configuration
+ and interrupts on input changes.
diff --git a/drivers/pinctrl/starfive/Makefile b/drivers/pinctrl/starfive/Makefile
new file mode 100644
index 000000000000..0293f26a0a99
--- /dev/null
+++ b/drivers/pinctrl/starfive/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PINCTRL_STARFIVE_JH7100) += pinctrl-starfive-jh7100.o
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
index 3eb40e230d98..5b544fb7f3d8 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
@@ -20,12 +20,12 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
-#include <dt-bindings/pinctrl/pinctrl-starfive.h>
+#include <dt-bindings/pinctrl/pinctrl-starfive-jh7100.h>
-#include "core.h"
-#include "pinctrl-utils.h"
-#include "pinmux.h"
-#include "pinconf.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "../pinmux.h"
+#include "../pinconf.h"
#define DRIVER_NAME "pinctrl-starfive"
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 14bcca73238a..e485506ea599 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1603,10 +1603,9 @@ int stm32_pctl_probe(struct platform_device *pdev)
bank->clk = of_clk_get_by_name(np, NULL);
if (IS_ERR(bank->clk)) {
- if (PTR_ERR(bank->clk) != -EPROBE_DEFER)
- dev_err(dev, "failed to get clk (%ld)\n", PTR_ERR(bank->clk));
fwnode_handle_put(child);
- return PTR_ERR(bank->clk);
+ return dev_err_probe(dev, PTR_ERR(bank->clk),
+ "failed to get clk\n");
}
i++;
}
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
index 31d62bbb7f43..96a350e70668 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -551,12 +551,9 @@ static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
int ret;
ret = platform_irq_count(pdev);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't determine irq count: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't determine irq count\n");
switch (ret) {
case 2:
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index b437847b6237..dbd327712205 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -3,6 +3,8 @@ if MIPS
source "drivers/platform/mips/Kconfig"
endif
+source "drivers/platform/loongarch/Kconfig"
+
source "drivers/platform/goldfish/Kconfig"
source "drivers/platform/chrome/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 4de08ef4ec9d..41640172975a 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_LOONGARCH) += loongarch/
obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/
obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_OLPC_EC) += olpc/
diff --git a/drivers/platform/loongarch/Kconfig b/drivers/platform/loongarch/Kconfig
new file mode 100644
index 000000000000..5633e4d73991
--- /dev/null
+++ b/drivers/platform/loongarch/Kconfig
@@ -0,0 +1,31 @@
+#
+# LoongArch Platform Specific Drivers
+#
+
+menuconfig LOONGARCH_PLATFORM_DEVICES
+ bool "LoongArch Platform Specific Device Drivers"
+ default y
+ depends on LOONGARCH
+ help
+ Say Y here to get to see options for device drivers of various
+ LoongArch platforms, including vendor-specific laptop/desktop
+ extension and hardware monitor drivers. This option itself does
+ not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if LOONGARCH_PLATFORM_DEVICES
+
+config LOONGSON_LAPTOP
+ tristate "Generic Loongson-3 Laptop Driver"
+ depends on ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ depends on MACH_LOONGSON64
+ select ACPI_VIDEO
+ select INPUT_SPARSEKMAP
+ default y
+ help
+ Generic ACPI-based driver for Loongson-3 family laptops.
+
+endif # LOONGARCH_PLATFORM_DEVICES
diff --git a/drivers/platform/loongarch/Makefile b/drivers/platform/loongarch/Makefile
new file mode 100644
index 000000000000..f43ab03db1a2
--- /dev/null
+++ b/drivers/platform/loongarch/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LOONGSON_LAPTOP) += loongson-laptop.o
diff --git a/drivers/platform/loongarch/loongson-laptop.c b/drivers/platform/loongarch/loongson-laptop.c
new file mode 100644
index 000000000000..f0166ad5d2c2
--- /dev/null
+++ b/drivers/platform/loongarch/loongson-laptop.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Loongson processor based LAPTOP/ALL-IN-ONE driver
+ *
+ * Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <acpi/video.h>
+
+/* 1. Driver-wide structs and misc. variables */
+
+/* ACPI HIDs */
+#define LOONGSON_ACPI_EC_HID "PNP0C09"
+#define LOONGSON_ACPI_HKEY_HID "LOON0000"
+
+#define ACPI_LAPTOP_NAME "loongson-laptop"
+#define ACPI_LAPTOP_ACPI_EVENT_PREFIX "loongson"
+
+#define MAX_ACPI_ARGS 3
+#define GENERIC_HOTKEY_MAP_MAX 64
+
+#define GENERIC_EVENT_TYPE_OFF 12
+#define GENERIC_EVENT_TYPE_MASK 0xF000
+#define GENERIC_EVENT_CODE_MASK 0x0FFF
+
+struct generic_sub_driver {
+ u32 type;
+ char *name;
+ acpi_handle *handle;
+ struct acpi_device *device;
+ struct platform_driver *driver;
+ int (*init)(struct generic_sub_driver *sub_driver);
+ void (*notify)(struct generic_sub_driver *sub_driver, u32 event);
+ u8 acpi_notify_installed;
+};
+
+static u32 input_device_registered;
+static struct input_dev *generic_inputdev;
+
+static acpi_handle hotkey_handle;
+static struct key_entry hotkey_keycode_map[GENERIC_HOTKEY_MAP_MAX];
+
+int loongson_laptop_turn_on_backlight(void);
+int loongson_laptop_turn_off_backlight(void);
+static int loongson_laptop_backlight_update(struct backlight_device *bd);
+
+/* 2. ACPI Helpers and device model */
+
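+/*
+ * Evaluate an ACPI method. The format string starts with an optional 'q'
+ * (quiet: do not log failures), then one character for the result type
+ * ('d' integer, 'v' void), then one character per argument (currently only
+ * 'd' for an integer).
+ */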
+static int acpi_evalf(acpi_handle handle, int *res, char *method, char *fmt, ...)
+{
+ char res_type;
+ char *fmt0 = fmt;
+ va_list ap;
+ int success, quiet;
+ acpi_status status;
+ struct acpi_object_list params;
+ struct acpi_buffer result, *resultp;
+ union acpi_object in_objs[MAX_ACPI_ARGS], out_obj;
+
+ if (!*fmt) {
+ pr_err("acpi_evalf() called with empty format\n");
+ return 0;
+ }
+
+ if (*fmt == 'q') {
+ quiet = 1;
+ fmt++;
+ } else
+ quiet = 0;
+
+ res_type = *(fmt++);
+
+ params.count = 0;
+ params.pointer = &in_objs[0];
+
+ va_start(ap, fmt);
+ while (*fmt) {
+ char c = *(fmt++);
+ switch (c) {
+ case 'd': /* int */
+ in_objs[params.count].integer.value = va_arg(ap, int);
+ in_objs[params.count++].type = ACPI_TYPE_INTEGER;
+ break;
+ /* add more types as needed */
+ default:
+ pr_err("acpi_evalf() called with invalid format character '%c'\n", c);
+ va_end(ap);
+ return 0;
+ }
+ }
+ va_end(ap);
+
+ if (res_type != 'v') {
+ result.length = sizeof(out_obj);
+ result.pointer = &out_obj;
+ resultp = &result;
+ } else
+ resultp = NULL;
+
+ status = acpi_evaluate_object(handle, method, &params, resultp);
+
+ switch (res_type) {
+ case 'd': /* int */
+ success = (status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER);
+ if (success && res)
+ *res = out_obj.integer.value;
+ break;
+ case 'v': /* void */
+ success = status == AE_OK;
+ break;
+ /* add more types as needed */
+ default:
+ pr_err("acpi_evalf() called with invalid format character '%c'\n", res_type);
+ return 0;
+ }
+
+ if (!success && !quiet)
+ pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
+ method, fmt0, acpi_format_exception(status));
+
+ return success;
+}
+
+static int hotkey_status_get(int *status)
+{
+ if (!acpi_evalf(hotkey_handle, status, "GSWS", "d"))
+ return -EIO;
+
+ return 0;
+}
+
+static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct generic_sub_driver *sub_driver = data;
+
+ if (!sub_driver || !sub_driver->notify)
+ return;
+ sub_driver->notify(sub_driver, event);
+}
+
+static int __init setup_acpi_notify(struct generic_sub_driver *sub_driver)
+{
+ acpi_status status;
+
+ if (!*sub_driver->handle)
+ return 0;
+
+ sub_driver->device = acpi_fetch_acpi_dev(*sub_driver->handle);
+ if (!sub_driver->device) {
+ pr_err("acpi_fetch_acpi_dev(%s) failed\n", sub_driver->name);
+ return -ENODEV;
+ }
+
+ sub_driver->device->driver_data = sub_driver;
+ sprintf(acpi_device_class(sub_driver->device), "%s/%s",
+ ACPI_LAPTOP_ACPI_EVENT_PREFIX, sub_driver->name);
+
+ status = acpi_install_notify_handler(*sub_driver->handle,
+ sub_driver->type, dispatch_acpi_notify, sub_driver);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_ALREADY_EXISTS) {
+ pr_notice("Another device driver is already "
+ "handling %s events\n", sub_driver->name);
+ } else {
+ pr_err("acpi_install_notify_handler(%s) failed: %s\n",
+ sub_driver->name, acpi_format_exception(status));
+ }
+ return -ENODEV;
+ }
+ sub_driver->acpi_notify_installed = 1;
+
+ return 0;
+}
+
+static int loongson_hotkey_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int loongson_hotkey_resume(struct device *dev)
+{
+ int status = 0;
+ struct key_entry ke;
+ struct backlight_device *bd;
+
+ /*
+ * The lid event can only be handled if the firmware supports the SW_LID
+ * event model; development boards without an EC do not support it.
+ */
+ if (test_bit(SW_LID, generic_inputdev->swbit)) {
+ if (hotkey_status_get(&status) < 0)
+ return -EIO;
+ /*
+ * The input device's sw bitmap records the last reported lid
+ * state. A lid event may also be raised when the system is
+ * woken by another wake-up source, so checking the SW_LID bit
+ * in the sw bitmap avoids reporting it again.
+ *
+ * The input core drops a lid event whose value matches the
+ * last reported state, so the driver never reports duplicate
+ * events.
+ *
+ * The lid status is normally 0 here, but confirm it anyway to
+ * guard against hardware exceptions.
+ */
+ if (test_bit(SW_LID, generic_inputdev->sw) && !(status & (1 << SW_LID))) {
+ ke.type = KE_SW;
+ ke.sw.value = (u8)status;
+ ke.sw.code = SW_LID;
+ sparse_keymap_report_entry(generic_inputdev, &ke, 1, true);
+ }
+ }
+
+ bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM);
+ if (bd) {
+ if (loongson_laptop_backlight_update(bd))
+ pr_warn("Loongson_backlight: resume brightness failed\n");
+ else
+ pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness);
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(loongson_hotkey_pm,
+ loongson_hotkey_suspend, loongson_hotkey_resume);
+
+static int loongson_hotkey_probe(struct platform_device *pdev)
+{
+ hotkey_handle = ACPI_HANDLE(&pdev->dev);
+
+ if (!hotkey_handle)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct acpi_device_id loongson_device_ids[] = {
+ {LOONGSON_ACPI_HKEY_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, loongson_device_ids);
+
+static struct platform_driver loongson_hotkey_driver = {
+ .probe = loongson_hotkey_probe,
+ .driver = {
+ .name = "loongson-hotkey",
+ .owner = THIS_MODULE,
+ .pm = pm_ptr(&loongson_hotkey_pm),
+ .acpi_match_table = loongson_device_ids,
+ },
+};
+
+static int hotkey_map(void)
+{
+ u32 index;
+ acpi_status status;
+ struct acpi_buffer buf;
+ union acpi_object *pack;
+
+ buf.length = ACPI_ALLOCATE_BUFFER;
+ status = acpi_evaluate_object_typed(hotkey_handle, "KMAP", NULL, &buf, ACPI_TYPE_PACKAGE);
+ if (status != AE_OK) {
+ pr_err("ACPI exception: %s\n", acpi_format_exception(status));
+ return -1;
+ }
+ pack = buf.pointer;
+ for (index = 0; index < pack->package.count; index++) {
+ union acpi_object *element, *sub_pack;
+
+ sub_pack = &pack->package.elements[index];
+
+ element = &sub_pack->package.elements[0];
+ hotkey_keycode_map[index].type = element->integer.value;
+ element = &sub_pack->package.elements[1];
+ hotkey_keycode_map[index].code = element->integer.value;
+ element = &sub_pack->package.elements[2];
+ hotkey_keycode_map[index].keycode = element->integer.value;
+ }
+
+ return 0;
+}
+
+static int hotkey_backlight_set(bool enable)
+{
+ if (!acpi_evalf(hotkey_handle, NULL, "VCBL", "vd", enable ? 1 : 0))
+ return -EIO;
+
+ return 0;
+}
+
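+/*
+ * EC backlight helpers: "ECBG" reads the current brightness, "ECBS" sets
+ * it, and "ECLL"/"ECSL" report the maximum/minimum supported levels used
+ * by ec_backlight_level() to clamp requests.
+ */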
+static int ec_get_brightness(void)
+{
+ int status = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECBG", "d"))
+ return -EIO;
+
+ return status;
+}
+
+static int ec_set_brightness(int level)
+{
+ int ret = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, NULL, "ECBS", "vd", level))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int ec_backlight_level(u8 level)
+{
+ int status = 0;
+
+ if (!hotkey_handle)
+ return -ENXIO;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
+ return -EIO;
+
+ if ((status < 0) || (level > status))
+ return status;
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECSL", "d"))
+ return -EIO;
+
+ if ((status < 0) || (level < status))
+ return status;
+
+ return level;
+}
+
+static int loongson_laptop_backlight_update(struct backlight_device *bd)
+{
+ int lvl = ec_backlight_level(bd->props.brightness);
+
+ if (lvl < 0)
+ return -EIO;
+ if (ec_set_brightness(lvl))
+ return -EIO;
+
+ return 0;
+}
+
+static int loongson_laptop_get_brightness(struct backlight_device *bd)
+{
+ int level;
+
+ level = ec_get_brightness();
+ if (level < 0)
+ return -EIO;
+
+ return level;
+}
+
+static const struct backlight_ops backlight_laptop_ops = {
+ .update_status = loongson_laptop_backlight_update,
+ .get_brightness = loongson_laptop_get_brightness,
+};
+
+static int laptop_backlight_register(void)
+{
+ int status = 0;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(props));
+
+ if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
+ return -EIO;
+
+ props.brightness = 1;
+ props.max_brightness = status;
+ props.type = BACKLIGHT_PLATFORM;
+
+ backlight_device_register("loongson_laptop",
+ NULL, NULL, &backlight_laptop_ops, &props);
+
+ return 0;
+}
+
+int loongson_laptop_turn_on_backlight(void)
+{
+ int status;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+ arg0.integer.value = 1;
+ status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_info("Loongson lvds error: 0x%x\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int loongson_laptop_turn_off_backlight(void)
+{
+ int status;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+ arg0.integer.value = 0;
+ status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_info("Loongson lvds error: 0x%x\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init event_init(struct generic_sub_driver *sub_driver)
+{
+ int ret;
+
+ ret = hotkey_map();
+ if (ret < 0) {
+ pr_err("Failed to parse keymap from DSDT\n");
+ return ret;
+ }
+
+ ret = sparse_keymap_setup(generic_inputdev, hotkey_keycode_map, NULL);
+ if (ret < 0) {
+ pr_err("Failed to setup input device keymap\n");
+ input_free_device(generic_inputdev);
+
+ return ret;
+ }
+
+ /*
+ * This hotkey driver handles backlight events only when
+ * acpi_video_get_backlight_type() returns acpi_backlight_vendor.
+ */
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor)
+ hotkey_backlight_set(true);
+ else
+ hotkey_backlight_set(false);
+
+ pr_info("ACPI: enabling firmware HKEY event interface...\n");
+
+ return ret;
+}
+
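+/*
+ * Hotkey events encode the type in bits 15:12 and the scan code in bits
+ * 11:0; for KE_SW entries the current switch state is read back via
+ * hotkey_status_get() before being reported.
+ */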
+static void event_notify(struct generic_sub_driver *sub_driver, u32 event)
+{
+ int type, scan_code;
+ struct key_entry *ke = NULL;
+
+ scan_code = event & GENERIC_EVENT_CODE_MASK;
+ type = (event & GENERIC_EVENT_TYPE_MASK) >> GENERIC_EVENT_TYPE_OFF;
+ ke = sparse_keymap_entry_from_scancode(generic_inputdev, scan_code);
+ if (ke) {
+ if (type == KE_SW) {
+ int status = 0;
+
+ if (hotkey_status_get(&status) < 0)
+ return;
+
+ ke->sw.value = !!(status & (1 << ke->sw.code));
+ }
+ sparse_keymap_report_entry(generic_inputdev, ke, 1, true);
+ }
+}
+
+/* 3. Infrastructure */
+
+static void generic_subdriver_exit(struct generic_sub_driver *sub_driver);
+
+static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver)
+{
+ int ret;
+
+ if (!sub_driver || !sub_driver->driver)
+ return -EINVAL;
+
+ ret = platform_driver_register(sub_driver->driver);
+ if (ret)
+ return -EINVAL;
+
+ if (sub_driver->init)
+ sub_driver->init(sub_driver);
+
+ if (sub_driver->notify) {
+ ret = setup_acpi_notify(sub_driver);
+ if (ret == -ENODEV) {
+ ret = 0;
+ goto err_out;
+ }
+ if (ret < 0)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ generic_subdriver_exit(sub_driver);
+ return (ret < 0) ? ret : 0;
+}
+
+static void generic_subdriver_exit(struct generic_sub_driver *sub_driver)
+{
+ if (sub_driver->acpi_notify_installed) {
+ acpi_remove_notify_handler(*sub_driver->handle,
+ sub_driver->type, dispatch_acpi_notify);
+ sub_driver->acpi_notify_installed = 0;
+ }
+ platform_driver_unregister(sub_driver->driver);
+}
+
+static struct generic_sub_driver generic_sub_drivers[] __refdata = {
+ {
+ .name = "hotkey",
+ .init = event_init,
+ .notify = event_notify,
+ .handle = &hotkey_handle,
+ .type = ACPI_DEVICE_NOTIFY,
+ .driver = &loongson_hotkey_driver,
+ },
+};
+
+static int __init generic_acpi_laptop_init(void)
+{
+ bool ec_found;
+ int i, ret, status;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* The EC device is required */
+ ec_found = acpi_dev_found(LOONGSON_ACPI_EC_HID);
+ if (!ec_found)
+ return -ENODEV;
+
+ /* Enable SCI for EC */
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
+
+ generic_inputdev = input_allocate_device();
+ if (!generic_inputdev) {
+ pr_err("Unable to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ /* Prepare input device, but don't register */
+ generic_inputdev->name =
+ "Loongson Generic Laptop/All-in-One Extra Buttons";
+ generic_inputdev->phys = ACPI_LAPTOP_NAME "/input0";
+ generic_inputdev->id.bustype = BUS_HOST;
+ generic_inputdev->dev.parent = NULL;
+
+ /* Init subdrivers */
+ for (i = 0; i < ARRAY_SIZE(generic_sub_drivers); i++) {
+ ret = generic_subdriver_init(&generic_sub_drivers[i]);
+ if (ret < 0) {
+ input_free_device(generic_inputdev);
+ while (--i >= 0)
+ generic_subdriver_exit(&generic_sub_drivers[i]);
+ return ret;
+ }
+ }
+
+ ret = input_register_device(generic_inputdev);
+ if (ret < 0) {
+ input_free_device(generic_inputdev);
+ while (--i >= 0)
+ generic_subdriver_exit(&generic_sub_drivers[i]);
+ pr_err("Unable to register input device\n");
+ return ret;
+ }
+
+ input_device_registered = 1;
+
+ if (acpi_evalf(hotkey_handle, &status, "ECBG", "d")) {
+ pr_info("Loongson Laptop used, init brightness is 0x%x\n", status);
+ ret = laptop_backlight_register();
+ if (ret < 0)
+ pr_err("Loongson Laptop: laptop-backlight device register failed\n");
+ }
+
+ return 0;
+}
+
+static void __exit generic_acpi_laptop_exit(void)
+{
+ if (generic_inputdev) {
+ if (input_device_registered)
+ input_unregister_device(generic_inputdev);
+ else
+ input_free_device(generic_inputdev);
+ }
+}
+
+module_init(generic_acpi_laptop_init);
+module_exit(generic_acpi_laptop_exit);
+
+MODULE_AUTHOR("Jianmin Lv <lvjianmin@loongson.cn>");
+MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Laptop/All-in-One ACPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 617dbf98980e..97cfbc520a02 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -125,8 +125,7 @@ static irqreturn_t int0002_irq(int irq, void *data)
if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT))
return IRQ_NONE;
- generic_handle_irq(irq_find_mapping(chip->irq.domain,
- GPE0A_PME_B0_VIRT_GPIO_PIN));
+ generic_handle_domain_irq_safe(chip->irq.domain, GPE0A_PME_B0_VIRT_GPIO_PIN);
pm_wakeup_hard_event(chip->parent);
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index f83e9c393f31..5b8d1a9620a5 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -128,15 +128,15 @@ skl_int3472_fill_clk_pdata(struct device *dev, struct tps68470_clk_platform_data
for_each_acpi_consumer_dev(adev, consumer) {
sensor_name = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
acpi_dev_name(consumer));
- if (!sensor_name)
+ if (!sensor_name) {
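+ /* drop the reference taken by for_each_acpi_consumer_dev() */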
+ acpi_dev_put(consumer);
return -ENOMEM;
+ }
(*clk_pdata)->consumers[i].consumer_dev_name = sensor_name;
i++;
}
- acpi_dev_put(consumer);
-
return n_consumers;
}
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index da78dc77aed3..4f05f610391b 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -206,7 +206,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
if (i >= 0) {
flags = acpi_dev_irq_flags(gpio->triggering,
gpio->polarity,
- gpio->shareable);
+ gpio->shareable,
+ gpio->wake_capable);
} else {
flags = IORESOURCE_DISABLED;
}
@@ -315,7 +316,7 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
- flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable, p->wake_capable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -339,7 +340,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
}
}
- flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable, p->wake_capable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
index 4a688741a88a..16bc01738be9 100644
--- a/drivers/power/reset/qcom-pon.c
+++ b/drivers/power/reset/qcom-pon.c
@@ -82,6 +82,7 @@ static const struct of_device_id pm8916_pon_id_table[] = {
{ .compatible = "qcom,pm8916-pon", .data = (void *)GEN1_REASON_SHIFT },
{ .compatible = "qcom,pms405-pon", .data = (void *)GEN1_REASON_SHIFT },
{ .compatible = "qcom,pm8998-pon", .data = (void *)GEN2_REASON_SHIFT },
+ { .compatible = "qcom,pmk8350-pon", .data = (void *)GEN2_REASON_SHIFT },
{ }
};
MODULE_DEVICE_TABLE(of, pm8916_pon_id_table);
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 1aa8323ad9f6..0bbfe6a7ce4d 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -619,6 +619,21 @@ config CHARGER_MT6360
Average Input Current Regulation, Battery Temperature Sensing,
Over-Temperature Protection, DPDM Detection for BC1.2.
+config CHARGER_MT6370
+ tristate "MediaTek MT6370 Charger Driver"
+ depends on MFD_MT6370
+ depends on REGULATOR
+ depends on IIO
+ select LINEAR_RANGES
+ help
+ Say Y here to enable MT6370 Charger Part.
+ The device supports High-Accuracy Voltage/Current Regulation,
+ Average Input Current Regulation, Battery Temperature Sensing,
+ Over-Temperature Protection, DPDM Detection for BC1.2.
+
+ This driver can also be built as a module. If so, the module
+ will be called "mt6370-charger".
+
config CHARGER_QCOM_SMBB
tristate "Qualcomm Switch-Mode Battery Charger and Boost"
depends on MFD_SPMI_PMIC || COMPILE_TEST
@@ -708,6 +723,12 @@ config CHARGER_BQ256XX
charge management and system power path management devices for single
cell Li-ion and Li-polymer batteries.
+config CHARGER_RK817
+ tristate "Rockchip RK817 PMIC Battery Charger"
+ depends on MFD_RK808
+ help
+ Say Y to include support for Rockchip RK817 Battery Charger.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB3XX Battery Charger"
depends on I2C
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 7f02f36aea55..0ee8653e882e 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_MP2629) += mp2629_charger.o
obj-$(CONFIG_CHARGER_MT6360) += mt6360_charger.o
+obj-$(CONFIG_CHARGER_MT6370) += mt6370-charger.o
obj-$(CONFIG_CHARGER_QCOM_SMBB) += qcom_smbb.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
@@ -91,6 +92,7 @@ obj-$(CONFIG_CHARGER_BQ2515X) += bq2515x_charger.o
obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o
obj-$(CONFIG_CHARGER_BQ25980) += bq25980_charger.o
obj-$(CONFIG_CHARGER_BQ256XX) += bq256xx_charger.o
+obj-$(CONFIG_CHARGER_RK817) += rk817_charger.o
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index ae4be553f424..ea4ad61d4c7e 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -252,12 +252,6 @@ static enum power_supply_property ab8500_chargalg_props[] = {
POWER_SUPPLY_PROP_HEALTH,
};
-struct ab8500_chargalg_sysfs_entry {
- struct attribute attr;
- ssize_t (*show)(struct ab8500_chargalg *di, char *buf);
- ssize_t (*store)(struct ab8500_chargalg *di, const char *buf, size_t length);
-};
-
/**
* ab8500_chargalg_safety_timer_expired() - Expiration of the safety timer
* @timer: pointer to the hrtimer structure
@@ -490,8 +484,6 @@ static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
int vset_uv, int iset_ua)
{
- static int ab8500_chargalg_ex_ac_enable_toggle;
-
if (!di->ac_chg || !di->ac_chg->ops.enable)
return -ENXIO;
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
index 003557043ab3..fcf8ff0bc974 100644
--- a/drivers/power/supply/adp5061.c
+++ b/drivers/power/supply/adp5061.c
@@ -427,11 +427,11 @@ static int adp5061_get_chg_type(struct adp5061_state *st,
if (ret < 0)
return ret;
- chg_type = adp5061_chg_type[ADP5061_CHG_STATUS_1_CHG_STATUS(status1)];
- if (chg_type > ADP5061_CHG_FAST_CV)
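+ /* validate the raw status index before indexing adp5061_chg_type[] */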
+ chg_type = ADP5061_CHG_STATUS_1_CHG_STATUS(status1);
+ if (chg_type >= ARRAY_SIZE(adp5061_chg_type))
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
else
- val->intval = chg_type;
+ val->intval = adp5061_chg_type[chg_type];
return ret;
}
@@ -493,6 +493,9 @@ static int adp5061_get_battery_status(struct adp5061_state *st,
case 0x4: /* VBAT_SNS > VWEAK */
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
break;
+ default:
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ break;
}
return ret;
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 06ea7399d151..6020b58c641d 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -613,6 +613,33 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
return 0;
}
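+/*
+ * Only the input current limit is writable from user space; the requested
+ * value is mapped onto the IINLIM lookup table before writing F_IINLIM.
+ */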
+static int bq25890_power_supply_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq25890_device *bq = power_supply_get_drvdata(psy);
+ u8 lval;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ lval = bq25890_find_idx(val->intval, TBL_IINLIM);
+ return bq25890_field_write(bq, F_IINLIM, lval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bq25890_power_supply_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* On the BQ25892 try to get charger-type info from our supplier */
static void bq25890_charger_external_power_changed(struct power_supply *psy)
{
@@ -874,6 +901,8 @@ static const struct power_supply_desc bq25890_power_supply_desc = {
.properties = bq25890_power_supply_props,
.num_properties = ARRAY_SIZE(bq25890_power_supply_props),
.get_property = bq25890_power_supply_get_property,
+ .set_property = bq25890_power_supply_set_property,
+ .property_is_writeable = bq25890_power_supply_property_is_writeable,
.external_power_changed = bq25890_charger_external_power_changed,
};
@@ -946,6 +975,7 @@ static void bq25890_pump_express_work(struct work_struct *data)
return;
error_print:
+ bq25890_field_write(bq, F_PUMPX_EN, 0);
dev_err(bq->dev, "Failed to request hi-voltage charging\n");
}
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 35e6a394c0df..8bf048fbd36a 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -868,11 +868,11 @@ enum bq27xxx_dm_reg_id {
BQ27XXX_DM_TERMINATE_VOLTAGE,
};
-#define bq27000_dm_regs 0
-#define bq27010_dm_regs 0
-#define bq2750x_dm_regs 0
-#define bq2751x_dm_regs 0
-#define bq2752x_dm_regs 0
+#define bq27000_dm_regs NULL
+#define bq27010_dm_regs NULL
+#define bq2750x_dm_regs NULL
+#define bq2751x_dm_regs NULL
+#define bq2752x_dm_regs NULL
#if 0 /* not yet tested */
static struct bq27xxx_dm_reg bq27500_dm_regs[] = {
@@ -881,24 +881,24 @@ static struct bq27xxx_dm_reg bq27500_dm_regs[] = {
[BQ27XXX_DM_TERMINATE_VOLTAGE] = { 80, 48, 2, 1000, 32767 },
};
#else
-#define bq27500_dm_regs 0
+#define bq27500_dm_regs NULL
#endif
/* todo create data memory definitions from datasheets and test on chips */
-#define bq27510g1_dm_regs 0
-#define bq27510g2_dm_regs 0
-#define bq27510g3_dm_regs 0
-#define bq27520g1_dm_regs 0
-#define bq27520g2_dm_regs 0
-#define bq27520g3_dm_regs 0
-#define bq27520g4_dm_regs 0
-#define bq27521_dm_regs 0
-#define bq27530_dm_regs 0
-#define bq27531_dm_regs 0
-#define bq27541_dm_regs 0
-#define bq27542_dm_regs 0
-#define bq27546_dm_regs 0
-#define bq27742_dm_regs 0
+#define bq27510g1_dm_regs NULL
+#define bq27510g2_dm_regs NULL
+#define bq27510g3_dm_regs NULL
+#define bq27520g1_dm_regs NULL
+#define bq27520g2_dm_regs NULL
+#define bq27520g3_dm_regs NULL
+#define bq27520g4_dm_regs NULL
+#define bq27521_dm_regs NULL
+#define bq27530_dm_regs NULL
+#define bq27531_dm_regs NULL
+#define bq27541_dm_regs NULL
+#define bq27542_dm_regs NULL
+#define bq27546_dm_regs NULL
+#define bq27742_dm_regs NULL
#if 0 /* not yet tested */
static struct bq27xxx_dm_reg bq27545_dm_regs[] = {
@@ -907,7 +907,7 @@ static struct bq27xxx_dm_reg bq27545_dm_regs[] = {
[BQ27XXX_DM_TERMINATE_VOLTAGE] = { 80, 67, 2, 2800, 3700 },
};
#else
-#define bq27545_dm_regs 0
+#define bq27545_dm_regs NULL
#endif
static struct bq27xxx_dm_reg bq27411_dm_regs[] = {
@@ -937,7 +937,7 @@ static struct bq27xxx_dm_reg bq27426_dm_regs[] = {
#if 0 /* not yet tested */
#define bq27441_dm_regs bq27421_dm_regs
#else
-#define bq27441_dm_regs 0
+#define bq27441_dm_regs NULL
#endif
#if 0 /* not yet tested */
@@ -947,13 +947,13 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = {
[BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 9, 2, 2500, 3700 },
};
#else
-#define bq27621_dm_regs 0
+#define bq27621_dm_regs NULL
#endif
-#define bq27z561_dm_regs 0
-#define bq28z610_dm_regs 0
-#define bq34z100_dm_regs 0
-#define bq78z100_dm_regs 0
+#define bq27z561_dm_regs NULL
+#define bq28z610_dm_regs NULL
+#define bq34z100_dm_regs NULL
+#define bq78z100_dm_regs NULL
#define BQ27XXX_O_ZERO BIT(0)
#define BQ27XXX_O_OTDC BIT(1) /* has OTC/OTD overtemperature flags */
@@ -1044,12 +1044,12 @@ struct bq27xxx_dm_buf {
.block = (di)->dm_regs[i].offset / BQ27XXX_DM_SZ, \
}
-static inline u16 *bq27xxx_dm_reg_ptr(struct bq27xxx_dm_buf *buf,
+static inline __be16 *bq27xxx_dm_reg_ptr(struct bq27xxx_dm_buf *buf,
struct bq27xxx_dm_reg *reg)
{
if (buf->class == reg->subclass_id &&
buf->block == reg->offset / BQ27XXX_DM_SZ)
- return (u16 *) (buf->data + reg->offset % BQ27XXX_DM_SZ);
+ return (__be16 *) (buf->data + reg->offset % BQ27XXX_DM_SZ);
return NULL;
}
@@ -1275,7 +1275,7 @@ static void bq27xxx_battery_update_dm_block(struct bq27xxx_device_info *di,
{
struct bq27xxx_dm_reg *reg = &di->dm_regs[reg_id];
const char *str = bq27xxx_dm_reg_name[reg_id];
- u16 *prev = bq27xxx_dm_reg_ptr(buf, reg);
+ __be16 *prev = bq27xxx_dm_reg_ptr(buf, reg);
if (prev == NULL) {
dev_warn(di->dev, "buffer does not match %s dm spec\n", str);
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 60e0ce105a29..be9764541d52 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -5,7 +5,7 @@
* Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
*
* Rewritten for Linux power framework with some parts based on
- * on earlier driver found in the Motorola Linux kernel:
+ * earlier driver found in the Motorola Linux kernel:
*
* Copyright (C) 2009-2010 Motorola, Inc.
*/
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 81e17ad80163..6d52641151d9 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/time.h>
#include <linux/workqueue.h>
+#include <linux/devm-helpers.h>
#define CW2015_SIZE_BATINFO 64
@@ -698,7 +699,8 @@ static int cw_bat_probe(struct i2c_client *client)
}
cw_bat->battery_workqueue = create_singlethread_workqueue("rk_battery");
- INIT_DELAYED_WORK(&cw_bat->battery_delay_work, cw_bat_work);
+ devm_delayed_work_autocancel(&client->dev,
+ &cw_bat->battery_delay_work, cw_bat_work);
queue_delayed_work(cw_bat->battery_workqueue,
&cw_bat->battery_delay_work, msecs_to_jiffies(10));
return 0;
@@ -725,14 +727,6 @@ static int __maybe_unused cw_bat_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume);
-static void cw_bat_remove(struct i2c_client *client)
-{
- struct cw_battery *cw_bat = i2c_get_clientdata(client);
-
- cancel_delayed_work_sync(&cw_bat->battery_delay_work);
- power_supply_put_battery_info(cw_bat->rk_bat, cw_bat->battery);
-}
-
static const struct i2c_device_id cw_bat_id_table[] = {
{ "cw2015", 0 },
{ }
@@ -751,7 +745,6 @@ static struct i2c_driver cw_bat_driver = {
.pm = &cw_bat_pm_ops,
},
.probe_new = cw_bat_probe,
- .remove = cw_bat_remove,
.id_table = cw_bat_id_table,
};
diff --git a/drivers/power/supply/max1721x_battery.c b/drivers/power/supply/max1721x_battery.c
index 473e53cd2801..d8d52e09da7b 100644
--- a/drivers/power/supply/max1721x_battery.c
+++ b/drivers/power/supply/max1721x_battery.c
@@ -444,5 +444,5 @@ module_w1_family(w1_max1721x_family);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex A. Mihaylov <minimumlaw@rambler.ru>");
-MODULE_DESCRIPTION("Maxim MAX17211/MAX17215 Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim MAX17211/MAX17215 Fuel Gauge IC driver");
MODULE_ALIAS("w1-family-" __stringify(W1_MAX1721X_FAMILY_ID));
diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
new file mode 100644
index 000000000000..f27dae5043f5
--- /dev/null
+++ b/drivers/power/supply/mt6370-charger.c
@@ -0,0 +1,961 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiaEn Wu <chiaen_wu@richtek.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/devm-helpers.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/workqueue.h>
+
+#define MT6370_REG_CHG_CTRL1 0x111
+#define MT6370_REG_CHG_CTRL2 0x112
+#define MT6370_REG_CHG_CTRL3 0x113
+#define MT6370_REG_CHG_CTRL4 0x114
+#define MT6370_REG_CHG_CTRL5 0x115
+#define MT6370_REG_CHG_CTRL6 0x116
+#define MT6370_REG_CHG_CTRL7 0x117
+#define MT6370_REG_CHG_CTRL8 0x118
+#define MT6370_REG_CHG_CTRL9 0x119
+#define MT6370_REG_CHG_CTRL10 0x11A
+#define MT6370_REG_DEVICE_TYPE 0x122
+#define MT6370_REG_USB_STATUS1 0x127
+#define MT6370_REG_CHG_STAT 0x14A
+#define MT6370_REG_FLED_EN 0x17E
+#define MT6370_REG_CHG_STAT1	0x1D0
+#define MT6370_REG_OVPCTRL_STAT 0x1D8
+
+#define MT6370_VOBST_MASK GENMASK(7, 2)
+#define MT6370_OTG_PIN_EN_MASK BIT(1)
+#define MT6370_OPA_MODE_MASK BIT(0)
+#define MT6370_OTG_OC_MASK GENMASK(2, 0)
+
+#define MT6370_MIVR_IBUS_TH_100_mA 100000
+#define MT6370_ADC_CHAN_IBUS 5
+#define MT6370_ADC_CHAN_MAX 9
+
+enum mt6370_chg_reg_field {
+ /* MT6370_REG_CHG_CTRL2 */
+ F_IINLMTSEL, F_CFO_EN, F_CHG_EN,
+ /* MT6370_REG_CHG_CTRL3 */
+ F_IAICR, F_AICR_EN, F_ILIM_EN,
+ /* MT6370_REG_CHG_CTRL4 */
+ F_VOREG,
+ /* MT6370_REG_CHG_CTRL6 */
+ F_VMIVR,
+ /* MT6370_REG_CHG_CTRL7 */
+ F_ICHG,
+ /* MT6370_REG_CHG_CTRL8 */
+ F_IPREC,
+ /* MT6370_REG_CHG_CTRL9 */
+ F_IEOC,
+ /* MT6370_REG_DEVICE_TYPE */
+ F_USBCHGEN,
+ /* MT6370_REG_USB_STATUS1 */
+ F_USB_STAT, F_CHGDET,
+ /* MT6370_REG_CHG_STAT */
+ F_CHG_STAT, F_BOOST_STAT, F_VBAT_LVL,
+ /* MT6370_REG_FLED_EN */
+ F_FL_STROBE,
+ /* MT6370_REG_CHG_STAT1 */
+ F_CHG_MIVR_STAT,
+ /* MT6370_REG_OVPCTRL_STAT */
+ F_UVP_D_STAT,
+ F_MAX
+};
+
+enum mt6370_irq {
+ MT6370_IRQ_ATTACH_I = 0,
+ MT6370_IRQ_UVP_D_EVT,
+ MT6370_IRQ_MIVR,
+ MT6370_IRQ_MAX
+};
+
+struct mt6370_priv {
+ struct device *dev;
+ struct iio_channel *iio_adcs;
+ struct mutex attach_lock;
+ struct power_supply *psy;
+ struct regmap *regmap;
+ struct regmap_field *rmap_fields[F_MAX];
+ struct regulator_dev *rdev;
+ struct workqueue_struct *wq;
+ struct work_struct bc12_work;
+ struct delayed_work mivr_dwork;
+ unsigned int irq_nums[MT6370_IRQ_MAX];
+ int attach;
+ int psy_usb_type;
+ bool pwr_rdy;
+};
+
+enum mt6370_usb_status {
+ MT6370_USB_STAT_NO_VBUS = 0,
+ MT6370_USB_STAT_VBUS_FLOW_IS_UNDER_GOING,
+ MT6370_USB_STAT_SDP,
+ MT6370_USB_STAT_SDP_NSTD,
+ MT6370_USB_STAT_DCP,
+ MT6370_USB_STAT_CDP,
+ MT6370_USB_STAT_MAX
+};
+
+struct mt6370_chg_field {
+ const char *name;
+ const struct linear_range *range;
+ struct reg_field field;
+};
+
+enum {
+ MT6370_RANGE_F_IAICR = 0,
+ MT6370_RANGE_F_VOREG,
+ MT6370_RANGE_F_VMIVR,
+ MT6370_RANGE_F_ICHG,
+ MT6370_RANGE_F_IPREC,
+ MT6370_RANGE_F_IEOC,
+ MT6370_RANGE_F_MAX
+};
+
+static const struct linear_range mt6370_chg_ranges[MT6370_RANGE_F_MAX] = {
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_IAICR, 100000, 0x0, 0x3F, 50000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_VOREG, 3900000, 0x0, 0x51, 10000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_VMIVR, 3900000, 0x0, 0x5F, 100000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_ICHG, 900000, 0x08, 0x31, 100000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_IPREC, 100000, 0x0, 0x0F, 50000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_IEOC, 100000, 0x0, 0x0F, 50000),
+};
+
+#define MT6370_CHG_FIELD(_fd, _reg, _lsb, _msb) \
+[_fd] = { \
+ .name = #_fd, \
+ .range = NULL, \
+ .field = REG_FIELD(_reg, _lsb, _msb), \
+}
+
+#define MT6370_CHG_FIELD_RANGE(_fd, _reg, _lsb, _msb) \
+[_fd] = { \
+ .name = #_fd, \
+ .range = &mt6370_chg_ranges[MT6370_RANGE_##_fd], \
+ .field = REG_FIELD(_reg, _lsb, _msb), \
+}
+
+static const struct mt6370_chg_field mt6370_chg_fields[F_MAX] = {
+ MT6370_CHG_FIELD(F_IINLMTSEL, MT6370_REG_CHG_CTRL2, 2, 3),
+ MT6370_CHG_FIELD(F_CFO_EN, MT6370_REG_CHG_CTRL2, 1, 1),
+ MT6370_CHG_FIELD(F_CHG_EN, MT6370_REG_CHG_CTRL2, 0, 0),
+ MT6370_CHG_FIELD_RANGE(F_IAICR, MT6370_REG_CHG_CTRL3, 2, 7),
+ MT6370_CHG_FIELD(F_AICR_EN, MT6370_REG_CHG_CTRL3, 1, 1),
+ MT6370_CHG_FIELD(F_ILIM_EN, MT6370_REG_CHG_CTRL3, 0, 0),
+ MT6370_CHG_FIELD_RANGE(F_VOREG, MT6370_REG_CHG_CTRL4, 1, 7),
+ MT6370_CHG_FIELD_RANGE(F_VMIVR, MT6370_REG_CHG_CTRL6, 1, 7),
+ MT6370_CHG_FIELD_RANGE(F_ICHG, MT6370_REG_CHG_CTRL7, 2, 7),
+ MT6370_CHG_FIELD_RANGE(F_IPREC, MT6370_REG_CHG_CTRL8, 0, 3),
+ MT6370_CHG_FIELD_RANGE(F_IEOC, MT6370_REG_CHG_CTRL9, 4, 7),
+ MT6370_CHG_FIELD(F_USBCHGEN, MT6370_REG_DEVICE_TYPE, 7, 7),
+ MT6370_CHG_FIELD(F_USB_STAT, MT6370_REG_USB_STATUS1, 4, 6),
+ MT6370_CHG_FIELD(F_CHGDET, MT6370_REG_USB_STATUS1, 3, 3),
+ MT6370_CHG_FIELD(F_CHG_STAT, MT6370_REG_CHG_STAT, 6, 7),
+ MT6370_CHG_FIELD(F_BOOST_STAT, MT6370_REG_CHG_STAT, 3, 3),
+ MT6370_CHG_FIELD(F_VBAT_LVL, MT6370_REG_CHG_STAT, 5, 5),
+ MT6370_CHG_FIELD(F_FL_STROBE, MT6370_REG_FLED_EN, 2, 2),
+ MT6370_CHG_FIELD(F_CHG_MIVR_STAT, MT6370_REG_CHG_STAT1, 6, 6),
+ MT6370_CHG_FIELD(F_UVP_D_STAT, MT6370_REG_OVPCTRL_STAT, 4, 4),
+};
+
+static inline int mt6370_chg_field_get(struct mt6370_priv *priv,
+ enum mt6370_chg_reg_field fd,
+ unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_field_read(priv->rmap_fields[fd], &reg_val);
+ if (ret)
+ return ret;
+
+ if (mt6370_chg_fields[fd].range)
+ return linear_range_get_value(mt6370_chg_fields[fd].range,
+ reg_val, val);
+
+ *val = reg_val;
+ return 0;
+}
+
+static inline int mt6370_chg_field_set(struct mt6370_priv *priv,
+ enum mt6370_chg_reg_field fd,
+ unsigned int val)
+{
+ int ret;
+ bool f;
+ const struct linear_range *r;
+
+ if (mt6370_chg_fields[fd].range) {
+ r = mt6370_chg_fields[fd].range;
+
+ if (fd == F_VMIVR) {
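+			/*
+			 * MIVR rounds up to the nearest supported step; if the
+			 * requested value is above the range, clamp to the
+			 * highest selector.
+			 */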
+ ret = linear_range_get_selector_high(r, val, &val, &f);
+ if (ret)
+ val = r->max_sel;
+ } else {
+ linear_range_get_selector_within(r, val, &val);
+ }
+ }
+
+ return regmap_field_write(priv->rmap_fields[fd], val);
+}
+
+enum {
+ MT6370_CHG_STAT_READY = 0,
+ MT6370_CHG_STAT_CHARGE_IN_PROGRESS,
+ MT6370_CHG_STAT_DONE,
+ MT6370_CHG_STAT_FAULT,
+ MT6370_CHG_STAT_MAX
+};
+
+enum {
+ MT6370_ATTACH_STAT_DETACH = 0,
+ MT6370_ATTACH_STAT_ATTACH_WAIT_FOR_BC12,
+ MT6370_ATTACH_STAT_ATTACH_BC12_DONE,
+ MT6370_ATTACH_STAT_ATTACH_MAX
+};
+
+static int mt6370_chg_otg_of_parse_cb(struct device_node *of,
+ const struct regulator_desc *rdesc,
+ struct regulator_config *rcfg)
+{
+ struct mt6370_priv *priv = rcfg->driver_data;
+
+ rcfg->ena_gpiod = fwnode_gpiod_get_index(of_fwnode_handle(of),
+ "enable", 0, GPIOD_OUT_LOW |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ rdesc->name);
+ if (IS_ERR(rcfg->ena_gpiod)) {
+ rcfg->ena_gpiod = NULL;
+ return 0;
+ }
+
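+	/*
+	 * A valid "enable" GPIO was provided, so hand OTG boost on/off
+	 * control over to the hardware pin.
+	 */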
+ return regmap_update_bits(priv->regmap, MT6370_REG_CHG_CTRL1,
+ MT6370_OTG_PIN_EN_MASK,
+ MT6370_OTG_PIN_EN_MASK);
+}
+
+static void mt6370_chg_bc12_work_func(struct work_struct *work)
+{
+ struct mt6370_priv *priv = container_of(work, struct mt6370_priv,
+ bc12_work);
+ int ret;
+ bool rpt_psy = false;
+ unsigned int attach, usb_stat;
+
+ mutex_lock(&priv->attach_lock);
+ attach = priv->attach;
+
+ switch (attach) {
+ case MT6370_ATTACH_STAT_DETACH:
+ usb_stat = 0;
+ break;
+ case MT6370_ATTACH_STAT_ATTACH_WAIT_FOR_BC12:
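+		/* attach == 1 here; writing it to USBCHGEN starts the BC1.2 detection flow */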
+ ret = mt6370_chg_field_set(priv, F_USBCHGEN, attach);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable USB CHG EN\n");
+ goto bc12_work_func_out;
+ case MT6370_ATTACH_STAT_ATTACH_BC12_DONE:
+ ret = mt6370_chg_field_get(priv, F_USB_STAT, &usb_stat);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get USB status\n");
+ goto bc12_work_func_out;
+ }
+ break;
+ default:
+ dev_err(priv->dev, "Invalid attach state\n");
+ goto bc12_work_func_out;
+ }
+
+ rpt_psy = true;
+
+ switch (usb_stat) {
+ case MT6370_USB_STAT_SDP:
+ case MT6370_USB_STAT_SDP_NSTD:
+ priv->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case MT6370_USB_STAT_DCP:
+ priv->psy_usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case MT6370_USB_STAT_CDP:
+ priv->psy_usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case MT6370_USB_STAT_NO_VBUS:
+ case MT6370_USB_STAT_VBUS_FLOW_IS_UNDER_GOING:
+ default:
+ priv->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ }
+
+bc12_work_func_out:
+ mutex_unlock(&priv->attach_lock);
+
+ if (rpt_psy)
+ power_supply_changed(priv->psy);
+}
+
+static int mt6370_chg_toggle_cfo(struct mt6370_priv *priv)
+{
+ int ret;
+ unsigned int fl_strobe;
+
+	/* Check if the flash LED is in strobe mode */
+ ret = mt6370_chg_field_get(priv, F_FL_STROBE, &fl_strobe);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get FL_STROBE_EN\n");
+ return ret;
+ }
+
+ if (fl_strobe) {
+ dev_err(priv->dev, "Flash led is still in strobe mode\n");
+ return ret;
+ }
+
+ /* cfo off */
+ ret = mt6370_chg_field_set(priv, F_CFO_EN, 0);
+ if (ret) {
+ dev_err(priv->dev, "Failed to disable CFO_EN\n");
+ return ret;
+ }
+
+ /* cfo on */
+ ret = mt6370_chg_field_set(priv, F_CFO_EN, 1);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable CFO_EN\n");
+
+ return ret;
+}
+
+static int mt6370_chg_read_adc_chan(struct mt6370_priv *priv, unsigned int chan,
+ int *val)
+{
+ int ret;
+
+ if (chan >= MT6370_ADC_CHAN_MAX)
+ return -EINVAL;
+
+ ret = iio_read_channel_processed(&priv->iio_adcs[chan], val);
+ if (ret)
+ dev_err(priv->dev, "Failed to read ADC\n");
+
+ return ret;
+}
+
+static void mt6370_chg_mivr_dwork_func(struct work_struct *work)
+{
+ struct mt6370_priv *priv = container_of(work, struct mt6370_priv,
+ mivr_dwork.work);
+ int ret;
+ unsigned int mivr_stat, ibus;
+
+ ret = mt6370_chg_field_get(priv, F_CHG_MIVR_STAT, &mivr_stat);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get mivr state\n");
+ goto mivr_handler_out;
+ }
+
+ if (!mivr_stat)
+ goto mivr_handler_out;
+
+ ret = mt6370_chg_read_adc_chan(priv, MT6370_ADC_CHAN_IBUS, &ibus);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get ibus\n");
+ goto mivr_handler_out;
+ }
+
+ if (ibus < MT6370_MIVR_IBUS_TH_100_mA) {
+ ret = mt6370_chg_toggle_cfo(priv);
+ if (ret)
+ dev_err(priv->dev, "Failed to toggle cfo\n");
+ }
+
+mivr_handler_out:
+ enable_irq(priv->irq_nums[MT6370_IRQ_MIVR]);
+ pm_relax(priv->dev);
+}
+
+static void mt6370_chg_pwr_rdy_check(struct mt6370_priv *priv)
+{
+ int ret;
+ unsigned int opposite_pwr_rdy, otg_en;
+ union power_supply_propval val;
+
+	/* Check whether we are in OTG mode */
+ ret = mt6370_chg_field_get(priv, F_BOOST_STAT, &otg_en);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get OTG state\n");
+ return;
+ }
+
+ if (otg_en)
+ return;
+
+ ret = mt6370_chg_field_get(priv, F_UVP_D_STAT, &opposite_pwr_rdy);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get opposite power ready state\n");
+ return;
+ }
+
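+	/*
+	 * UVP_D asserted means VBUS is gone (the inverse of "power ready"),
+	 * so report a detach; otherwise start the attach/BC1.2 flow.
+	 */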
+ val.intval = opposite_pwr_rdy ?
+ MT6370_ATTACH_STAT_DETACH :
+ MT6370_ATTACH_STAT_ATTACH_WAIT_FOR_BC12;
+
+ ret = power_supply_set_property(priv->psy, POWER_SUPPLY_PROP_ONLINE,
+ &val);
+ if (ret)
+ dev_err(priv->dev, "Failed to start attach/detach flow\n");
+}
+
+static int mt6370_chg_get_online(struct mt6370_priv *priv,
+ union power_supply_propval *val)
+{
+ mutex_lock(&priv->attach_lock);
+ val->intval = !!priv->attach;
+ mutex_unlock(&priv->attach_lock);
+
+ return 0;
+}
+
+static int mt6370_chg_get_status(struct mt6370_priv *priv,
+ union power_supply_propval *val)
+{
+ int ret;
+ unsigned int chg_stat;
+ union power_supply_propval online;
+
+ ret = power_supply_get_property(priv->psy, POWER_SUPPLY_PROP_ONLINE,
+ &online);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get online status\n");
+ return ret;
+ }
+
+ if (!online.intval) {
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ return 0;
+ }
+
+ ret = mt6370_chg_field_get(priv, F_CHG_STAT, &chg_stat);
+ if (ret)
+ return ret;
+
+ switch (chg_stat) {
+ case MT6370_CHG_STAT_READY:
+ case MT6370_CHG_STAT_FAULT:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return ret;
+ case MT6370_CHG_STAT_CHARGE_IN_PROGRESS:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ return ret;
+ case MT6370_CHG_STAT_DONE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ return ret;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ return ret;
+ }
+}
+
+static int mt6370_chg_get_charge_type(struct mt6370_priv *priv,
+ union power_supply_propval *val)
+{
+ int type, ret;
+ unsigned int chg_stat, vbat_lvl;
+
+ ret = mt6370_chg_field_get(priv, F_CHG_STAT, &chg_stat);
+ if (ret)
+ return ret;
+
+ ret = mt6370_chg_field_get(priv, F_VBAT_LVL, &vbat_lvl);
+ if (ret)
+ return ret;
+
+ switch (chg_stat) {
+ case MT6370_CHG_STAT_CHARGE_IN_PROGRESS:
+ if (vbat_lvl)
+ type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else
+ type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case MT6370_CHG_STAT_READY:
+ case MT6370_CHG_STAT_DONE:
+ case MT6370_CHG_STAT_FAULT:
+ default:
+ type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ }
+
+ val->intval = type;
+
+ return 0;
+}
+
+static int mt6370_chg_set_online(struct mt6370_priv *priv,
+ const union power_supply_propval *val)
+{
+ bool pwr_rdy = !!val->intval;
+
+ mutex_lock(&priv->attach_lock);
+ if (pwr_rdy == !!priv->attach) {
+ dev_err(priv->dev, "pwr_rdy is same(%d)\n", pwr_rdy);
+ mutex_unlock(&priv->attach_lock);
+ return 0;
+ }
+
+ priv->attach = pwr_rdy;
+ mutex_unlock(&priv->attach_lock);
+
+ if (!queue_work(priv->wq, &priv->bc12_work))
+		dev_err(priv->dev, "bc12 work is already queued\n");
+
+ return 0;
+}
+
+static int mt6370_chg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct mt6370_priv *priv = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return mt6370_chg_get_online(priv, val);
+ case POWER_SUPPLY_PROP_STATUS:
+ return mt6370_chg_get_status(priv, val);
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ return mt6370_chg_get_charge_type(priv, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return mt6370_chg_field_get(priv, F_ICHG, &val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = linear_range_get_max_value(&mt6370_chg_ranges[MT6370_RANGE_F_ICHG]);
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return mt6370_chg_field_get(priv, F_VOREG, &val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = linear_range_get_max_value(&mt6370_chg_ranges[MT6370_RANGE_F_VOREG]);
+ return 0;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return mt6370_chg_field_get(priv, F_IAICR, &val->intval);
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return mt6370_chg_field_get(priv, F_VMIVR, &val->intval);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return mt6370_chg_field_get(priv, F_IPREC, &val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return mt6370_chg_field_get(priv, F_IEOC, &val->intval);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = priv->psy_usb_type;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt6370_chg_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct mt6370_priv *priv = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return mt6370_chg_set_online(priv, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return mt6370_chg_field_set(priv, F_ICHG, val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return mt6370_chg_field_set(priv, F_VOREG, val->intval);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return mt6370_chg_field_set(priv, F_IAICR, val->intval);
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return mt6370_chg_field_set(priv, F_VMIVR, val->intval);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return mt6370_chg_field_set(priv, F_IPREC, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return mt6370_chg_field_set(priv, F_IEOC, val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt6370_chg_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static enum power_supply_property mt6370_chg_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static enum power_supply_usb_type mt6370_chg_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+};
+
+static const struct power_supply_desc mt6370_chg_psy_desc = {
+ .name = "mt6370-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = mt6370_chg_properties,
+ .num_properties = ARRAY_SIZE(mt6370_chg_properties),
+ .get_property = mt6370_chg_get_property,
+ .set_property = mt6370_chg_set_property,
+ .property_is_writeable = mt6370_chg_property_is_writeable,
+ .usb_types = mt6370_chg_usb_types,
+ .num_usb_types = ARRAY_SIZE(mt6370_chg_usb_types),
+};
+
+static const struct regulator_ops mt6370_chg_otg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+};
+
+static const u32 mt6370_chg_otg_oc_ma[] = {
+ 500000, 700000, 1100000, 1300000, 1800000, 2100000, 2400000,
+};
+
+static const struct regulator_desc mt6370_chg_otg_rdesc = {
+ .of_match = "usb-otg-vbus-regulator",
+ .of_parse_cb = mt6370_chg_otg_of_parse_cb,
+ .name = "mt6370-usb-otg-vbus",
+ .ops = &mt6370_chg_otg_ops,
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .min_uV = 4425000,
+ .uV_step = 25000,
+ .n_voltages = 57,
+ .vsel_reg = MT6370_REG_CHG_CTRL5,
+ .vsel_mask = MT6370_VOBST_MASK,
+ .enable_reg = MT6370_REG_CHG_CTRL1,
+ .enable_mask = MT6370_OPA_MODE_MASK,
+ .curr_table = mt6370_chg_otg_oc_ma,
+ .n_current_limits = ARRAY_SIZE(mt6370_chg_otg_oc_ma),
+ .csel_reg = MT6370_REG_CHG_CTRL10,
+ .csel_mask = MT6370_OTG_OC_MASK,
+};
+
+static int mt6370_chg_init_rmap_fields(struct mt6370_priv *priv)
+{
+ int i;
+ const struct mt6370_chg_field *fds = mt6370_chg_fields;
+
+ for (i = 0; i < F_MAX; i++) {
+ priv->rmap_fields[i] = devm_regmap_field_alloc(priv->dev,
+ priv->regmap,
+ fds[i].field);
+ if (IS_ERR(priv->rmap_fields[i]))
+ return dev_err_probe(priv->dev,
+ PTR_ERR(priv->rmap_fields[i]),
+ "Failed to allocate regmapfield[%s]\n",
+ fds[i].name);
+ }
+
+ return 0;
+}
+
+static int mt6370_chg_init_setting(struct mt6370_priv *priv)
+{
+ int ret;
+
+ /* Disable usb_chg_en */
+ ret = mt6370_chg_field_set(priv, F_USBCHGEN, 0);
+ if (ret) {
+ dev_err(priv->dev, "Failed to disable usb_chg_en\n");
+ return ret;
+ }
+
+ /* Disable input current limit */
+ ret = mt6370_chg_field_set(priv, F_ILIM_EN, 0);
+ if (ret) {
+ dev_err(priv->dev, "Failed to disable input current limit\n");
+ return ret;
+ }
+
+	/* ICHG/IEOC workaround: ICHG cannot be set lower than 900mA */
+ ret = mt6370_chg_field_set(priv, F_ICHG, 900000);
+ if (ret) {
+		dev_err(priv->dev, "Failed to set ICHG to 900mA\n");
+ return ret;
+ }
+
+ /* Change input current limit selection to using IAICR results */
+ ret = mt6370_chg_field_set(priv, F_IINLMTSEL, 2);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set IINLMTSEL\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#define MT6370_CHG_DT_PROP_DECL(_name, _type, _field) \
+{ \
+ .name = "mediatek,chg-" #_name, \
+ .type = MT6370_PARSE_TYPE_##_type, \
+ .fd = _field, \
+}
+
+static int mt6370_chg_init_otg_regulator(struct mt6370_priv *priv)
+{
+ struct regulator_config rcfg = {
+ .dev = priv->dev,
+ .regmap = priv->regmap,
+ .driver_data = priv,
+ };
+
+ priv->rdev = devm_regulator_register(priv->dev, &mt6370_chg_otg_rdesc,
+ &rcfg);
+
+ return PTR_ERR_OR_ZERO(priv->rdev);
+}
+
+static int mt6370_chg_init_psy(struct mt6370_priv *priv)
+{
+ struct power_supply_config cfg = {
+ .drv_data = priv,
+ .of_node = dev_of_node(priv->dev),
+ };
+
+ priv->psy = devm_power_supply_register(priv->dev, &mt6370_chg_psy_desc,
+ &cfg);
+
+ return PTR_ERR_OR_ZERO(priv->psy);
+}
+
+static void mt6370_chg_destroy_attach_lock(void *data)
+{
+ struct mutex *attach_lock = data;
+
+ mutex_destroy(attach_lock);
+}
+
+static void mt6370_chg_destroy_wq(void *data)
+{
+ struct workqueue_struct *wq = data;
+
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+}
+
+static irqreturn_t mt6370_attach_i_handler(int irq, void *data)
+{
+ struct mt6370_priv *priv = data;
+ unsigned int otg_en;
+ int ret;
+
+	/* Check whether we are in OTG mode */
+ ret = mt6370_chg_field_get(priv, F_BOOST_STAT, &otg_en);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get OTG state\n");
+ return IRQ_NONE;
+ }
+
+ if (otg_en)
+ return IRQ_HANDLED;
+
+ mutex_lock(&priv->attach_lock);
+ priv->attach = MT6370_ATTACH_STAT_ATTACH_BC12_DONE;
+ mutex_unlock(&priv->attach_lock);
+
+ if (!queue_work(priv->wq, &priv->bc12_work))
+		dev_err(priv->dev, "bc12 work is already queued\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mt6370_uvp_d_evt_handler(int irq, void *data)
+{
+ struct mt6370_priv *priv = data;
+
+ mt6370_chg_pwr_rdy_check(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mt6370_mivr_handler(int irq, void *data)
+{
+ struct mt6370_priv *priv = data;
+
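+	/*
+	 * Stay awake and mask the MIVR IRQ; mt6370_chg_mivr_dwork_func() will
+	 * handle the event, re-enable the IRQ and drop the wakeup reference.
+	 */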
+ pm_stay_awake(priv->dev);
+ disable_irq_nosync(priv->irq_nums[MT6370_IRQ_MIVR]);
+ schedule_delayed_work(&priv->mivr_dwork, msecs_to_jiffies(200));
+
+ return IRQ_HANDLED;
+}
+
+#define MT6370_CHG_IRQ(_name) \
+{ \
+ .name = #_name, \
+ .handler = mt6370_##_name##_handler, \
+}
+
+static int mt6370_chg_init_irq(struct mt6370_priv *priv)
+{
+ int i, ret;
+ const struct {
+ char *name;
+ irq_handler_t handler;
+ } mt6370_chg_irqs[] = {
+ MT6370_CHG_IRQ(attach_i),
+ MT6370_CHG_IRQ(uvp_d_evt),
+ MT6370_CHG_IRQ(mivr),
+ };
+
+ for (i = 0; i < ARRAY_SIZE(mt6370_chg_irqs); i++) {
+ ret = platform_get_irq_byname(to_platform_device(priv->dev),
+ mt6370_chg_irqs[i].name);
+ if (ret < 0)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to get irq %s\n",
+ mt6370_chg_irqs[i].name);
+
+ priv->irq_nums[i] = ret;
+ ret = devm_request_threaded_irq(priv->dev, ret, NULL,
+ mt6370_chg_irqs[i].handler,
+ IRQF_TRIGGER_FALLING,
+ dev_name(priv->dev), priv);
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to request irq %s\n",
+ mt6370_chg_irqs[i].name);
+ }
+
+ return 0;
+}
+
+static int mt6370_chg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mt6370_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+
+ priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!priv->regmap)
+ return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n");
+
+ ret = mt6370_chg_init_rmap_fields(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init regmap fields\n");
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->iio_adcs = devm_iio_channel_get_all(priv->dev);
+ if (IS_ERR(priv->iio_adcs))
+ return dev_err_probe(dev, PTR_ERR(priv->iio_adcs),
+ "Failed to get iio adc\n");
+
+ ret = mt6370_chg_init_otg_regulator(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init OTG regulator\n");
+
+ ret = mt6370_chg_init_psy(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init psy\n");
+
+ mutex_init(&priv->attach_lock);
+ ret = devm_add_action_or_reset(dev, mt6370_chg_destroy_attach_lock,
+ &priv->attach_lock);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init attach lock\n");
+
+ priv->attach = MT6370_ATTACH_STAT_DETACH;
+
+ priv->wq = create_singlethread_workqueue(dev_name(priv->dev));
+ if (!priv->wq)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to create workqueue\n");
+
+ ret = devm_add_action_or_reset(dev, mt6370_chg_destroy_wq, priv->wq);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wq\n");
+
+ ret = devm_work_autocancel(dev, &priv->bc12_work, mt6370_chg_bc12_work_func);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init bc12 work\n");
+
+ ret = devm_delayed_work_autocancel(dev, &priv->mivr_dwork, mt6370_chg_mivr_dwork_func);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init mivr delayed work\n");
+
+ ret = mt6370_chg_init_setting(priv);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to init mt6370 charger setting\n");
+
+ ret = mt6370_chg_init_irq(priv);
+ if (ret)
+ return ret;
+
+ mt6370_chg_pwr_rdy_check(priv);
+
+ return 0;
+}
+
+static const struct of_device_id mt6370_chg_of_match[] = {
+ { .compatible = "mediatek,mt6370-charger", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6370_chg_of_match);
+
+static struct platform_driver mt6370_chg_driver = {
+ .probe = mt6370_chg_probe,
+ .driver = {
+ .name = "mt6370-charger",
+ .of_match_table = mt6370_chg_of_match,
+ },
+};
+module_platform_driver(mt6370_chg_driver);
+
+MODULE_AUTHOR("ChiaEn Wu <chiaen_wu@richtek.com>");
+MODULE_DESCRIPTION("MediaTek MT6370 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 4239591e1522..5369abaceb5c 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -442,7 +442,7 @@ static int add_prop_uevent(struct device *dev, struct kobj_uevent_env *env,
if (ret == -ENODEV || ret == -ENODATA) {
/*
* When a battery is absent, we expect -ENODEV. Don't abort;
- * send the uevent with at least the the PRESENT=0 property
+ * send the uevent with at least the PRESENT=0 property
*/
return 0;
}
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
new file mode 100644
index 000000000000..635f051b0821
--- /dev/null
+++ b/drivers/power/supply/rk817_charger.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Charger Driver for Rockchip rk817
+ *
+ * Copyright (c) 2021 Maya Matuszczyk <maccraft123mc@gmail.com>
+ *
+ * Authors: Maya Matuszczyk <maccraft123mc@gmail.com>
+ * Chris Morgan <macromorgan@hotmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/devm-helpers.h>
+#include <linux/mfd/rk808.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+/* Charging statuses reported by hardware register */
+enum rk817_charge_status {
+ CHRG_OFF,
+ DEAD_CHRG,
+ TRICKLE_CHRG,
+ CC_OR_CV_CHRG,
+ CHARGE_FINISH,
+ USB_OVER_VOL,
+ BAT_TMP_ERR,
+ BAT_TIM_ERR,
+};
+
+/*
+ * Max charging current read from/written to the hardware register.
+ * Note that the highest value, 0x7, corresponds to the lowest
+ * current; this is per the datasheet.
+ */
+enum rk817_chg_cur {
+ CHG_1A,
+ CHG_1_5A,
+ CHG_2A,
+ CHG_2_5A,
+ CHG_2_75A,
+ CHG_3A,
+ CHG_3_5A,
+ CHG_0_5A,
+};
+
+struct rk817_charger {
+ struct device *dev;
+ struct rk808 *rk808;
+
+ struct power_supply *bat_ps;
+ struct power_supply *chg_ps;
+ bool plugged_in;
+ bool battery_present;
+
+ /*
+ * voltage_k and voltage_b values are used to calibrate the ADC
+ * voltage readings. While they are documented in the BSP kernel and
+ * datasheet as voltage_k and voltage_b, there is no further
+ * information explaining them in more detail.
+ */
+
+ uint32_t voltage_k;
+ uint32_t voltage_b;
+
+ /*
+ * soc - state of charge - like the BSP this is stored as a percentage,
+ * to the thousandth. BSP has a display state of charge (dsoc) and a
+ * remaining state of charge (rsoc). This value will be used for both
+ * purposes here so we don't do any fancy math to try and "smooth" the
+ * charge and just report it as it is. Note for example an soc of 100
+ * is stored as 100000, an soc of 50 is stored as 50000, etc.
+ */
+ int soc;
+
+ /*
+	 * Capacity of battery when fully charged, equal to or less than the
+	 * design capacity depending upon wear. The BSP kernel saves to nvram
+	 * in mAh, so this value is in mAh, not the standard uAh.
+ */
+ int fcc_mah;
+
+ /*
+	 * Calibrate the SOC on a fully charged battery; this way we can use
+	 * the calibrated SOC value to correct for coulomb counter drift.
+ */
+ bool soc_cal;
+
+ /* Implementation specific immutable properties from device tree */
+ int res_div;
+ int sleep_enter_current_ua;
+ int sleep_filter_current_ua;
+ int bat_charge_full_design_uah;
+ int bat_voltage_min_design_uv;
+ int bat_voltage_max_design_uv;
+
+ /* Values updated periodically by driver for display. */
+ int charge_now_uah;
+ int volt_avg_uv;
+ int cur_avg_ua;
+ int max_chg_cur_ua;
+ int max_chg_volt_uv;
+ int charge_status;
+ int charger_input_volt_avg_uv;
+
+ /* Work queue to periodically update values. */
+ struct delayed_work work;
+};
+
+/* ADC coefficients extracted from BSP kernel */
+#define ADC_TO_CURRENT(adc_value, res_div) \
+ (adc_value * 172 / res_div)
+
+#define CURRENT_TO_ADC(current, samp_res) \
+ (current * samp_res / 172)
+
+#define CHARGE_TO_ADC(capacity, res_div) \
+ (capacity * res_div * 3600 / 172 * 1000)
+
+#define ADC_TO_CHARGE_UAH(adc_value, res_div) \
+ (adc_value / 3600 * 172 / res_div)
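+
+/*
+ * The charge conversion macros reuse the same 172 / res_div current factor
+ * as ADC_TO_CURRENT; the 3600 factor converts between seconds and hours, and
+ * the extra 1000 in CHARGE_TO_ADC accounts for its mAh (not uAh) input.
+ */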
+
+static int rk817_chg_cur_to_reg(u32 chg_cur_ma)
+{
+ if (chg_cur_ma >= 3500)
+ return CHG_3_5A;
+ else if (chg_cur_ma >= 3000)
+ return CHG_3A;
+ else if (chg_cur_ma >= 2750)
+ return CHG_2_75A;
+ else if (chg_cur_ma >= 2500)
+ return CHG_2_5A;
+ else if (chg_cur_ma >= 2000)
+ return CHG_2A;
+ else if (chg_cur_ma >= 1500)
+ return CHG_1_5A;
+ else if (chg_cur_ma >= 1000)
+ return CHG_1A;
+ else if (chg_cur_ma >= 500)
+ return CHG_0_5A;
+ else
+ return -EINVAL;
+}
+
+static int rk817_chg_cur_from_reg(u8 reg)
+{
+ switch (reg) {
+ case CHG_0_5A:
+ return 500000;
+ case CHG_1A:
+ return 1000000;
+ case CHG_1_5A:
+ return 1500000;
+ case CHG_2A:
+ return 2000000;
+ case CHG_2_5A:
+ return 2500000;
+ case CHG_2_75A:
+ return 2750000;
+ case CHG_3A:
+ return 3000000;
+ case CHG_3_5A:
+ return 3500000;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void rk817_bat_calib_vol(struct rk817_charger *charger)
+{
+ uint32_t vcalib0 = 0;
+ uint32_t vcalib1 = 0;
+ u8 bulk_reg[2];
+
+ /* calibrate voltage */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_VCALIB0_H,
+ bulk_reg, 2);
+ vcalib0 = get_unaligned_be16(bulk_reg);
+
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_VCALIB1_H,
+ bulk_reg, 2);
+ vcalib1 = get_unaligned_be16(bulk_reg);
+
+ /* values were taken from BSP kernel */
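+	/*
+	 * Linear fit: vcalib0/vcalib1 appear to be the ADC codes measured at
+	 * the 2.3V and 4.025V calibration points, giving voltage_k in uV per
+	 * ADC count and voltage_b in mV.
+	 */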
+ charger->voltage_k = (4025 - 2300) * 1000 /
+ ((vcalib1 - vcalib0) ? (vcalib1 - vcalib0) : 1);
+ charger->voltage_b = 4025 - (charger->voltage_k * vcalib1) / 1000;
+}
+
+static void rk817_bat_calib_cur(struct rk817_charger *charger)
+{
+ u8 bulk_reg[2];
+
+ /* calibrate current */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_IOFFSET_H,
+ bulk_reg, 2);
+ regmap_bulk_write(charger->rk808->regmap, RK817_GAS_GAUGE_CAL_OFFSET_H,
+ bulk_reg, 2);
+}
+
+/*
+ * Note that only the fcc_mah is really used by this driver; the other values
+ * are written to remain backwards compatible with the BSP kernel.
+ */
+static int rk817_record_battery_nvram_values(struct rk817_charger *charger)
+{
+ u8 bulk_reg[3];
+ int ret, rsoc;
+
+ /*
+ * write the soc value to the nvram location used by the BSP kernel
+ * for the dsoc value.
+ */
+ put_unaligned_le24(charger->soc, bulk_reg);
+ ret = regmap_bulk_write(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_R1,
+ bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+ /*
+ * write the remaining capacity in mah to the nvram location used by
+ * the BSP kernel for the rsoc value.
+ */
+ rsoc = (charger->soc * charger->fcc_mah) / 100000;
+ put_unaligned_le24(rsoc, bulk_reg);
+ ret = regmap_bulk_write(charger->rk808->regmap, RK817_GAS_GAUGE_DATA0,
+ bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+ /* write the fcc_mah in mAh, just as the BSP kernel does. */
+ put_unaligned_le24(charger->fcc_mah, bulk_reg);
+ ret = regmap_bulk_write(charger->rk808->regmap, RK817_GAS_GAUGE_DATA3,
+ bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rk817_bat_calib_cap(struct rk817_charger *charger)
+{
+ struct rk808 *rk808 = charger->rk808;
+ int tmp, charge_now, charge_now_adc, volt_avg;
+ u8 bulk_reg[4];
+
+ /* Calibrate the soc and fcc on a fully charged battery */
+
+ if (charger->charge_status == CHARGE_FINISH && (!charger->soc_cal)) {
+ /*
+		 * soc should be 100000 and the coulomb counter should show the full
+ * charge capacity. Note that if the device is unplugged for a
+		 * period of several days the coulomb counter will have a large
+ * margin of error, so setting it back to the full charge on
+ * a completed charge cycle should correct this (my device was
+ * showing 33% battery after 3 days unplugged when it should
+ * have been closer to 95% based on voltage and charge
+ * current).
+ */
+
+ charger->soc = 100000;
+ charge_now_adc = CHARGE_TO_ADC(charger->fcc_mah,
+ charger->res_div);
+ put_unaligned_be32(charge_now_adc, bulk_reg);
+ regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_Q_INIT_H3,
+ bulk_reg, 4);
+
+ charger->soc_cal = 1;
+ dev_dbg(charger->dev,
+ "Fully charged. SOC is %d, full capacity is %d\n",
+ charger->soc, charger->fcc_mah * 1000);
+ }
+
+ /*
+	 * The coulomb counter can drift up slightly, so we should correct for
+ * it. But don't correct it until we're at 100% soc.
+ */
+ if (charger->charge_status == CHARGE_FINISH && charger->soc_cal) {
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ charge_now_adc = get_unaligned_be32(bulk_reg);
+ if (charge_now_adc < 0)
+ return charge_now_adc;
+ charge_now = ADC_TO_CHARGE_UAH(charge_now_adc,
+ charger->res_div);
+
+ /*
+			 * Re-init the coulomb counter with updated values to correct drift.
+ */
+ if (charge_now / 1000 > charger->fcc_mah) {
+ dev_dbg(charger->dev,
+				"Recalibrating coulomb counter to %d uah\n",
+ charge_now);
+ /*
+ * Order of operations matters here to ensure we keep
+			 * enough precision until the last step and to avoid
+			 * making needless updates to the coulomb counter.
+ */
+ charge_now_adc = CHARGE_TO_ADC(charger->fcc_mah,
+ charger->res_div);
+ put_unaligned_be32(charge_now_adc, bulk_reg);
+ regmap_bulk_write(rk808->regmap,
+ RK817_GAS_GAUGE_Q_INIT_H3,
+ bulk_reg, 4);
+ }
+ }
+
+ /*
+ * Calibrate the fully charged capacity when we previously had a full
+ * battery (soc_cal = 1) and are now empty (at or below minimum design
+	 * voltage). If our coulomb counter is still positive, subtract that
+	 * from our fcc value to get a calibrated fcc, and if our coulomb
+	 * counter is negative, add that to our fcc (but do not exceed our
+ * design capacity).
+ */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ volt_avg = (charger->voltage_k * tmp) + 1000 * charger->voltage_b;
+ if (volt_avg <= charger->bat_voltage_min_design_uv &&
+ charger->soc_cal) {
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ charge_now_adc = get_unaligned_be32(bulk_reg);
+ charge_now = ADC_TO_CHARGE_UAH(charge_now_adc,
+ charger->res_div);
+ /*
+ * Note, if charge_now is negative this will add it (what we
+ * want) and if it's positive this will subtract (also what
+ * we want).
+ */
+ charger->fcc_mah = charger->fcc_mah - (charge_now / 1000);
+
+ dev_dbg(charger->dev,
+ "Recalibrating full charge capacity to %d uah\n",
+ charger->fcc_mah * 1000);
+ }
+
+ rk817_record_battery_nvram_values(charger);
+
+ return 0;
+}
+
+static void rk817_read_props(struct rk817_charger *charger)
+{
+ int tmp, reg;
+ u8 bulk_reg[4];
+
+ /*
+	 * Recalibrate voltage and current readings if we need to. The BSP does
+	 * both on CUR_CALIB_UPD, ignoring VOL_CALIB_UPD. Curiously enough, both
+ * documentation and the BSP show that you perform an update if bit 7
+ * is 1, but you clear the status by writing a 1 to bit 7.
+ */
+ regmap_read(charger->rk808->regmap, RK817_GAS_GAUGE_ADC_CONFIG1, &reg);
+ if (reg & RK817_VOL_CUR_CALIB_UPD) {
+ rk817_bat_calib_cur(charger);
+ rk817_bat_calib_vol(charger);
+ regmap_write_bits(charger->rk808->regmap,
+ RK817_GAS_GAUGE_ADC_CONFIG1,
+ RK817_VOL_CUR_CALIB_UPD,
+ RK817_VOL_CUR_CALIB_UPD);
+ }
+
+ /* Update reported charge. */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ tmp = get_unaligned_be32(bulk_reg);
+ charger->charge_now_uah = ADC_TO_CHARGE_UAH(tmp, charger->res_div);
+ if (charger->charge_now_uah < 0)
+ charger->charge_now_uah = 0;
+ if (charger->charge_now_uah > charger->fcc_mah * 1000)
+ charger->charge_now_uah = charger->fcc_mah * 1000;
+
+ /* Update soc based on reported charge. */
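+	/*
+	 * soc is kept in thousandths of a percent, so
+	 * uAh * 100 / mAh == ((uAh / 1000) / mAh) * 100 * 1000.
+	 */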
+ charger->soc = charger->charge_now_uah * 100 / charger->fcc_mah;
+
+ /* Update reported voltage. */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ charger->volt_avg_uv = (charger->voltage_k * tmp) + 1000 *
+ charger->voltage_b;
+
+ /*
+ * Update reported current. Note value from registers is a signed 16
+ * bit int.
+ */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_CUR_H,
+ bulk_reg, 2);
+ tmp = (short int)get_unaligned_be16(bulk_reg);
+ charger->cur_avg_ua = ADC_TO_CURRENT(tmp, charger->res_div);
+
+ /*
+ * Update the max charge current. This value shouldn't change, but we
+ * can read it to report what the PMIC says it is instead of simply
+ * returning the default value.
+ */
+ regmap_read(charger->rk808->regmap, RK817_PMIC_CHRG_OUT, &reg);
+ charger->max_chg_cur_ua =
+ rk817_chg_cur_from_reg(reg & RK817_CHRG_CUR_SEL);
+
+ /*
+ * Update max charge voltage. Like the max charge current this value
+ * shouldn't change, but we can report what the PMIC says.
+ */
+ regmap_read(charger->rk808->regmap, RK817_PMIC_CHRG_OUT, &reg);
+ charger->max_chg_volt_uv = ((((reg & RK817_CHRG_VOL_SEL) >> 4) *
+ 50000) + 4100000);
+
+ /* Check if battery still present. */
+ regmap_read(charger->rk808->regmap, RK817_PMIC_CHRG_STS, &reg);
+ charger->battery_present = (reg & RK817_BAT_EXS);
+
+ /* Get which type of charge we are using (if any). */
+ regmap_read(charger->rk808->regmap, RK817_PMIC_CHRG_STS, &reg);
+ charger->charge_status = (reg >> 4) & 0x07;
+
+ /*
+ * Get charger input voltage. Note that on my example hardware (an
+ * Odroid Go Advance) the voltage of the power connector is measured
+ * on the register labelled USB in the datasheet; I don't know if this
+ * is how it is designed or just a quirk of the implementation. I
+ * believe this will also measure the voltage of the USB output when in
+	 * OTG mode; if that is the case we may need to change this in the
+ * future to return 0 if the power supply status is offline (I can't
+	 * test this with my current implementation). Also, when the voltage
+	 * should be zero, the ADC sometimes still shows a single bit (which
+	 * would register as 20000uv). When this happens, set it to 0.
+ */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_USB_VOL_H,
+ bulk_reg, 2);
+ reg = get_unaligned_be16(bulk_reg);
+ if (reg > 1) {
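+		/*
+		 * The 60 / 46 scaling presumably reflects the charger input
+		 * voltage divider.
+		 */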
+ tmp = ((charger->voltage_k * reg / 1000 + charger->voltage_b) *
+ 60 / 46);
+ charger->charger_input_volt_avg_uv = tmp * 1000;
+ } else {
+ charger->charger_input_volt_avg_uv = 0;
+ }
+
+ /* Calibrate battery capacity and soc. */
+ rk817_bat_calib_cap(charger);
+}
+
+static int rk817_bat_get_prop(struct power_supply *ps,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct rk817_charger *charger = power_supply_get_drvdata(ps);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = charger->battery_present;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (charger->cur_avg_ua < 0) {
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ }
+ switch (charger->charge_status) {
+ case CHRG_OFF:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ /*
+ * Dead charge is documented, but not explained. I never
+ * observed it but assume it's a pre-charge for a dead
+ * battery.
+ */
+ case DEAD_CHRG:
+ case TRICKLE_CHRG:
+ case CC_OR_CV_CHRG:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case CHARGE_FINISH:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ return -EINVAL;
+
+ }
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ switch (charger->charge_status) {
+ case CHRG_OFF:
+ case CHARGE_FINISH:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case TRICKLE_CHRG:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case DEAD_CHRG:
+ case CC_OR_CV_CHRG:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = charger->fcc_mah * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = charger->bat_charge_full_design_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = charger->charge_now_uah;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = charger->bat_voltage_min_design_uv;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ /* Add 500 so that values like 99999 are 100% not 99%. */
+ val->intval = (charger->soc + 500) / 1000;
+ if (val->intval > 100)
+ val->intval = 100;
+ if (val->intval < 0)
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ val->intval = charger->volt_avg_uv;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = charger->cur_avg_ua;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = charger->max_chg_cur_ua;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = charger->max_chg_volt_uv;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = charger->bat_voltage_max_design_uv;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int rk817_chg_get_prop(struct power_supply *ps,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct rk817_charger *charger = power_supply_get_drvdata(ps);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = charger->plugged_in;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ /* max voltage from datasheet at 5.5v (default 5.0v) */
+ val->intval = 5500000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ /* min voltage from datasheet at 3.8v (default 5.0v) */
+ val->intval = 3800000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ val->intval = charger->charger_input_volt_avg_uv;
+ break;
+ /*
+ * While it's possible that other implementations could use different
+ * USB types, the current implementation for this PMIC (the Odroid Go
+ * Advance) only uses a dedicated charging port with no rx/tx lines.
+ */
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+
+}
+
+static irqreturn_t rk817_plug_in_isr(int irq, void *cg)
+{
+ struct rk817_charger *charger;
+
+ charger = (struct rk817_charger *)cg;
+ charger->plugged_in = 1;
+ power_supply_changed(charger->chg_ps);
+ power_supply_changed(charger->bat_ps);
+ /* try to recalibrate capacity if we hit full charge. */
+ charger->soc_cal = 0;
+
+ rk817_read_props(charger);
+
+ dev_dbg(charger->dev, "Power Cord Inserted\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rk817_plug_out_isr(int irq, void *cg)
+{
+ struct rk817_charger *charger;
+ struct rk808 *rk808;
+
+ charger = (struct rk817_charger *)cg;
+ rk808 = charger->rk808;
+ charger->plugged_in = 0;
+ power_supply_changed(charger->bat_ps);
+ power_supply_changed(charger->chg_ps);
+
+ /*
+ * For some reason the bits of RK817_PMIC_CHRG_IN reset whenever the
+ * power cord is unplugged. This was not documented in the BSP kernel
+ * or the datasheet and only discovered by trial and error. Set minimum
+ * USB input voltage to 4.5v and enable USB voltage input limit.
+ */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN,
+ RK817_USB_VLIM_SEL, (0x05 << 4));
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN, RK817_USB_VLIM_EN,
+ (0x01 << 7));
+
+ /*
+ * Set average USB input current limit to 1.5A and enable USB current
+ * input limit.
+ */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN,
+ RK817_USB_ILIM_SEL, 0x03);
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN, RK817_USB_ILIM_EN,
+ (0x01 << 3));
+
+ rk817_read_props(charger);
+
+ dev_dbg(charger->dev, "Power Cord Removed\n");
+
+ return IRQ_HANDLED;
+}
+
+static enum power_supply_property rk817_bat_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+};
+
+static enum power_supply_property rk817_chg_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+};
+
+static enum power_supply_usb_type rk817_usb_type[] = {
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+};
+
+static const struct power_supply_desc rk817_bat_desc = {
+ .name = "rk817-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = rk817_bat_props,
+ .num_properties = ARRAY_SIZE(rk817_bat_props),
+ .get_property = rk817_bat_get_prop,
+};
+
+static const struct power_supply_desc rk817_chg_desc = {
+ .name = "rk817-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = rk817_usb_type,
+ .num_usb_types = ARRAY_SIZE(rk817_usb_type),
+ .properties = rk817_chg_props,
+ .num_properties = ARRAY_SIZE(rk817_chg_props),
+ .get_property = rk817_chg_get_prop,
+};
+
+static int rk817_read_battery_nvram_values(struct rk817_charger *charger)
+{
+ u8 bulk_reg[3];
+ int ret;
+
+ /* Read the nvram data for full charge capacity. */
+ ret = regmap_bulk_read(charger->rk808->regmap,
+ RK817_GAS_GAUGE_DATA3, bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+ charger->fcc_mah = get_unaligned_le24(bulk_reg);
+
+ /*
+	 * Sanity check the value: reject anything too small to be practical
+	 * for this device (the BSP kernel assumes anything under 500mAh is
+	 * invalid). Also check if the value is too large and
+ * correct it.
+ */
+ if ((charger->fcc_mah < 500) ||
+ ((charger->fcc_mah * 1000) > charger->bat_charge_full_design_uah)) {
+ dev_info(charger->dev,
+ "Invalid NVRAM max charge, setting to %u uAH\n",
+ charger->bat_charge_full_design_uah);
+ charger->fcc_mah = charger->bat_charge_full_design_uah / 1000;
+ }
+
+ /*
+ * Read the nvram for state of charge. Sanity check for values greater
+ * than 100 (10000). If the value is off it should get corrected
+ * automatically when the voltage drops to the min (soc is 0) or when
+ * the battery is full (soc is 100).
+ */
+ ret = regmap_bulk_read(charger->rk808->regmap,
+ RK817_GAS_GAUGE_BAT_R1, bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+ charger->soc = get_unaligned_le24(bulk_reg);
+ if (charger->soc > 10000)
+ charger->soc = 10000;
+
+ return 0;
+}
+
+static int
+rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ struct power_supply_battery_info *bat_info)
+{
+ struct rk808 *rk808 = charger->rk808;
+ u8 bulk_reg[4];
+ u32 boot_voltage, boot_charge_mah, tmp;
+ int ret, reg, off_time;
+ bool first_boot;
+
+ /*
+	 * Check if the battery is uninitialized. If it is, the coulomb counter
+ * needs to be set up.
+ */
+ ret = regmap_read(rk808->regmap, RK817_GAS_GAUGE_GG_STS, &reg);
+ if (ret < 0)
+ return ret;
+ first_boot = reg & RK817_BAT_CON;
+ /*
+ * If the battery is uninitialized, use the poweron voltage and an ocv
+ * lookup to guess our charge. The number won't be very accurate until
+ * we hit either our minimum voltage (0%) or full charge (100%).
+ */
+ if (first_boot) {
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_PWRON_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ boot_voltage = (charger->voltage_k * tmp) +
+ 1000 * charger->voltage_b;
+ /*
+		 * Since the only implementation has no working thermistor,
+		 * assume 20C for the OCV lookup. If the lookup fails, report an
+		 * error with the OCV table.
+ */
+ charger->soc = power_supply_batinfo_ocv2cap(bat_info,
+ boot_voltage,
+ 20) * 1000;
+ if (charger->soc < 0)
+ charger->soc = 0;
+
+ /* Guess that full charge capacity is the design capacity */
+ charger->fcc_mah = charger->bat_charge_full_design_uah / 1000;
+ /*
+ * Set battery as "set up". BSP driver uses this value even
+ * though datasheet claims it's a read-only value.
+ */
+ regmap_write_bits(rk808->regmap, RK817_GAS_GAUGE_GG_STS,
+ RK817_BAT_CON, 0);
+ /* Save nvram values */
+ ret = rk817_record_battery_nvram_values(charger);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = rk817_read_battery_nvram_values(charger);
+ if (ret < 0)
+ return ret;
+
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ tmp = get_unaligned_be32(bulk_reg);
+ if (tmp < 0)
+ tmp = 0;
+ boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
+ charger->res_div) / 1000;
+ /*
+		 * Check if the coulomb counter has been off for more than 300
+		 * minutes, as it tends to drift downward. If so, re-init soc
+		 * with the boot voltage instead. Note the OFF_CNT register
+		 * appears to count in decaminutes and stops counting at 2550
+		 * (0xFF) minutes. The BSP kernel used OCV, but
+ * for me occasionally that would show invalid values. Boot
+ * voltage is only accurate for me on first poweron (not
+ * reboots), but we shouldn't ever encounter an OFF_CNT more
+ * than 0 on a reboot anyway.
+ */
+ regmap_read(rk808->regmap, RK817_GAS_GAUGE_OFF_CNT, &off_time);
+ if (off_time >= 30) {
+ regmap_bulk_read(rk808->regmap,
+ RK817_GAS_GAUGE_PWRON_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ boot_voltage = (charger->voltage_k * tmp) +
+ 1000 * charger->voltage_b;
+ charger->soc =
+ power_supply_batinfo_ocv2cap(bat_info,
+ boot_voltage,
+ 20) * 1000;
+ } else {
+ charger->soc = (boot_charge_mah * 1000 * 100 /
+ charger->fcc_mah);
+ }
+ }
+
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_PWRON_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ boot_voltage = (charger->voltage_k * tmp) + 1000 * charger->voltage_b;
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ tmp = get_unaligned_be32(bulk_reg);
+ if (tmp < 0)
+ tmp = 0;
+ boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000;
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ boot_voltage = (charger->voltage_k * tmp) + 1000 * charger->voltage_b;
+
+ /*
+	 * Now that we have our full charge capacity and soc, init the coulomb
+	 * counter.
+ */
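+	/* soc is in thousandths of a percent: soc * fcc_mah / 100000 gives mAh */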
+ boot_charge_mah = charger->soc * charger->fcc_mah / 100 / 1000;
+ if (boot_charge_mah > charger->fcc_mah)
+ boot_charge_mah = charger->fcc_mah;
+ tmp = CHARGE_TO_ADC(boot_charge_mah, charger->res_div);
+ put_unaligned_be32(tmp, bulk_reg);
+ ret = regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_Q_INIT_H3,
+ bulk_reg, 4);
+ if (ret < 0)
+ return ret;
+
+ /* Set QMAX value to max design capacity. */
+ tmp = CHARGE_TO_ADC((charger->bat_charge_full_design_uah / 1000),
+ charger->res_div);
+ put_unaligned_be32(tmp, bulk_reg);
+ ret = regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_Q_MAX_H3,
+ bulk_reg, 4);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rk817_battery_init(struct rk817_charger *charger,
+ struct power_supply_battery_info *bat_info)
+{
+ struct rk808 *rk808 = charger->rk808;
+ u32 tmp, max_chg_vol_mv, max_chg_cur_ma;
+	u8 max_chg_vol_reg, chg_term_i_reg;
+	int ret, chg_term_ma, max_chg_cur_reg;
+ u8 bulk_reg[2];
+
+ /* Get initial plug state */
+ regmap_read(rk808->regmap, RK817_SYS_STS, &tmp);
+ charger->plugged_in = (tmp & RK817_PLUG_IN_STS);
+
+ /*
+ * Turn on all ADC functions to measure battery, USB, and sys voltage,
+	 * as well as batt temp. Note the only tested implementation so far
+	 * does not use a battery with a thermistor.
+ */
+ regmap_write(rk808->regmap, RK817_GAS_GAUGE_ADC_CONFIG0, 0xfc);
+
+ /*
+ * Set relax mode voltage sampling interval and ADC offset calibration
+ * interval to 8 minutes to mirror BSP kernel. Set voltage and current
+ * modes to average to mirror BSP kernel.
+ */
+ regmap_write(rk808->regmap, RK817_GAS_GAUGE_GG_CON, 0x04);
+
+ /* Calibrate voltage like the BSP does here. */
+ rk817_bat_calib_vol(charger);
+
+ /* Write relax threshold, derived from sleep enter current. */
+ tmp = CURRENT_TO_ADC(charger->sleep_enter_current_ua,
+ charger->res_div);
+ put_unaligned_be16(tmp, bulk_reg);
+ regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_RELAX_THRE_H,
+ bulk_reg, 2);
+
+ /* Write sleep sample current, derived from sleep filter current. */
+ tmp = CURRENT_TO_ADC(charger->sleep_filter_current_ua,
+ charger->res_div);
+ put_unaligned_be16(tmp, bulk_reg);
+ regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_H,
+ bulk_reg, 2);
+
+ /* Restart battery relax voltage */
+ regmap_write_bits(rk808->regmap, RK817_GAS_GAUGE_GG_STS,
+ RK817_RELAX_VOL_UPD, (0x0 << 2));
+
+ /*
+ * Set OCV Threshold Voltage to 127.5mV. This was hard coded like this
+ * in the BSP.
+ */
+ regmap_write(rk808->regmap, RK817_GAS_GAUGE_OCV_THRE_VOL, 0xff);
+
+ /*
+ * Set maximum charging voltage to battery max voltage. Trying to be
+ * incredibly safe with these values, as setting them wrong could
+ * overcharge the battery, which would be very bad.
+ */
+ max_chg_vol_mv = bat_info->constant_charge_voltage_max_uv / 1000;
+ max_chg_cur_ma = bat_info->constant_charge_current_max_ua / 1000;
+
+ if (max_chg_vol_mv < 4100) {
+ return dev_err_probe(charger->dev, -EINVAL,
+ "invalid max charger voltage, value %u unsupported\n",
+ max_chg_vol_mv * 1000);
+ }
+ if (max_chg_vol_mv > 4450) {
+ dev_info(charger->dev,
+ "Setting max charge voltage to 4450000uv\n");
+ max_chg_vol_mv = 4450;
+ }
+
+ if (max_chg_cur_ma < 500) {
+ return dev_err_probe(charger->dev, -EINVAL,
+ "invalid max charger current, value %u unsupported\n",
+ max_chg_cur_ma * 1000);
+ }
+ if (max_chg_cur_ma > 3500)
+ dev_info(charger->dev,
+ "Setting max charge current to 3500000ua\n");
+
+ /*
+ * Now that the values are sanity checked, if we subtract 4100 from the
+ * max voltage and divide by 50, we conveniently get the exact value for
+ * the registers, which are 4.1v, 4.15v, 4.2v, 4.25v, 4.3v, 4.35v,
+ * 4.4v, and 4.45v; these correspond to values 0x00 through 0x07.
+ */
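+ /* e.g. a 4350 mV limit maps to (4350 - 4100) / 50 = 5 (0x05). */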
+ max_chg_vol_reg = (max_chg_vol_mv - 4100) / 50;
+
+ max_chg_cur_reg = rk817_chg_cur_to_reg(max_chg_cur_ma);
+
+ if (max_chg_vol_reg < 0 || max_chg_vol_reg > 7) {
+ return dev_err_probe(charger->dev, -EINVAL,
+ "invalid max charger voltage, value %u unsupported\n",
+ max_chg_vol_mv * 1000);
+ }
+ if (max_chg_cur_reg < 0 || max_chg_cur_reg > 7) {
+ return dev_err_probe(charger->dev, -EINVAL,
+ "invalid max charger current, value %u unsupported\n",
+ max_chg_cur_ma * 1000);
+ }
+
+ /*
+ * Write the values to the registers, and deliver an emergency warning
+ * in the event they are not written correctly.
+ */
+ ret = regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_OUT,
+ RK817_CHRG_VOL_SEL, (max_chg_vol_reg << 4));
+ if (ret) {
+ dev_emerg(charger->dev,
+ "Danger, unable to set max charger voltage: %d\n",
+ ret);
+ }
+
+ ret = regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_OUT,
+ RK817_CHRG_CUR_SEL, max_chg_cur_reg);
+ if (ret) {
+ dev_emerg(charger->dev,
+ "Danger, unable to set max charger current: %d\n",
+ ret);
+ }
+
+ /* Set charge finishing mode to analog */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_TERM,
+ RK817_CHRG_TERM_ANA_DIG, (0x0 << 2));
+
+ /*
+ * Set charge finish current, warn if value not in range and keep
+ * default.
+ */
+ chg_term_ma = bat_info->charge_term_current_ua / 1000;
+ if (chg_term_ma < 150 || chg_term_ma > 400) {
+ dev_warn(charger->dev,
+ "Invalid charge termination %u, keeping default\n",
+ chg_term_ma * 1000);
+ chg_term_ma = 200;
+ }
+
+ /*
+ * Values of 150ma, 200ma, 300ma, and 400ma correspond to 00, 01, 10,
+ * and 11.
+ */
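+ /* e.g. the 200 mA default maps to (200 - 100) / 100 = 0x01. */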
+ chg_term_i_reg = (chg_term_ma - 100) / 100;
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_TERM,
+ RK817_CHRG_TERM_ANA_SEL, chg_term_i_reg);
+
+ ret = rk817_read_or_set_full_charge_on_boot(charger, bat_info);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set minimum USB input voltage to 4.5v and enable USB voltage input
+ * limit.
+ */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN,
+ RK817_USB_VLIM_SEL, (0x05 << 4));
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN, RK817_USB_VLIM_EN,
+ (0x01 << 7));
+
+ /*
+ * Set average USB input current limit to 1.5A and enable USB current
+ * input limit.
+ */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN,
+ RK817_USB_ILIM_SEL, 0x03);
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN, RK817_USB_ILIM_EN,
+ (0x01 << 3));
+
+ return 0;
+}
+
+static void rk817_charging_monitor(struct work_struct *work)
+{
+ struct rk817_charger *charger;
+
+ charger = container_of(work, struct rk817_charger, work.work);
+
+ rk817_read_props(charger);
+
+ /* Run every 8 seconds like the BSP driver did. */
+ queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
+}
+
+static int rk817_charger_probe(struct platform_device *pdev)
+{
+ struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
+ struct rk817_charger *charger;
+ struct device_node *node;
+ struct power_supply_battery_info *bat_info;
+ struct device *dev = &pdev->dev;
+ struct power_supply_config pscfg = {};
+ int plugin_irq, plugout_irq;
+ int of_value;
+ int ret;
+
+ node = of_get_child_by_name(dev->parent->of_node, "charger");
+ if (!node)
+ return -ENODEV;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->rk808 = rk808;
+
+ charger->dev = &pdev->dev;
+ platform_set_drvdata(pdev, charger);
+
+ rk817_bat_calib_vol(charger);
+
+ pscfg.drv_data = charger;
+ pscfg.of_node = node;
+
+ /*
+ * Get sample resistor value. Note only values of 10000 or 20000
+ * microohms are allowed. Schematic for my test implementation (an
+ * Odroid Go Advance) shows a 10 milliohm resistor for reference.
+ */
+ ret = of_property_read_u32(node, "rockchip,resistor-sense-micro-ohms",
+ &of_value);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "Error reading sample resistor value\n");
+ }
+ /*
+ * Store as a 1 or a 2, since all we really use the value for is as a
+ * divisor in some calculations.
+ */
+ charger->res_div = (of_value == 20000) ? 2 : 1;
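+ /* The tested 10 mOhm (10000 microohm) resistor leaves the divisor at 1. */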
+
+ /*
+ * Get sleep enter current value. Not sure what this value is for
+ * other than to help calibrate the relax threshold.
+ */
+ ret = of_property_read_u32(node,
+ "rockchip,sleep-enter-current-microamp",
+ &of_value);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "Error reading sleep enter cur value\n");
+ }
+ charger->sleep_enter_current_ua = of_value;
+
+ /* Get sleep filter current value */
+ ret = of_property_read_u32(node,
+ "rockchip,sleep-filter-current-microamp",
+ &of_value);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "Error reading sleep filter cur value\n");
+ }
+
+ charger->sleep_filter_current_ua = of_value;
+
+ charger->bat_ps = devm_power_supply_register(&pdev->dev,
+ &rk817_bat_desc, &pscfg);
+
+ charger->chg_ps = devm_power_supply_register(&pdev->dev,
+ &rk817_chg_desc, &pscfg);
+
+ if (IS_ERR(charger->bat_ps))
+ return dev_err_probe(dev, -EINVAL,
+ "Battery failed to probe\n");
+
+ if (IS_ERR(charger->chg_ps))
+ return dev_err_probe(dev, -EINVAL,
+ "Charger failed to probe\n");
+
+ ret = power_supply_get_battery_info(charger->bat_ps,
+ &bat_info);
+ if (ret) {
+ return dev_err_probe(dev, ret,
+ "Unable to get battery info: %d\n", ret);
+ }
+
+ if ((bat_info->charge_full_design_uah <= 0) ||
+ (bat_info->voltage_min_design_uv <= 0) ||
+ (bat_info->voltage_max_design_uv <= 0) ||
+ (bat_info->constant_charge_voltage_max_uv <= 0) ||
+ (bat_info->constant_charge_current_max_ua <= 0) ||
+ (bat_info->charge_term_current_ua <= 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "Required bat info missing or invalid\n");
+ }
+
+ charger->bat_charge_full_design_uah = bat_info->charge_full_design_uah;
+ charger->bat_voltage_min_design_uv = bat_info->voltage_min_design_uv;
+ charger->bat_voltage_max_design_uv = bat_info->voltage_max_design_uv;
+
+ /*
+ * Has to run after power_supply_get_battery_info as it depends on some
+ * values discovered from that routine.
+ */
+ ret = rk817_battery_init(charger, bat_info);
+ if (ret)
+ return ret;
+
+ power_supply_put_battery_info(charger->bat_ps, bat_info);
+
+ plugin_irq = platform_get_irq(pdev, 0);
+ if (plugin_irq < 0)
+ return plugin_irq;
+
+ plugout_irq = platform_get_irq(pdev, 1);
+ if (plugout_irq < 0)
+ return plugout_irq;
+
+ ret = devm_request_threaded_irq(charger->dev, plugin_irq, NULL,
+ rk817_plug_in_isr,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "rk817_plug_in", charger);
+ if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "plug_in_irq request failed!\n");
+ }
+
+ ret = devm_request_threaded_irq(charger->dev, plugout_irq, NULL,
+ rk817_plug_out_isr,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "rk817_plug_out", charger);
+ if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "plug_out_irq request failed!\n");
+ }
+
+ ret = devm_delayed_work_autocancel(&pdev->dev, &charger->work,
+ rk817_charging_monitor);
+ if (ret)
+ return ret;
+
+ /* Force the first update immediately. */
+ mod_delayed_work(system_wq, &charger->work, 0);
+
+ return 0;
+}
+
+static struct platform_driver rk817_charger_driver = {
+ .probe = rk817_charger_probe,
+ .driver = {
+ .name = "rk817-charger",
+ },
+};
+module_platform_driver(rk817_charger_driver);
+
+MODULE_DESCRIPTION("Battery power supply driver for RK817 PMIC");
+MODULE_AUTHOR("Maya Matuszczyk <maccraft123mc@gmail.com>");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index ba33d1617e0b..a4bc9f2a10bc 100644
--- a/drivers/power/supply/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -50,7 +50,7 @@ static int tps65217_config_charger(struct tps65217_charger *charger)
* tps65217 rev. G, p. 31 (see p. 32 for NTC schematic)
*
* The device can be configured to support a 100k NTC (B = 3960) by
- * setting the the NTC_TYPE bit in register CHGCONFIG1 to 1. However it
+ * setting the NTC_TYPE bit in register CHGCONFIG1 to 1. However it
* is not recommended to do so. In sleep mode, the charger continues
* charging the battery, but all register values are reset to default
* values. Therefore, the charger would get the wrong temperature
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index a20bf12f3ce3..999e218d7793 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -254,7 +254,7 @@ void idle_inject_stop(struct idle_inject_device *ii_dev)
iit = per_cpu_ptr(&idle_inject_thread, cpu);
iit->should_run = 0;
- wait_task_inactive(iit->tsk, 0);
+ wait_task_inactive(iit->tsk, TASK_ANY);
}
cpu_hotplug_enable();
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index d36c3f597f77..a48d9b7d2921 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -3657,6 +3657,7 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
struct device *dev = &bp->dev;
sysfs_remove_link(&dev->kobj, "ttyGNSS");
+ sysfs_remove_link(&dev->kobj, "ttyGNSS2");
sysfs_remove_link(&dev->kobj, "ttyMAC");
sysfs_remove_link(&dev->kobj, "ptp");
sysfs_remove_link(&dev->kobj, "pps");
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index cfe3a0327471..d333e7422f4a 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -678,7 +678,7 @@ static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
mutex_lock(&pwm_lock);
list_for_each_entry(chip, &pwm_chips, list)
- if (chip->dev && dev_fwnode(chip->dev) == fwnode) {
+ if (chip->dev && device_match_fwnode(chip->dev, fwnode)) {
mutex_unlock(&pwm_lock);
return chip;
}
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index c893ec3d2fb4..98413d364338 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -14,35 +14,6 @@
#include "pwm-lpss.h"
-/* BayTrail */
-static const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
- .clk_rate = 25000000,
- .npwm = 1,
- .base_unit_bits = 16,
-};
-
-/* Braswell */
-static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
- .clk_rate = 19200000,
- .npwm = 1,
- .base_unit_bits = 16,
-};
-
-/* Broxton */
-static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
- .clk_rate = 19200000,
- .npwm = 4,
- .base_unit_bits = 22,
- .bypass = true,
-};
-
-/* Tangier */
-static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
- .clk_rate = 19200000,
- .npwm = 4,
- .base_unit_bits = 22,
-};
-
static int pwm_lpss_probe_pci(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -54,8 +25,12 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
if (err < 0)
return err;
+ err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (err)
+ return err;
+
info = (struct pwm_lpss_boardinfo *)id->driver_data;
- lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
+ lpwm = pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
if (IS_ERR(lpwm))
return PTR_ERR(lpwm);
@@ -73,7 +48,6 @@ static void pwm_lpss_remove_pci(struct pci_dev *pdev)
pm_runtime_get_sync(&pdev->dev);
}
-#ifdef CONFIG_PM
static int pwm_lpss_runtime_suspend_pci(struct device *dev)
{
/*
@@ -87,12 +61,11 @@ static int pwm_lpss_runtime_resume_pci(struct device *dev)
{
return 0;
}
-#endif
-static const struct dev_pm_ops pwm_lpss_pci_pm = {
- SET_RUNTIME_PM_OPS(pwm_lpss_runtime_suspend_pci,
- pwm_lpss_runtime_resume_pci, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(pwm_lpss_pci_pm,
+ pwm_lpss_runtime_suspend_pci,
+ pwm_lpss_runtime_resume_pci,
+ NULL);
static const struct pci_device_id pwm_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
@@ -114,10 +87,11 @@ static struct pci_driver pwm_lpss_driver_pci = {
.probe = pwm_lpss_probe_pci,
.remove = pwm_lpss_remove_pci,
.driver = {
- .pm = &pwm_lpss_pci_pm,
+ .pm = pm_ptr(&pwm_lpss_pci_pm),
},
};
module_pci_driver(pwm_lpss_driver_pci);
MODULE_DESCRIPTION("PWM PCI driver for Intel LPSS");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(PWM_LPSS);
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index 928570430cef..c48c6f2b2cd8 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -7,52 +7,31 @@
* Derived from the original pwm-lpss.c
*/
-#include <linux/acpi.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include "pwm-lpss.h"
-/* BayTrail */
-static const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
- .clk_rate = 25000000,
- .npwm = 1,
- .base_unit_bits = 16,
-};
-
-/* Braswell */
-static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
- .clk_rate = 19200000,
- .npwm = 1,
- .base_unit_bits = 16,
- .other_devices_aml_touches_pwm_regs = true,
-};
-
-/* Broxton */
-static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
- .clk_rate = 19200000,
- .npwm = 4,
- .base_unit_bits = 22,
- .bypass = true,
-};
static int pwm_lpss_probe_platform(struct platform_device *pdev)
{
const struct pwm_lpss_boardinfo *info;
- const struct acpi_device_id *id;
struct pwm_lpss_chip *lpwm;
- struct resource *r;
+ void __iomem *base;
- id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
- if (!id)
+ info = device_get_match_data(&pdev->dev);
+ if (!info)
return -ENODEV;
- info = (const struct pwm_lpss_boardinfo *)id->driver_data;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- lpwm = pwm_lpss_probe(&pdev->dev, r, info);
+ lpwm = pwm_lpss_probe(&pdev->dev, base, info);
if (IS_ERR(lpwm))
return PTR_ERR(lpwm);
@@ -110,4 +89,5 @@ module_platform_driver(pwm_lpss_driver_platform);
MODULE_DESCRIPTION("PWM platform driver for Intel LPSS");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(PWM_LPSS);
MODULE_ALIAS("platform:pwm-lpss");
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 36d4e83e6b79..accdef5dd58e 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -10,6 +10,7 @@
* Author: Alan Cox <alan@linux.intel.com>
*/
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -18,17 +19,53 @@
#include <linux/pm_runtime.h>
#include <linux/time.h>
+#define DEFAULT_SYMBOL_NAMESPACE PWM_LPSS
+
#include "pwm-lpss.h"
#define PWM 0x00000000
#define PWM_ENABLE BIT(31)
#define PWM_SW_UPDATE BIT(30)
#define PWM_BASE_UNIT_SHIFT 8
-#define PWM_ON_TIME_DIV_MASK 0x000000ff
+#define PWM_ON_TIME_DIV_MASK GENMASK(7, 0)
/* Size of each PWM register space if multiple */
#define PWM_SIZE 0x400
+/* BayTrail */
+const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
+ .clk_rate = 25000000,
+ .npwm = 1,
+ .base_unit_bits = 16,
+};
+EXPORT_SYMBOL_GPL(pwm_lpss_byt_info);
+
+/* Braswell */
+const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
+ .clk_rate = 19200000,
+ .npwm = 1,
+ .base_unit_bits = 16,
+ .other_devices_aml_touches_pwm_regs = true,
+};
+EXPORT_SYMBOL_GPL(pwm_lpss_bsw_info);
+
+/* Broxton */
+const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
+ .clk_rate = 19200000,
+ .npwm = 4,
+ .base_unit_bits = 22,
+ .bypass = true,
+};
+EXPORT_SYMBOL_GPL(pwm_lpss_bxt_info);
+
+/* Tangier */
+const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
+ .clk_rate = 19200000,
+ .npwm = 4,
+ .base_unit_bits = 22,
+};
+EXPORT_SYMBOL_GPL(pwm_lpss_tng_info);
+
static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
{
return container_of(chip, struct pwm_lpss_chip, chip);
@@ -207,7 +244,7 @@ static const struct pwm_ops pwm_lpss_ops = {
.owner = THIS_MODULE,
};
-struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
+struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, void __iomem *base,
const struct pwm_lpss_boardinfo *info)
{
struct pwm_lpss_chip *lpwm;
@@ -222,10 +259,7 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
if (!lpwm)
return ERR_PTR(-ENOMEM);
- lpwm->regs = devm_ioremap_resource(dev, r);
- if (IS_ERR(lpwm->regs))
- return ERR_CAST(lpwm->regs);
-
+ lpwm->regs = base;
lpwm->info = info;
c = lpwm->info->clk_rate;
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index 8b3476f25e06..8e82eb5a7e00 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -25,6 +25,11 @@ struct pwm_lpss_boardinfo {
unsigned long clk_rate;
unsigned int npwm;
unsigned long base_unit_bits;
+ /*
+ * Some versions of the IP may get stuck in the state machine if the
+ * enable bit is not set, in which case the update bit will report a
+ * busy status until reset. Other versions may behave differently.
+ */
bool bypass;
/*
* On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device
@@ -33,7 +38,12 @@ struct pwm_lpss_boardinfo {
bool other_devices_aml_touches_pwm_regs;
};
-struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
+extern const struct pwm_lpss_boardinfo pwm_lpss_byt_info;
+extern const struct pwm_lpss_boardinfo pwm_lpss_bsw_info;
+extern const struct pwm_lpss_boardinfo pwm_lpss_bxt_info;
+extern const struct pwm_lpss_boardinfo pwm_lpss_tng_info;
+
+struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, void __iomem *base,
const struct pwm_lpss_boardinfo *info);
#endif /* __PWM_LPSS_H */
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index f3647b317152..a5af859217c1 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -328,22 +328,16 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
else
pc->pclk = pc->clk;
- if (IS_ERR(pc->pclk)) {
- ret = PTR_ERR(pc->pclk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get APB clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(pc->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->pclk), "Can't get APB clk\n");
ret = clk_prepare_enable(pc->clk);
- if (ret) {
- dev_err(&pdev->dev, "Can't prepare enable PWM clk: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Can't prepare enable PWM clk\n");
ret = clk_prepare_enable(pc->pclk);
if (ret) {
- dev_err(&pdev->dev, "Can't prepare enable APB clk: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "Can't prepare enable APB clk\n");
goto err_clk;
}
@@ -360,7 +354,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
goto err_pclk;
}
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 9903c3a7eced..e7db8e45001c 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -42,7 +42,7 @@ static ssize_t period_show(struct device *child,
pwm_get_state(pwm, &state);
- return sprintf(buf, "%llu\n", state.period);
+ return sysfs_emit(buf, "%llu\n", state.period);
}
static ssize_t period_store(struct device *child,
@@ -77,7 +77,7 @@ static ssize_t duty_cycle_show(struct device *child,
pwm_get_state(pwm, &state);
- return sprintf(buf, "%llu\n", state.duty_cycle);
+ return sysfs_emit(buf, "%llu\n", state.duty_cycle);
}
static ssize_t duty_cycle_store(struct device *child,
@@ -112,7 +112,7 @@ static ssize_t enable_show(struct device *child,
pwm_get_state(pwm, &state);
- return sprintf(buf, "%d\n", state.enabled);
+ return sysfs_emit(buf, "%d\n", state.enabled);
}
static ssize_t enable_store(struct device *child,
@@ -171,7 +171,7 @@ static ssize_t polarity_show(struct device *child,
break;
}
- return sprintf(buf, "%s\n", polarity);
+ return sysfs_emit(buf, "%s\n", polarity);
}
static ssize_t polarity_store(struct device *child,
@@ -212,7 +212,7 @@ static ssize_t capture_show(struct device *child,
if (ret)
return ret;
- return sprintf(buf, "%u %u\n", result.period, result.duty_cycle);
+ return sysfs_emit(buf, "%u %u\n", result.period, result.duty_cycle);
}
static DEVICE_ATTR_RW(period);
@@ -361,7 +361,7 @@ static ssize_t npwm_show(struct device *parent, struct device_attribute *attr,
{
const struct pwm_chip *chip = dev_get_drvdata(parent);
- return sprintf(buf, "%u\n", chip->npwm);
+ return sysfs_emit(buf, "%u\n", chip->npwm);
}
static DEVICE_ATTR_RO(npwm);
@@ -433,7 +433,7 @@ static int pwm_class_resume_npwm(struct device *parent, unsigned int npwm)
return ret;
}
-static int __maybe_unused pwm_class_suspend(struct device *parent)
+static int pwm_class_suspend(struct device *parent)
{
struct pwm_chip *chip = dev_get_drvdata(parent);
unsigned int i;
@@ -464,20 +464,20 @@ static int __maybe_unused pwm_class_suspend(struct device *parent)
return ret;
}
-static int __maybe_unused pwm_class_resume(struct device *parent)
+static int pwm_class_resume(struct device *parent)
{
struct pwm_chip *chip = dev_get_drvdata(parent);
return pwm_class_resume_npwm(parent, chip->npwm);
}
-static SIMPLE_DEV_PM_OPS(pwm_class_pm_ops, pwm_class_suspend, pwm_class_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pwm_class_pm_ops, pwm_class_suspend, pwm_class_resume);
static struct class pwm_class = {
.name = "pwm",
.owner = THIS_MODULE,
.dev_groups = pwm_chip_groups,
- .pm = &pwm_class_pm_ops,
+ .pm = pm_sleep_ptr(&pwm_class_pm_ops),
};
static int pwmchip_sysfs_match(struct device *parent, const void *data)
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index d663ab9670fe..070e4403c6c2 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1282,6 +1282,7 @@ config REGULATOR_STW481X_VMMC
config REGULATOR_SY7636A
tristate "Silergy SY7636A voltage regulator"
+ depends on MFD_SY7636A
help
This driver supports Silergy SY3686A voltage regulator.
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index ca0817f8e41e..899aa8dd12f0 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -599,7 +599,7 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
}
/* Register memory region */
- mem = rproc_mem_entry_init(dev, cpu_addr, (dma_addr_t)att->sa,
+ mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)att->sa,
att->size, da, NULL, NULL, "dsp_mem");
if (mem)
@@ -635,7 +635,7 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
}
/* Register memory region */
- mem = rproc_mem_entry_init(dev, cpu_addr, (dma_addr_t)rmem->base,
+ mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
rmem->size, da, NULL, NULL, it.node->name);
if (mem)
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 38383e7de3c1..7cc4fd207e2d 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -646,7 +646,6 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
struct imx_rproc *priv = rproc->priv;
struct device *dev = priv->dev;
struct mbox_client *cl;
- int ret;
if (!of_get_property(dev->of_node, "mbox-names", NULL))
return 0;
@@ -659,18 +658,15 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
cl->rx_callback = imx_rproc_rx_callback;
priv->tx_ch = mbox_request_channel_byname(cl, "tx");
- if (IS_ERR(priv->tx_ch)) {
- ret = PTR_ERR(priv->tx_ch);
- return dev_err_probe(cl->dev, ret,
- "failed to request tx mailbox channel: %d\n", ret);
- }
+ if (IS_ERR(priv->tx_ch))
+ return dev_err_probe(cl->dev, PTR_ERR(priv->tx_ch),
+ "failed to request tx mailbox channel\n");
priv->rx_ch = mbox_request_channel_byname(cl, "rx");
if (IS_ERR(priv->rx_ch)) {
mbox_free_channel(priv->tx_ch);
- ret = PTR_ERR(priv->rx_ch);
- return dev_err_probe(cl->dev, ret,
- "failed to request rx mailbox channel: %d\n", ret);
+ return dev_err_probe(cl->dev, PTR_ERR(priv->rx_ch),
+ "failed to request rx mailbox channel\n");
}
return 0;
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 594a9b43b7ae..95b39741925d 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -14,7 +14,7 @@
#include <linux/workqueue.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/remoteproc.h>
@@ -59,10 +59,10 @@ struct keystone_rproc {
int num_mems;
struct regmap *dev_ctrl;
struct reset_control *reset;
+ struct gpio_desc *kick_gpio;
u32 boot_offset;
int irq_ring;
int irq_fault;
- int kick_gpio;
struct work_struct workqueue;
};
@@ -232,10 +232,10 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid)
{
struct keystone_rproc *ksproc = rproc->priv;
- if (WARN_ON(ksproc->kick_gpio < 0))
+ if (!ksproc->kick_gpio)
return;
- gpio_set_value(ksproc->kick_gpio, 1);
+ gpiod_set_value(ksproc->kick_gpio, 1);
}
/*
@@ -432,9 +432,9 @@ static int keystone_rproc_probe(struct platform_device *pdev)
goto disable_clk;
}
- ksproc->kick_gpio = of_get_named_gpio_flags(np, "kick-gpios", 0, NULL);
- if (ksproc->kick_gpio < 0) {
- ret = ksproc->kick_gpio;
+ ksproc->kick_gpio = gpiod_get(dev, "kick", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(ksproc->kick_gpio);
+ if (ret) {
dev_err(dev, "failed to get gpio for virtio kicks, status = %d\n",
ret);
goto disable_clk;
@@ -466,6 +466,7 @@ static int keystone_rproc_probe(struct platform_device *pdev)
release_mem:
of_reserved_mem_device_release(dev);
+ gpiod_put(ksproc->kick_gpio);
disable_clk:
pm_runtime_put_sync(dev);
disable_rpm:
@@ -480,6 +481,7 @@ static int keystone_rproc_remove(struct platform_device *pdev)
struct keystone_rproc *ksproc = platform_get_drvdata(pdev);
rproc_del(ksproc->rproc);
+ gpiod_put(ksproc->kick_gpio);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
rproc_free(ksproc->rproc);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index e5279ed9a8d7..8768cb64f560 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -23,9 +23,7 @@
#include <linux/panic_notifier.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h> /* XXX: pokes into bus_dma_range */
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
@@ -346,7 +344,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
return -ENOMEM;
} else {
- /* Register carveout in in list */
+ /* Register carveout in list */
mem = rproc_mem_entry_init(dev, NULL, 0,
size, rsc->vring[i].da,
rproc_alloc_carveout,
@@ -384,7 +382,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
return 0;
}
-static int
+int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
struct rproc *rproc = rvdev->rproc;
@@ -435,57 +433,17 @@ void rproc_free_vring(struct rproc_vring *rvring)
}
}
-static int rproc_vdev_do_start(struct rproc_subdev *subdev)
+void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev)
{
- struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
-
- return rproc_add_virtio_dev(rvdev, rvdev->id);
-}
-
-static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
-{
- struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
- int ret;
-
- ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
- if (ret)
- dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
-}
-
-/**
- * rproc_rvdev_release() - release the existence of a rvdev
- *
- * @dev: the subdevice's dev
- */
-static void rproc_rvdev_release(struct device *dev)
-{
- struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
-
- of_reserved_mem_device_release(dev);
- dma_release_coherent_memory(dev);
-
- kfree(rvdev);
+ if (rvdev && rproc)
+ list_add_tail(&rvdev->node, &rproc->rvdevs);
}
-static int copy_dma_range_map(struct device *to, struct device *from)
+void rproc_remove_rvdev(struct rproc_vdev *rvdev)
{
- const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
- int num_ranges = 0;
-
- if (!map)
- return 0;
-
- for (r = map; r->size; r++)
- num_ranges++;
-
- new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
- GFP_KERNEL);
- if (!new_map)
- return -ENOMEM;
- to->dma_range_map = new_map;
- return 0;
+ if (rvdev)
+ list_del(&rvdev->node);
}
-
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
@@ -520,12 +478,13 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
struct fw_rsc_vdev *rsc = ptr;
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
- int i, ret;
- char name[16];
+ size_t rsc_size;
+ struct rproc_vdev_data rvdev_data;
+ struct platform_device *pdev;
/* make sure resource isn't truncated */
- if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
- avail) {
+ rsc_size = struct_size(rsc, vring, rsc->num_of_vrings);
+ if (size_add(rsc_size, rsc->config_len) > avail) {
dev_err(dev, "vdev rsc is truncated\n");
return -EINVAL;
}
@@ -545,93 +504,19 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
return -EINVAL;
}
- rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
- if (!rvdev)
- return -ENOMEM;
-
- kref_init(&rvdev->refcount);
-
- rvdev->id = rsc->id;
- rvdev->rproc = rproc;
- rvdev->index = rproc->nb_vdev++;
+ rvdev_data.id = rsc->id;
+ rvdev_data.index = rproc->nb_vdev++;
+ rvdev_data.rsc_offset = offset;
+ rvdev_data.rsc = rsc;
- /* Initialise vdev subdevice */
- snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
- rvdev->dev.parent = &rproc->dev;
- rvdev->dev.release = rproc_rvdev_release;
- dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
- dev_set_drvdata(&rvdev->dev, rvdev);
-
- ret = device_register(&rvdev->dev);
- if (ret) {
- put_device(&rvdev->dev);
- return ret;
+ pdev = platform_device_register_data(dev, "rproc-virtio", rvdev_data.index, &rvdev_data,
+ sizeof(rvdev_data));
+ if (IS_ERR(pdev)) {
+ dev_err(dev, "failed to create rproc-virtio device\n");
+ return PTR_ERR(pdev);
}
- ret = copy_dma_range_map(&rvdev->dev, rproc->dev.parent);
- if (ret)
- goto free_rvdev;
-
- /* Make device dma capable by inheriting from parent's capabilities */
- set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));
-
- ret = dma_coerce_mask_and_coherent(&rvdev->dev,
- dma_get_mask(rproc->dev.parent));
- if (ret) {
- dev_warn(dev,
- "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
- dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
- }
-
- /* parse the vrings */
- for (i = 0; i < rsc->num_of_vrings; i++) {
- ret = rproc_parse_vring(rvdev, rsc, i);
- if (ret)
- goto free_rvdev;
- }
-
- /* remember the resource offset*/
- rvdev->rsc_offset = offset;
-
- /* allocate the vring resources */
- for (i = 0; i < rsc->num_of_vrings; i++) {
- ret = rproc_alloc_vring(rvdev, i);
- if (ret)
- goto unwind_vring_allocations;
- }
-
- list_add_tail(&rvdev->node, &rproc->rvdevs);
-
- rvdev->subdev.start = rproc_vdev_do_start;
- rvdev->subdev.stop = rproc_vdev_do_stop;
-
- rproc_add_subdev(rproc, &rvdev->subdev);
-
return 0;
-
-unwind_vring_allocations:
- for (i--; i >= 0; i--)
- rproc_free_vring(&rvdev->vring[i]);
-free_rvdev:
- device_unregister(&rvdev->dev);
- return ret;
-}
-
-void rproc_vdev_release(struct kref *ref)
-{
- struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount);
- struct rproc_vring *rvring;
- struct rproc *rproc = rvdev->rproc;
- int id;
-
- for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
- rvring = &rvdev->vring[id];
- rproc_free_vring(rvring);
- }
-
- rproc_remove_subdev(rproc, &rvdev->subdev);
- list_del(&rvdev->node);
- device_unregister(&rvdev->dev);
}
/**
@@ -1365,7 +1250,7 @@ void rproc_resource_cleanup(struct rproc *rproc)
/* clean up remote vdev entries */
list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
- kref_put(&rvdev->refcount, rproc_vdev_release);
+ platform_device_unregister(rvdev->pdev);
rproc_coredump_cleanup(rproc);
}
@@ -1885,6 +1770,45 @@ static int __rproc_detach(struct rproc *rproc)
return 0;
}
+static int rproc_attach_recovery(struct rproc *rproc)
+{
+ int ret;
+
+ ret = __rproc_detach(rproc);
+ if (ret)
+ return ret;
+
+ return __rproc_attach(rproc);
+}
+
+static int rproc_boot_recovery(struct rproc *rproc)
+{
+ const struct firmware *firmware_p;
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = rproc_stop(rproc, true);
+ if (ret)
+ return ret;
+
+ /* generate coredump */
+ rproc->ops->coredump(rproc);
+
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ return ret;
+ }
+
+ /* boot the remote processor up again */
+ ret = rproc_start(rproc, firmware_p);
+
+ release_firmware(firmware_p);
+
+ return ret;
+}
+
/**
* rproc_trigger_recovery() - recover a remoteproc
* @rproc: the remote processor
@@ -1899,7 +1823,6 @@ static int __rproc_detach(struct rproc *rproc)
*/
int rproc_trigger_recovery(struct rproc *rproc)
{
- const struct firmware *firmware_p;
struct device *dev = &rproc->dev;
int ret;
@@ -1913,24 +1836,10 @@ int rproc_trigger_recovery(struct rproc *rproc)
dev_err(dev, "recovering %s\n", rproc->name);
- ret = rproc_stop(rproc, true);
- if (ret)
- goto unlock_mutex;
-
- /* generate coredump */
- rproc->ops->coredump(rproc);
-
- /* load firmware */
- ret = request_firmware(&firmware_p, rproc->firmware, dev);
- if (ret < 0) {
- dev_err(dev, "request_firmware failed: %d\n", ret);
- goto unlock_mutex;
- }
-
- /* boot the remote processor up again */
- ret = rproc_start(rproc, firmware_p);
-
- release_firmware(firmware_p);
+ if (rproc_has_feature(rproc, RPROC_FEAT_ATTACH_ON_RECOVERY))
+ ret = rproc_attach_recovery(rproc);
+ else
+ ret = rproc_boot_recovery(rproc);
unlock_mutex:
mutex_unlock(&rproc->lock);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 72d4d3d7d94d..d4dbb8d1d80c 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -24,16 +24,43 @@ struct rproc_debug_trace {
struct rproc_mem_entry trace_mem;
};
+/**
+ * struct rproc_vdev_data - remoteproc virtio device data
+ * @rsc_offset: offset of the vdev's resource entry
+ * @id: virtio device id (as in virtio_ids.h)
+ * @index: vdev position versus other vdevs declared in the resource table
+ * @rsc: pointer to the vdev resource entry. Valid only during vdev init as
+ * the resource can be cached by rproc.
+ */
+struct rproc_vdev_data {
+ u32 rsc_offset;
+ unsigned int id;
+ u32 index;
+ struct fw_rsc_vdev *rsc;
+};
+
+static inline bool rproc_has_feature(struct rproc *rproc, unsigned int feature)
+{
+ return test_bit(feature, rproc->features);
+}
+
+static inline int rproc_set_feature(struct rproc *rproc, unsigned int feature)
+{
+ if (feature >= RPROC_MAX_FEATURES)
+ return -EINVAL;
+
+ set_bit(feature, rproc->features);
+
+ return 0;
+}
+
/* from remoteproc_core.c */
void rproc_release(struct kref *kref);
-irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
-void rproc_vdev_release(struct kref *ref);
int rproc_of_parse_firmware(struct device *dev, int index,
const char **fw_name);
/* from remoteproc_virtio.c */
-int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
-int rproc_remove_virtio_dev(struct device *dev, void *data);
+irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
/* from remoteproc_debugfs.c */
void rproc_remove_trace_file(struct dentry *tfile);
@@ -83,6 +110,7 @@ static inline void rproc_char_device_remove(struct rproc *rproc)
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
+int rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
@@ -95,6 +123,8 @@ struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw);
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...);
+void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev);
+void rproc_remove_rvdev(struct rproc_vdev *rvdev);
static inline int rproc_prepare_device(struct rproc *rproc)
{
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 0f7706e23eb9..0e95525c1158 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -9,9 +9,12 @@
* Brian Swetland <swetland@google.com>
*/
+#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
+#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
@@ -23,9 +26,32 @@
#include "remoteproc_internal.h"
+static int copy_dma_range_map(struct device *to, struct device *from)
+{
+ const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
+ int num_ranges = 0;
+
+ if (!map)
+ return 0;
+
+ for (r = map; r->size; r++)
+ num_ranges++;
+
+ new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
+ GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+ to->dma_range_map = new_map;
+ return 0;
+}
+
static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
- return container_of(vdev->dev.parent, struct rproc_vdev, dev);
+ struct platform_device *pdev;
+
+ pdev = container_of(vdev->dev.parent, struct platform_device, dev);
+
+ return platform_get_drvdata(pdev);
}
static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
@@ -322,13 +348,10 @@ static void rproc_virtio_dev_release(struct device *dev)
{
struct virtio_device *vdev = dev_to_virtio(dev);
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
- struct rproc *rproc = vdev_to_rproc(vdev);
kfree(vdev);
- kref_put(&rvdev->refcount, rproc_vdev_release);
-
- put_device(&rproc->dev);
+ put_device(&rvdev->pdev->dev);
}
/**
@@ -341,10 +364,10 @@ static void rproc_virtio_dev_release(struct device *dev)
*
* Return: 0 on success or an appropriate error value otherwise
*/
-int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
+static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
struct rproc *rproc = rvdev->rproc;
- struct device *dev = &rvdev->dev;
+ struct device *dev = &rvdev->pdev->dev;
struct virtio_device *vdev;
struct rproc_mem_entry *mem;
int ret;
@@ -414,18 +437,8 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
vdev->dev.parent = dev;
vdev->dev.release = rproc_virtio_dev_release;
- /*
- * We're indirectly making a non-temporary copy of the rproc pointer
- * here, because drivers probed with this vdev will indirectly
- * access the wrapping rproc.
- *
- * Therefore we must increment the rproc refcount here, and decrement
- * it _only_ when the vdev is released.
- */
- get_device(&rproc->dev);
-
/* Reference the vdev and vring allocations */
- kref_get(&rvdev->refcount);
+ get_device(dev);
ret = register_virtio_device(vdev);
if (ret) {
@@ -449,10 +462,142 @@ out:
*
* Return: 0
*/
-int rproc_remove_virtio_dev(struct device *dev, void *data)
+static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
struct virtio_device *vdev = dev_to_virtio(dev);
unregister_virtio_device(vdev);
return 0;
}
+
+static int rproc_vdev_do_start(struct rproc_subdev *subdev)
+{
+ struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+
+ return rproc_add_virtio_dev(rvdev, rvdev->id);
+}
+
+static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+ struct device *dev = &rvdev->pdev->dev;
+ int ret;
+
+ ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
+ if (ret)
+ dev_warn(dev, "can't remove vdev child device: %d\n", ret);
+}
+
+static int rproc_virtio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc_vdev_data *rvdev_data = dev->platform_data;
+ struct rproc_vdev *rvdev;
+ struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
+ struct fw_rsc_vdev *rsc;
+ int i, ret;
+
+ if (!rvdev_data)
+ return -EINVAL;
+
+ rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
+ if (!rvdev)
+ return -ENOMEM;
+
+ rvdev->id = rvdev_data->id;
+ rvdev->rproc = rproc;
+ rvdev->index = rvdev_data->index;
+
+ ret = copy_dma_range_map(dev, rproc->dev.parent);
+ if (ret)
+ return ret;
+
+ /* Make device dma capable by inheriting from parent's capabilities */
+ set_dma_ops(dev, get_dma_ops(rproc->dev.parent));
+
+ ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
+ if (ret) {
+ dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
+ dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
+ }
+
+ platform_set_drvdata(pdev, rvdev);
+ rvdev->pdev = pdev;
+
+ rsc = rvdev_data->rsc;
+
+ /* parse the vrings */
+ for (i = 0; i < rsc->num_of_vrings; i++) {
+ ret = rproc_parse_vring(rvdev, rsc, i);
+ if (ret)
+ return ret;
+ }
+
+ /* remember the resource offset */
+ rvdev->rsc_offset = rvdev_data->rsc_offset;
+
+ /* allocate the vring resources */
+ for (i = 0; i < rsc->num_of_vrings; i++) {
+ ret = rproc_alloc_vring(rvdev, i);
+ if (ret)
+ goto unwind_vring_allocations;
+ }
+
+ rproc_add_rvdev(rproc, rvdev);
+
+ rvdev->subdev.start = rproc_vdev_do_start;
+ rvdev->subdev.stop = rproc_vdev_do_stop;
+
+ rproc_add_subdev(rproc, &rvdev->subdev);
+
+ /*
+ * We're indirectly making a non-temporary copy of the rproc pointer
+ * here, because the platform device or the vdev device will indirectly
+ * access the wrapping rproc.
+ *
+ * Therefore we must increment the rproc refcount here, and decrement
+ * it _only_ on platform remove.
+ */
+ get_device(&rproc->dev);
+
+ return 0;
+
+unwind_vring_allocations:
+ for (i--; i >= 0; i--)
+ rproc_free_vring(&rvdev->vring[i]);
+
+ return ret;
+}
+
+static int rproc_virtio_remove(struct platform_device *pdev)
+{
+ struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
+ struct rproc *rproc = rvdev->rproc;
+ struct rproc_vring *rvring;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
+ rvring = &rvdev->vring[id];
+ rproc_free_vring(rvring);
+ }
+
+ rproc_remove_subdev(rproc, &rvdev->subdev);
+ rproc_remove_rvdev(rvdev);
+
+ of_reserved_mem_device_release(&pdev->dev);
+ dma_release_coherent_memory(&pdev->dev);
+
+ put_device(&rproc->dev);
+
+ return 0;
+}
+
+/* Platform driver */
+static struct platform_driver rproc_virtio_driver = {
+ .probe = rproc_virtio_probe,
+ .remove = rproc_virtio_remove,
+ .driver = {
+ .name = "rproc-virtio",
+ },
+};
+builtin_platform_driver(rproc_virtio_driver);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 2a8238eb8794..de176c2fbad9 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -152,6 +152,13 @@ config RESET_PISTACHIO
help
This enables the reset driver for ImgTec Pistachio SoCs.
+config RESET_POLARFIRE_SOC
+ bool "Microchip PolarFire SoC (MPFS) Reset Driver"
+ depends on AUXILIARY_BUS && MCHP_CLK_MPFS
+ default MCHP_CLK_MPFS
+ help
+ This driver supports peripheral reset for the Microchip PolarFire SoC.
+
config RESET_QCOM_AOSS
tristate "Qcom AOSS Reset Driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index cd5cf8e7c6a7..3e7e5fd633a8 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o
obj-$(CONFIG_RESET_NPCM) += reset-npcm.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
+obj-$(CONFIG_RESET_POLARFIRE_SOC) += reset-mpfs.o
obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o
@@ -40,4 +41,3 @@ obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
-
diff --git a/drivers/reset/reset-mpfs.c b/drivers/reset/reset-mpfs.c
new file mode 100644
index 000000000000..e003e50590ec
--- /dev/null
+++ b/drivers/reset/reset-mpfs.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PolarFire SoC (MPFS) Peripheral Clock Reset Controller
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ *
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/microchip,mpfs-clock.h>
+#include <soc/microchip/mpfs.h>
+
+/*
+ * The ENVM reset is the lowest bit in the register, and I am using the
+ * CLK_FOO defines from the DT to make things easier to configure - so
+ * this accounts for the offset of 3 there.
+ */
+#define MPFS_PERIPH_OFFSET CLK_ENVM
+#define MPFS_NUM_RESETS 30u
+#define MPFS_SLEEP_MIN_US 100
+#define MPFS_SLEEP_MAX_US 200
+
+/* block concurrent access to the soft reset register */
+static DEFINE_SPINLOCK(mpfs_reset_lock);
+
+/*
+ * Peripheral clock resets
+ */
+
+static int mpfs_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&mpfs_reset_lock, flags);
+
+ reg = mpfs_reset_read(rcdev->dev);
+ reg |= BIT(id);
+ mpfs_reset_write(rcdev->dev, reg);
+
+ spin_unlock_irqrestore(&mpfs_reset_lock, flags);
+
+ return 0;
+}
+
+static int mpfs_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&mpfs_reset_lock, flags);
+
+ reg = mpfs_reset_read(rcdev->dev);
+ reg &= ~BIT(id);
+ mpfs_reset_write(rcdev->dev, reg);
+
+ spin_unlock_irqrestore(&mpfs_reset_lock, flags);
+
+ return 0;
+}
+
+static int mpfs_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ u32 reg = mpfs_reset_read(rcdev->dev);
+
+ /*
+ * It is safe to return here as MPFS_NUM_RESETS makes sure the sign bit
+ * is never hit.
+ */
+ return (reg & BIT(id));
+}
+
+static int mpfs_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ mpfs_assert(rcdev, id);
+
+ usleep_range(MPFS_SLEEP_MIN_US, MPFS_SLEEP_MAX_US);
+
+ mpfs_deassert(rcdev, id);
+
+ return 0;
+}
+
+static const struct reset_control_ops mpfs_reset_ops = {
+ .reset = mpfs_reset,
+ .assert = mpfs_assert,
+ .deassert = mpfs_deassert,
+ .status = mpfs_status,
+};
+
+static int mpfs_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ unsigned int index = reset_spec->args[0];
+
+ /*
+ * CLK_RESERVED does not map to a clock, but it does map to a reset,
+ * so it has to be accounted for here. It is the reset for the fabric,
+ * so if this reset gets called - do not reset it.
+ */
+ if (index == CLK_RESERVED) {
+ dev_err(rcdev->dev, "Resetting the fabric is not supported\n");
+ return -EINVAL;
+ }
+
+ if (index < MPFS_PERIPH_OFFSET || index >= (MPFS_PERIPH_OFFSET + rcdev->nr_resets)) {
+ dev_err(rcdev->dev, "Invalid reset index %u\n", index);
+ return -EINVAL;
+ }
+
+ return index - MPFS_PERIPH_OFFSET;
+}
+
+static int mpfs_reset_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct reset_controller_dev *rcdev;
+
+ rcdev = devm_kzalloc(dev, sizeof(*rcdev), GFP_KERNEL);
+ if (!rcdev)
+ return -ENOMEM;
+
+ rcdev->dev = dev;
+ rcdev->dev->parent = dev->parent;
+ rcdev->ops = &mpfs_reset_ops;
+ rcdev->of_node = dev->parent->of_node;
+ rcdev->of_reset_n_cells = 1;
+ rcdev->of_xlate = mpfs_reset_xlate;
+ rcdev->nr_resets = MPFS_NUM_RESETS;
+
+ return devm_reset_controller_register(dev, rcdev);
+}
+
+static const struct auxiliary_device_id mpfs_reset_ids[] = {
+ {
+ .name = "clk_mpfs.reset-mpfs",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, mpfs_reset_ids);
+
+static struct auxiliary_driver mpfs_reset_driver = {
+ .probe = mpfs_reset_probe,
+ .id_table = mpfs_reset_ids,
+};
+
+module_auxiliary_driver(mpfs_reset_driver);
+
+MODULE_DESCRIPTION("Microchip PolarFire SoC Reset Driver");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MCHP_CLK_MPFS);
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 4f2189111494..3e0b8f3496ed 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -76,7 +76,9 @@ int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
mutex_lock(&eptdev->ept_lock);
if (eptdev->ept) {
- rpmsg_destroy_ept(eptdev->ept);
+ /* The default endpoint is released by the rpmsg core */
+ if (!eptdev->default_ept)
+ rpmsg_destroy_ept(eptdev->ept);
eptdev->ept = NULL;
}
mutex_unlock(&eptdev->ept_lock);
@@ -424,15 +426,12 @@ int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent
struct rpmsg_channel_info chinfo)
{
struct rpmsg_eptdev *eptdev;
- int ret;
eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
if (IS_ERR(eptdev))
return PTR_ERR(eptdev);
- ret = rpmsg_chrdev_eptdev_add(eptdev, chinfo);
-
- return ret;
+ return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b8de25118ad0..bb63edb507da 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -423,6 +423,7 @@ config RTC_DRV_ISL1208
config RTC_DRV_ISL12022
tristate "Intersil ISL12022"
+ select REGMAP_I2C
help
If you say yes here you get support for the
Intersil ISL12022 RTC chip.
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index bdb1df843c78..610413b4e9ca 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1352,10 +1352,10 @@ static void cmos_check_acpi_rtc_status(struct device *dev,
static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
- cmos_wake_setup(&pnp->dev);
+ int irq, ret;
if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) {
- unsigned int irq = 0;
+ irq = 0;
#ifdef CONFIG_X86
/* Some machines contain a PNP entry for the RTC, but
* don't define the IRQ. It should always be safe to
@@ -1364,13 +1364,17 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
if (nr_legacy_irqs())
irq = RTC_IRQ;
#endif
- return cmos_do_probe(&pnp->dev,
- pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
} else {
- return cmos_do_probe(&pnp->dev,
- pnp_get_resource(pnp, IORESOURCE_IO, 0),
- pnp_irq(pnp, 0));
+ irq = pnp_irq(pnp, 0);
}
+
+ ret = cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
+ if (ret)
+ return ret;
+
+ cmos_wake_setup(&pnp->dev);
+
+ return 0;
}
static void cmos_pnp_remove(struct pnp_dev *pnp)
@@ -1454,10 +1458,9 @@ static inline void cmos_of_init(struct platform_device *pdev) {}
static int __init cmos_platform_probe(struct platform_device *pdev)
{
struct resource *resource;
- int irq;
+ int irq, ret;
cmos_of_init(pdev);
- cmos_wake_setup(&pdev->dev);
if (RTC_IOMAPPED)
resource = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1467,7 +1470,13 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
if (irq < 0)
irq = -1;
- return cmos_do_probe(&pdev->dev, resource, irq);
+ ret = cmos_do_probe(&pdev->dev, resource, irq);
+ if (ret)
+ return ret;
+
+ cmos_wake_setup(&pdev->dev);
+
+ return 0;
}
static int cmos_platform_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index a24331ba8a5f..5db9c737c022 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -132,7 +132,7 @@ ds1685_rtc_bin2bcd(struct ds1685_priv *rtc, u8 val, u8 bin_mask, u8 bcd_mask)
}
/**
- * s1685_rtc_check_mday - check validity of the day of month.
+ * ds1685_rtc_check_mday - check validity of the day of month.
* @rtc: pointer to the ds1685 rtc structure.
* @mday: day of month.
*
diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c
index c2717bb52b2b..c828bc8e05b9 100644
--- a/drivers/rtc/rtc-gamecube.c
+++ b/drivers/rtc/rtc-gamecube.c
@@ -265,18 +265,17 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
* SRAM address as on previous consoles.
*/
ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias);
- if (ret) {
- pr_err("failed to get the RTC bias\n");
- iounmap(hw_srnprot);
- return -1;
- }
/* Reset SRAM access to how it was before, our job here is done. */
if (old != 0x7bf)
iowrite32be(old, hw_srnprot);
+
iounmap(hw_srnprot);
- return 0;
+ if (ret)
+ pr_err("failed to get the RTC bias\n");
+
+ return ret;
}
static const struct regmap_range rtc_rd_ranges[] = {
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 79461ded1a48..ca677c4265e6 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/regmap.h>
/* ISL register offsets */
#define ISL12022_REG_SC 0x00
@@ -42,83 +43,32 @@ static struct i2c_driver isl12022_driver;
struct isl12022 {
struct rtc_device *rtc;
-
- bool write_enabled; /* true if write enable is set */
+ struct regmap *regmap;
};
-
-static int isl12022_read_regs(struct i2c_client *client, uint8_t reg,
- uint8_t *data, size_t n)
-{
- struct i2c_msg msgs[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = data
- }, /* setup read ptr */
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = n,
- .buf = data
- }
- };
-
- int ret;
-
- data[0] = reg;
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "%s: read error, ret=%d\n",
- __func__, ret);
- return -EIO;
- }
-
- return 0;
-}
-
-
-static int isl12022_write_reg(struct i2c_client *client,
- uint8_t reg, uint8_t val)
-{
- uint8_t data[2] = { reg, val };
- int err;
-
- err = i2c_master_send(client, data, sizeof(data));
- if (err != sizeof(data)) {
- dev_err(&client->dev,
- "%s: err=%d addr=%02x, data=%02x\n",
- __func__, err, data[0], data[1]);
- return -EIO;
- }
-
- return 0;
-}
-
-
/*
* In the routines that deal directly with the isl12022 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct isl12022 *isl12022 = dev_get_drvdata(dev);
+ struct regmap *regmap = isl12022->regmap;
uint8_t buf[ISL12022_REG_INT + 1];
int ret;
- ret = isl12022_read_regs(client, ISL12022_REG_SC, buf, sizeof(buf));
+ ret = regmap_bulk_read(regmap, ISL12022_REG_SC, buf, sizeof(buf));
if (ret)
return ret;
if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) {
- dev_warn(&client->dev,
+ dev_warn(dev,
"voltage dropped below %u%%, "
"date and time is not reliable.\n",
buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75);
}
- dev_dbg(&client->dev,
+ dev_dbg(dev,
"%s: raw data is sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, mon=%02x, year=%02x, wday=%02x, "
"sr=%02x, int=%02x",
@@ -141,65 +91,25 @@ static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_mon = bcd2bin(buf[ISL12022_REG_MO] & 0x1F) - 1;
tm->tm_year = bcd2bin(buf[ISL12022_REG_YR]) + 100;
- dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
- "mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour,
- tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+ dev_dbg(dev, "%s: %ptR\n", __func__, tm);
return 0;
}
static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct isl12022 *isl12022 = i2c_get_clientdata(client);
- size_t i;
+ struct isl12022 *isl12022 = dev_get_drvdata(dev);
+ struct regmap *regmap = isl12022->regmap;
int ret;
uint8_t buf[ISL12022_REG_DW + 1];
- dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
- "mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour,
- tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
-
- if (!isl12022->write_enabled) {
-
- ret = isl12022_read_regs(client, ISL12022_REG_INT, buf, 1);
- if (ret)
- return ret;
-
- /* Check if WRTC (write rtc enable) is set factory default is
- * 0 (not set) */
- if (!(buf[0] & ISL12022_INT_WRTC)) {
- dev_info(&client->dev,
- "init write enable and 24 hour format\n");
-
- /* Set the write enable bit. */
- ret = isl12022_write_reg(client,
- ISL12022_REG_INT,
- buf[0] | ISL12022_INT_WRTC);
- if (ret)
- return ret;
-
- /* Write to any RTC register to start RTC, we use the
- * HR register, setting the MIL bit to use the 24 hour
- * format. */
- ret = isl12022_read_regs(client, ISL12022_REG_HR,
- buf, 1);
- if (ret)
- return ret;
-
- ret = isl12022_write_reg(client,
- ISL12022_REG_HR,
- buf[0] | ISL12022_HR_MIL);
- if (ret)
- return ret;
- }
-
- isl12022->write_enabled = true;
- }
+ dev_dbg(dev, "%s: %ptR\n", __func__, tm);
+
+ /* Ensure the write enable bit is set. */
+ ret = regmap_update_bits(regmap, ISL12022_REG_INT,
+ ISL12022_INT_WRTC, ISL12022_INT_WRTC);
+ if (ret)
+ return ret;
/* hours, minutes and seconds */
buf[ISL12022_REG_SC] = bin2bcd(tm->tm_sec);
@@ -216,15 +126,8 @@ static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
buf[ISL12022_REG_DW] = tm->tm_wday & 0x07;
- /* write register's data */
- for (i = 0; i < ARRAY_SIZE(buf); i++) {
- ret = isl12022_write_reg(client, ISL12022_REG_SC + i,
- buf[ISL12022_REG_SC + i]);
- if (ret)
- return -EIO;
- }
-
- return 0;
+ return regmap_bulk_write(isl12022->regmap, ISL12022_REG_SC,
+ buf, sizeof(buf));
}
static const struct rtc_class_ops isl12022_rtc_ops = {
@@ -232,6 +135,12 @@ static const struct rtc_class_ops isl12022_rtc_ops = {
.set_time = isl12022_rtc_set_time,
};
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_write = true,
+};
+
static int isl12022_probe(struct i2c_client *client)
{
struct isl12022 *isl12022;
@@ -243,13 +152,23 @@ static int isl12022_probe(struct i2c_client *client)
GFP_KERNEL);
if (!isl12022)
return -ENOMEM;
+ dev_set_drvdata(&client->dev, isl12022);
+
+ isl12022->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(isl12022->regmap)) {
+ dev_err(&client->dev, "regmap allocation failed\n");
+ return PTR_ERR(isl12022->regmap);
+ }
+
+ isl12022->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(isl12022->rtc))
+ return PTR_ERR(isl12022->rtc);
- i2c_set_clientdata(client, isl12022);
+ isl12022->rtc->ops = &isl12022_rtc_ops;
+ isl12022->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ isl12022->rtc->range_max = RTC_TIMESTAMP_END_2099;
- isl12022->rtc = devm_rtc_device_register(&client->dev,
- isl12022_driver.driver.name,
- &isl12022_rtc_ops, THIS_MODULE);
- return PTR_ERR_OR_ZERO(isl12022->rtc);
+ return devm_rtc_register_device(isl12022->rtc);
}
#ifdef CONFIG_OF
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 6e51df72fd65..c383719292c7 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -257,11 +257,6 @@ static void jz4740_rtc_power_off(void)
kernel_halt();
}
-static void jz4740_rtc_clk_disable(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static const struct of_device_id jz4740_rtc_of_match[] = {
{ .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
{ .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
@@ -329,23 +324,9 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
- clk = devm_clk_get(dev, "rtc");
- if (IS_ERR(clk)) {
- dev_err(dev, "Failed to get RTC clock\n");
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "Failed to enable clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev, jz4740_rtc_clk_disable, clk);
- if (ret) {
- dev_err(dev, "Failed to register devm action\n");
- return ret;
- }
+ clk = devm_clk_get_enabled(dev, "rtc");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Failed to get RTC clock\n");
spin_lock_init(&rtc->lock);
diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c
index f14d1925e0c9..2a479d44f198 100644
--- a/drivers/rtc/rtc-mpfs.c
+++ b/drivers/rtc/rtc-mpfs.c
@@ -193,23 +193,6 @@ static int mpfs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static inline struct clk *mpfs_rtc_init_clk(struct device *dev)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get(dev, "rtc");
- if (IS_ERR(clk))
- return clk;
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ERR_PTR(ret);
-
- devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
- return clk;
-}
-
static irqreturn_t mpfs_rtc_wakeup_irq_handler(int irq, void *dev)
{
struct mpfs_rtc_dev *rtcdev = dev;
@@ -233,7 +216,7 @@ static int mpfs_rtc_probe(struct platform_device *pdev)
{
struct mpfs_rtc_dev *rtcdev;
struct clk *clk;
- u32 prescaler;
+ unsigned long prescaler;
int wakeup_irq, ret;
rtcdev = devm_kzalloc(&pdev->dev, sizeof(struct mpfs_rtc_dev), GFP_KERNEL);
@@ -251,7 +234,7 @@ static int mpfs_rtc_probe(struct platform_device *pdev)
/* range is capped by alarm max, lower reg is 31:0 & upper is 10:0 */
rtcdev->rtc->range_max = GENMASK_ULL(42, 0);
- clk = mpfs_rtc_init_clk(&pdev->dev);
+ clk = devm_clk_get_enabled(&pdev->dev, "rtc");
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -275,14 +258,13 @@ static int mpfs_rtc_probe(struct platform_device *pdev)
/* prescaler hardware adds 1 to reg value */
prescaler = clk_get_rate(devm_clk_get(&pdev->dev, "rtcref")) - 1;
-
if (prescaler > MAX_PRESCALER_COUNT) {
- dev_dbg(&pdev->dev, "invalid prescaler %d\n", prescaler);
+ dev_dbg(&pdev->dev, "invalid prescaler %lu\n", prescaler);
return -EINVAL;
}
writel(prescaler, rtcdev->base + PRESCALER_REG);
- dev_info(&pdev->dev, "prescaler set to: 0x%X \r\n", prescaler);
+ dev_info(&pdev->dev, "prescaler set to: %lu\n", prescaler);
device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 53d4e253e81f..762cf03345f1 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -291,14 +291,6 @@ static const struct rtc_class_ops mxc_rtc_ops = {
.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
};
-static void mxc_rtc_action(void *p)
-{
- struct rtc_plat_data *pdata = p;
-
- clk_disable_unprepare(pdata->clk_ref);
- clk_disable_unprepare(pdata->clk_ipg);
-}
-
static int mxc_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
@@ -341,33 +333,18 @@ static int mxc_rtc_probe(struct platform_device *pdev)
rtc->range_max = (1 << 16) * 86400ULL - 1;
}
- pdata->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ pdata->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
if (IS_ERR(pdata->clk_ipg)) {
dev_err(&pdev->dev, "unable to get ipg clock!\n");
return PTR_ERR(pdata->clk_ipg);
}
- ret = clk_prepare_enable(pdata->clk_ipg);
- if (ret)
- return ret;
-
- pdata->clk_ref = devm_clk_get(&pdev->dev, "ref");
+ pdata->clk_ref = devm_clk_get_enabled(&pdev->dev, "ref");
if (IS_ERR(pdata->clk_ref)) {
- clk_disable_unprepare(pdata->clk_ipg);
dev_err(&pdev->dev, "unable to get ref clock!\n");
return PTR_ERR(pdata->clk_ref);
}
- ret = clk_prepare_enable(pdata->clk_ref);
- if (ret) {
- clk_disable_unprepare(pdata->clk_ipg);
- return ret;
- }
-
- ret = devm_add_action_or_reset(&pdev->dev, mxc_rtc_action, pdata);
- if (ret)
- return ret;
-
rate = clk_get_rate(pdata->clk_ref);
if (rate == 32768)
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index cdc623b3e365..dd170e3efd83 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -521,10 +521,9 @@ static int rv3028_param_get(struct device *dev, struct rtc_param *param)
{
struct rv3028_data *rv3028 = dev_get_drvdata(dev);
int ret;
+ u32 value;
switch(param->param) {
- u32 value;
-
case RTC_PARAM_BACKUP_SWITCH_MODE:
ret = regmap_read(rv3028->regmap, RV3028_BACKUP, &value);
if (ret < 0)
@@ -554,9 +553,9 @@ static int rv3028_param_get(struct device *dev, struct rtc_param *param)
static int rv3028_param_set(struct device *dev, struct rtc_param *param)
{
struct rv3028_data *rv3028 = dev_get_drvdata(dev);
+ u8 mode;
switch(param->param) {
- u8 mode;
case RTC_PARAM_BACKUP_SWITCH_MODE:
switch (param->uvalue) {
case RTC_BSM_DISABLED:
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 40c0f7ed36e0..aae40d20d086 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -107,6 +107,8 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
wdt_pdev->dev.parent = &rtc_pdev->dev;
wdt_pdev->dev.platform_data = &wdt_pdata;
rc = platform_device_add(wdt_pdev);
+ if (rc)
+ platform_device_put(wdt_pdev);
}
if (rc)
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
index 7a0f181d3fef..ba23163cc042 100644
--- a/drivers/rtc/rtc-ti-k3.c
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
@@ -45,14 +46,6 @@
#define K3RTC_MIN_OFFSET (-277761)
#define K3RTC_MAX_OFFSET (277778)
-/**
- * struct ti_k3_rtc_soc_data - Private of compatible data for ti-k3-rtc
- * @unlock_irq_erratum: Has erratum for unlock infinite IRQs (erratum i2327)
- */
-struct ti_k3_rtc_soc_data {
- const bool unlock_irq_erratum;
-};
-
static const struct regmap_config ti_k3_rtc_regmap_config = {
.name = "peripheral-registers",
.reg_bits = 32,
@@ -118,7 +111,6 @@ static const struct reg_field ti_rtc_reg_fields[] = {
* @rtc_dev: rtc device
* @regmap: rtc mmio regmap
* @r_fields: rtc register fields
- * @soc: SoC compatible match data
*/
struct ti_k3_rtc {
unsigned int irq;
@@ -127,7 +119,6 @@ struct ti_k3_rtc {
struct rtc_device *rtc_dev;
struct regmap *regmap;
struct regmap_field *r_fields[K3_RTC_MAX_FIELDS];
- const struct ti_k3_rtc_soc_data *soc;
};
static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f)
@@ -190,11 +181,22 @@ static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv)
/* Skip fence since we are going to check the unlock bit as fence */
ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret,
- !ret, 2, priv->sync_timeout_us);
+ ret, 2, priv->sync_timeout_us);
return ret;
}
+/*
+ * This is the list of SoCs affected by TI's i2327 errata causing the RTC
+ * state-machine to break if not unlocked fast enough during boot. These
+ * SoCs must have the bootloader unlock this device very early in the
+ * boot-flow before we (Linux) can use this device.
+ */
+static const struct soc_device_attribute has_erratum_i2327[] = {
+ { .family = "AM62X", .revision = "SR1.0" },
+ { /* sentinel */ }
+};
+
static int k3rtc_configure(struct device *dev)
{
int ret;
@@ -208,7 +210,7 @@ static int k3rtc_configure(struct device *dev)
*
* In such occurrence, it is assumed that the RTC module is unusable
*/
- if (priv->soc->unlock_irq_erratum) {
+ if (soc_device_match(has_erratum_i2327)) {
ret = k3rtc_check_unlocked(priv);
/* If there is an error OR if we are locked, return error */
if (ret) {
@@ -513,21 +515,12 @@ static struct nvmem_config ti_k3_rtc_nvmem_config = {
static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
{
- int ret;
struct clk *clk;
- clk = devm_clk_get(dev, "osc32k");
+ clk = devm_clk_get_enabled(dev, "osc32k");
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
- if (ret)
- return ret;
-
priv->rate_32k = clk_get_rate(clk);
/* Make sure we are exact 32k clock. Else, try to compensate delay */
@@ -542,24 +535,19 @@ static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
*/
priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4);
- return ret;
+ return 0;
}
static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv)
{
- int ret;
struct clk *clk;
/* Note: VBUS isn't a context clock, it is needed for hardware operation */
- clk = devm_clk_get(dev, "vbus");
+ clk = devm_clk_get_enabled(dev, "vbus");
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+ return 0;
}
static int ti_k3_rtc_probe(struct platform_device *pdev)
@@ -602,8 +590,6 @@ static int ti_k3_rtc_probe(struct platform_device *pdev)
if (IS_ERR(priv->rtc_dev))
return PTR_ERR(priv->rtc_dev);
- priv->soc = of_device_get_match_data(dev);
-
priv->rtc_dev->ops = &ti_k3_rtc_ops;
priv->rtc_dev->range_max = (1ULL << 48) - 1; /* 48Bit seconds */
ti_k3_rtc_nvmem_config.priv = priv;
@@ -635,12 +621,8 @@ static int ti_k3_rtc_probe(struct platform_device *pdev)
return devm_rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config);
}
-static const struct ti_k3_rtc_soc_data ti_k3_am62_data = {
- .unlock_irq_erratum = true,
-};
-
static const struct of_device_id ti_k3_rtc_of_match_table[] = {
- {.compatible = "ti,am62-rtc", .data = &ti_k3_am62_data},
+ {.compatible = "ti,am62-rtc" },
{}
};
MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 1beb596d1434..cb83f81da416 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -426,7 +426,7 @@ dasd_add_busid(const char *bus_id, int features)
if (!devmap) {
/* This bus_id is new. */
new->devindex = dasd_max_devindex++;
- strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+ strscpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features;
new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]);
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 5ae64af9ccea..d4d31cd11d26 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -313,7 +313,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strlcpy(header.busid, dev_name(&device->cdev->dev),
+ strscpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
@@ -356,7 +356,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strlcpy(header.busid, dev_name(&device->cdev->dev),
+ strscpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 5187705bd0f3..93b80da60277 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -614,7 +614,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = -ENAMETOOLONG;
goto seg_list_del;
}
- strlcpy(local_buf, buf, i + 1);
+ strscpy(local_buf, buf, i + 1);
dev_info->num_of_segments = num_of_segments;
rc = dcssblk_is_continuous(dev_info);
if (rc < 0)
diff --git a/drivers/s390/char/hmcdrv_cache.c b/drivers/s390/char/hmcdrv_cache.c
index 1f5bdb237862..43df27ceec11 100644
--- a/drivers/s390/char/hmcdrv_cache.c
+++ b/drivers/s390/char/hmcdrv_cache.c
@@ -154,7 +154,7 @@ static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
/* cache some file info (FTP command, file name and file
* size) unconditionally
*/
- strlcpy(hmcdrv_cache_file.fname, ftp->fname,
+ strscpy(hmcdrv_cache_file.fname, ftp->fname,
HMCDRV_FTP_FIDENT_MAX);
hmcdrv_cache_file.id = ftp->id;
pr_debug("caching cmd %d, file size %zu for '%s'\n",
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index b58df0dd0039..c21dc68e05a0 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -54,10 +54,10 @@ struct tape_class_device *register_tape_dev(
if (!tcd)
return ERR_PTR(-ENOMEM);
- strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+ strscpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
- strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+ strscpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 5c83f71c1d0e..26e3995ac062 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1760,7 +1760,7 @@ tty3270_flush_chars(struct tty_struct *tty)
* Check for visible/invisible input switches
*/
static void
-tty3270_set_termios(struct tty_struct *tty, struct ktermios *old)
+tty3270_set_termios(struct tty_struct *tty, const struct ktermios *old)
{
struct tty3270 *tp;
int new;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 68f49e2e964c..131293f7f152 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -15,12 +15,14 @@
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>
+#include <asm/scsw.h>
#include "vmur.h"
@@ -78,6 +80,8 @@ static struct ccw_driver ur_driver = {
static DEFINE_MUTEX(vmur_mutex);
+static void ur_uevent(struct work_struct *ws);
+
/*
* Allocation, freeing, getting and putting of urdev structures
*
@@ -108,6 +112,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
ccw_device_get_id(cdev, &urd->dev_id);
mutex_init(&urd->io_mutex);
init_waitqueue_head(&urd->wait);
+ INIT_WORK(&urd->uevent_work, ur_uevent);
spin_lock_init(&urd->open_lock);
refcount_set(&urd->ref_count, 1);
urd->cdev = cdev;
@@ -275,6 +280,18 @@ out:
return rc;
}
+static void ur_uevent(struct work_struct *ws)
+{
+ struct urdev *urd = container_of(ws, struct urdev, uevent_work);
+ char *envp[] = {
+ "EVENT=unsol_de", /* Unsolicited device-end interrupt */
+ NULL
+ };
+
+ kobject_uevent_env(&urd->cdev->dev.kobj, KOBJ_CHANGE, envp);
+ urdev_put(urd);
+}
+
/*
* ur interrupt handler, called from the ccw_device layer
*/
@@ -288,12 +305,21 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
irb->scsw.cmd.count);
}
+ urd = dev_get_drvdata(&cdev->dev);
if (!intparm) {
TRACE("ur_int_handler: unsolicited interrupt\n");
+
+ if (scsw_dstat(&irb->scsw) & DEV_STAT_DEV_END) {
+ /*
+ * Userspace might be interested in a transition to
+ * device-ready state.
+ */
+ urdev_get(urd);
+ schedule_work(&urd->uevent_work);
+ }
+
return;
}
- urd = dev_get_drvdata(&cdev->dev);
- BUG_ON(!urd);
/* On special conditions irb is an error pointer */
if (IS_ERR(irb))
urd->io_request_rc = PTR_ERR(irb);
@@ -809,7 +835,6 @@ static int ur_probe(struct ccw_device *cdev)
rc = -ENOMEM;
goto fail_urdev_put;
}
- cdev->handler = ur_int_handler;
/* validate virtual unit record device */
urd->class = get_urd_class(urd);
@@ -823,6 +848,7 @@ static int ur_probe(struct ccw_device *cdev)
}
spin_lock_irq(get_ccwdev_lock(cdev));
dev_set_drvdata(&cdev->dev, urd);
+ cdev->handler = ur_int_handler;
spin_unlock_irq(get_ccwdev_lock(cdev));
mutex_unlock(&vmur_mutex);
@@ -928,6 +954,10 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
rc = -EBUSY;
goto fail_urdev_put;
}
+ if (cancel_work_sync(&urd->uevent_work)) {
+ /* Work not run yet - need to release reference here */
+ urdev_put(urd);
+ }
device_destroy(vmur_class, urd->char_device->dev);
cdev_del(urd->char_device);
urd->char_device = NULL;
@@ -963,6 +993,7 @@ static void ur_remove(struct ccw_device *cdev)
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
urdev_put(dev_get_drvdata(&cdev->dev));
dev_set_drvdata(&cdev->dev, NULL);
+ cdev->handler = NULL;
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
mutex_unlock(&vmur_mutex);
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 608b0719ce17..92d17d7cb47b 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -13,6 +13,7 @@
#define _VMUR_H_
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
@@ -76,6 +77,7 @@ struct urdev {
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
+ struct work_struct uevent_work; /* work to send uevent */
};
/*
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f6da215ccf9f..6165e6aae762 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -30,6 +30,7 @@
#include <asm/checksum.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
+#include <asm/maccess.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 4bb7965daa0f..1a9714af51e4 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -87,7 +87,7 @@ int qdio_allocate_dbf(struct qdio_irq *irq_ptr)
debug_unregister(irq_ptr->debug_area);
return -ENOMEM;
}
- strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+ strscpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
new_entry->dbf_info = irq_ptr->debug_area;
mutex_lock(&qdio_dbf_list_mutex);
list_add(&new_entry->dbf_list, &qdio_dbf_list);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 86d9e428357b..7f5402fe857a 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mdev.h>
@@ -142,7 +141,6 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
- atomic_set(&private->avail, 1);
private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
GFP_KERNEL);
@@ -203,7 +201,6 @@ static void vfio_ccw_free_private(struct vfio_ccw_private *private)
mutex_destroy(&private->io_mutex);
kfree(private);
}
-
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
@@ -222,7 +219,12 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
- ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
+ private->mdev_type.sysfs_name = "io";
+ private->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)";
+ private->mdev_types[0] = &private->mdev_type;
+ ret = mdev_register_parent(&private->parent, &sch->dev,
+ &vfio_ccw_mdev_driver,
+ private->mdev_types, 1);
if (ret)
goto out_free;
@@ -241,7 +243,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- mdev_unregister_device(&sch->dev);
+ mdev_unregister_parent(&private->parent);
dev_set_drvdata(&sch->dev, NULL);
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 4a806a2273b5..6ae4d012d800 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -11,7 +11,6 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>
@@ -45,47 +44,14 @@ static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
vfio_ccw_mdev_reset(private);
}
-static ssize_t name_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
-}
-static MDEV_TYPE_ATTR_RO(name);
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
-}
-static MDEV_TYPE_ATTR_RO(device_api);
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
+static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
- dev_get_drvdata(mtype_get_parent_dev(mtype));
+ container_of(vdev, struct vfio_ccw_private, vdev);
- return sprintf(buf, "%d\n", atomic_read(&private->avail));
+ init_completion(&private->release_comp);
+ return 0;
}
-static MDEV_TYPE_ATTR_RO(available_instances);
-
-static struct attribute *mdev_types_attrs[] = {
- &mdev_type_attr_name.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_available_instances.attr,
- NULL,
-};
-
-static struct attribute_group mdev_type_group = {
- .name = "io",
- .attrs = mdev_types_attrs,
-};
-
-static struct attribute_group *mdev_type_groups[] = {
- &mdev_type_group,
- NULL,
-};
static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
{
@@ -95,12 +61,9 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
if (private->state == VFIO_CCW_STATE_NOT_OPER)
return -ENODEV;
- if (atomic_dec_if_positive(&private->avail) < 0)
- return -EPERM;
-
- memset(&private->vdev, 0, sizeof(private->vdev));
- vfio_init_group_dev(&private->vdev, &mdev->dev,
- &vfio_ccw_dev_ops);
+ ret = vfio_init_device(&private->vdev, &mdev->dev, &vfio_ccw_dev_ops);
+ if (ret)
+ return ret;
VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
private->sch->schid.cssid,
@@ -109,16 +72,32 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
ret = vfio_register_emulated_iommu_dev(&private->vdev);
if (ret)
- goto err_atomic;
+ goto err_put_vdev;
dev_set_drvdata(&mdev->dev, private);
return 0;
-err_atomic:
- vfio_uninit_group_dev(&private->vdev);
- atomic_inc(&private->avail);
+err_put_vdev:
+ vfio_put_device(&private->vdev);
return ret;
}
+static void vfio_ccw_mdev_release_dev(struct vfio_device *vdev)
+{
+ struct vfio_ccw_private *private =
+ container_of(vdev, struct vfio_ccw_private, vdev);
+
+ /*
+ * We cannot free vfio_ccw_private here because it includes
+	 * parent info which must be freed by the css driver.
+ *
+ * Use a workaround by memset'ing the core device part and
+ * then notifying the remove path that all active references
+ * to this device have been released.
+ */
+ memset(vdev, 0, sizeof(*vdev));
+ complete(&private->release_comp);
+}
+
static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
@@ -130,8 +109,16 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
vfio_unregister_group_dev(&private->vdev);
- vfio_uninit_group_dev(&private->vdev);
- atomic_inc(&private->avail);
+ vfio_put_device(&private->vdev);
+ /*
+	 * Wait until all active references on the mdev are released so
+	 * it is safe to defer kfree() to a later point.
+ *
+ * TODO: the clean fix is to split parent/mdev info from ccw
+ * private structure so each can be managed in its own life
+ * cycle.
+ */
+ wait_for_completion(&private->release_comp);
}
static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
@@ -592,6 +579,8 @@ static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
}
static const struct vfio_device_ops vfio_ccw_dev_ops = {
+ .init = vfio_ccw_mdev_init_dev,
+ .release = vfio_ccw_mdev_release_dev,
.open_device = vfio_ccw_mdev_open_device,
.close_device = vfio_ccw_mdev_close_device,
.read = vfio_ccw_mdev_read,
@@ -602,6 +591,8 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
};
struct mdev_driver vfio_ccw_mdev_driver = {
+ .device_api = VFIO_DEVICE_API_CCW_STRING,
+ .max_instances = 1,
.driver = {
.name = "vfio_ccw_mdev",
.owner = THIS_MODULE,
@@ -609,5 +600,4 @@ struct mdev_driver vfio_ccw_mdev_driver = {
},
.probe = vfio_ccw_mdev_probe,
.remove = vfio_ccw_mdev_remove,
- .supported_type_groups = mdev_type_groups,
};
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index cd24b7fada91..bd5fb81456af 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/vfio_ccw.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include <asm/crw.h>
#include <asm/debug.h>
@@ -72,7 +73,6 @@ struct vfio_ccw_crw {
* @sch: pointer to the subchannel
* @state: internal state of the device
* @completion: synchronization helper of the I/O completion
- * @avail: available for creating a mediated device
* @io_region: MMIO region to input/output I/O arguments/results
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
@@ -88,13 +88,14 @@ struct vfio_ccw_crw {
* @req_trigger: eventfd ctx for signaling userspace to return device
* @io_work: work for deferral process of I/O handling
* @crw_work: work for deferral process of CRW handling
+ * @release_comp: synchronization helper for vfio device release
+ * @parent: parent data structures for mdevs created
*/
struct vfio_ccw_private {
struct vfio_device vdev;
struct subchannel *sch;
int state;
struct completion *completion;
- atomic_t avail;
struct ccw_io_region *io_region;
struct mutex io_mutex;
struct vfio_ccw_region *region;
@@ -113,6 +114,12 @@ struct vfio_ccw_private {
struct eventfd_ctx *req_trigger;
struct work_struct io_work;
struct work_struct crw_work;
+
+ struct completion release_comp;
+
+ struct mdev_parent parent;
+ struct mdev_type mdev_type;
+ struct mdev_type *mdev_types[1];
} __aligned(8);
int vfio_ccw_sch_quiesce(struct subchannel *sch);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index ee82207b4e60..0b4cc8c597ae 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -684,42 +684,41 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
AP_DOMAINS);
}
-static int vfio_ap_mdev_probe(struct mdev_device *mdev)
+static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
- struct ap_matrix_mdev *matrix_mdev;
- int ret;
-
- if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
- return -EPERM;
-
- matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
- if (!matrix_mdev) {
- ret = -ENOMEM;
- goto err_dec_available;
- }
- vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev,
- &vfio_ap_matrix_dev_ops);
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
- matrix_mdev->mdev = mdev;
+ matrix_mdev->mdev = to_mdev_device(vdev->dev);
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
matrix_mdev->pqap_hook = handle_pqap;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
hash_init(matrix_mdev->qtable.queues);
+ return 0;
+}
+
+static int vfio_ap_mdev_probe(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ int ret;
+
+ matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
+ &vfio_ap_matrix_dev_ops);
+ if (IS_ERR(matrix_mdev))
+ return PTR_ERR(matrix_mdev);
+
ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
- goto err_list;
+ goto err_put_vdev;
dev_set_drvdata(&mdev->dev, matrix_mdev);
mutex_lock(&matrix_dev->mdevs_lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
-err_list:
- vfio_uninit_group_dev(&matrix_mdev->vdev);
- kfree(matrix_mdev);
-err_dec_available:
- atomic_inc(&matrix_dev->available_instances);
+err_put_vdev:
+ vfio_put_device(&matrix_mdev->vdev);
return ret;
}
@@ -766,6 +765,11 @@ static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
}
}
+static void vfio_ap_mdev_release_dev(struct vfio_device *vdev)
+{
+ vfio_free_device(vdev);
+}
+
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
@@ -779,54 +783,9 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->mdevs_lock);
mutex_unlock(&matrix_dev->guests_lock);
- vfio_uninit_group_dev(&matrix_mdev->vdev);
- kfree(matrix_mdev);
- atomic_inc(&matrix_dev->available_instances);
+ vfio_put_device(&matrix_mdev->vdev);
}
-static ssize_t name_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
-}
-
-static MDEV_TYPE_ATTR_RO(name);
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n",
- atomic_read(&matrix_dev->available_instances));
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
-}
-
-static MDEV_TYPE_ATTR_RO(device_api);
-
-static struct attribute *vfio_ap_mdev_type_attrs[] = {
- &mdev_type_attr_name.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_available_instances.attr,
- NULL,
-};
-
-static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
- .name = VFIO_AP_MDEV_TYPE_HWVIRT,
- .attrs = vfio_ap_mdev_type_attrs,
-};
-
-static struct attribute_group *vfio_ap_mdev_type_groups[] = {
- &vfio_ap_mdev_hwvirt_type_group,
- NULL,
-};
-
#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
"already assigned to %s"
@@ -1824,6 +1783,8 @@ static const struct attribute_group vfio_queue_attr_group = {
};
static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
+ .init = vfio_ap_mdev_init_dev,
+ .release = vfio_ap_mdev_release_dev,
.open_device = vfio_ap_mdev_open_device,
.close_device = vfio_ap_mdev_close_device,
.ioctl = vfio_ap_mdev_ioctl,
@@ -1831,6 +1792,8 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
};
static struct mdev_driver vfio_ap_matrix_driver = {
+ .device_api = VFIO_DEVICE_API_AP_STRING,
+ .max_instances = MAX_ZDEV_ENTRIES_EXT,
.driver = {
.name = "vfio_ap_mdev",
.owner = THIS_MODULE,
@@ -1839,20 +1802,22 @@ static struct mdev_driver vfio_ap_matrix_driver = {
},
.probe = vfio_ap_mdev_probe,
.remove = vfio_ap_mdev_remove,
- .supported_type_groups = vfio_ap_mdev_type_groups,
};
int vfio_ap_mdev_register(void)
{
int ret;
- atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
-
ret = mdev_register_driver(&vfio_ap_matrix_driver);
if (ret)
return ret;
- ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver);
+ matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
+ matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
+ matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
+ ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
+ &vfio_ap_matrix_driver,
+ matrix_dev->mdev_types, 1);
if (ret)
goto err_driver;
return 0;
@@ -1864,7 +1829,7 @@ err_driver:
void vfio_ap_mdev_unregister(void)
{
- mdev_unregister_device(&matrix_dev->device);
+ mdev_unregister_parent(&matrix_dev->parent);
mdev_unregister_driver(&vfio_ap_matrix_driver);
}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index d782cf463eab..2eddd5f34ed3 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -13,7 +13,6 @@
#define _VFIO_AP_PRIVATE_H_
#include <linux/types.h>
-#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -30,7 +29,6 @@
* struct ap_matrix_dev - Contains the data for the matrix device.
*
* @device: generic device structure associated with the AP matrix device
- * @available_instances: number of mediated matrix devices that can be created
* @info: the struct containing the output from the PQAP(QCI) instruction
* @mdev_list: the list of mediated matrix devices created
* @mdevs_lock: mutex for locking the AP matrix device. This lock will be
@@ -47,12 +45,14 @@
*/
struct ap_matrix_dev {
struct device device;
- atomic_t available_instances;
struct ap_config_info info;
struct list_head mdev_list;
struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */
struct ap_driver *vfio_ap_drv;
struct mutex guests_lock; /* serializes access to each KVM guest */
+ struct mdev_parent parent;
+ struct mdev_type mdev_type;
+ struct mdev_type *mdev_types[];
};
extern struct ap_matrix_dev *matrix_dev;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index e0fdd54bfeb7..37b551bd43bf 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1566,7 +1566,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
goto out_dev;
}
- strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
+ strscpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
dev_info(&dev->dev,
"setup OK : r/w = %s/%s, protocol : %d\n",
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 98c4864932d2..0ff61d00feb1 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -28,7 +28,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
"fsm(%s): init_fsm: Couldn't alloc instance\n", name);
return NULL;
}
- strlcpy(this->name, name, sizeof(this->name));
+ strscpy(this->name, name, sizeof(this->name));
init_waitqueue_head(&this->wait_q);
f = kzalloc(sizeof(fsm), order);
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 9eba0a32e9f9..e250f49535fa 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -188,9 +188,9 @@ static void qeth_get_drvinfo(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
+ strscpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
sizeof(info->driver));
- strlcpy(info->fw_version, card->info.mcl_level,
+ strscpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index fd2f1c31bd21..df782646e856 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -103,7 +103,7 @@ static void __init zfcp_init_device_setup(char *devstr)
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
- strlcpy(busid, token, ZFCP_BUS_ID_SIZE);
+ strscpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index b61acbb09be3..77917b339870 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -885,7 +885,7 @@ static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
dev_name(&adapter->ccw_device->dev),
init_utsname()->nodename);
else
- strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+ strscpy(fc_host_symbolic_name(adapter->scsi_host),
gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
return 0;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index cd823ff5deab..6cb9cca9565b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2006,7 +2006,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
retval = pci_enable_device(pdev);
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
- goto out_disable_device;
+ return -ENODEV;
}
pci_set_master(pdev);
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a853c5497af6..ffdecb12d654 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -912,7 +912,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
data_buffer_length_adjusted = (data_buffer_length + 511) & ~511;
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
retval = -ENOMEM;
goto out;
@@ -921,7 +921,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
tw_ioctl = (TW_New_Ioctl *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl) - 1))
+ if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl)))
goto out2;
passthru = (TW_Passthru *)&tw_ioctl->firmware_command;
@@ -966,15 +966,15 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
/* Load the sg list */
switch (TW_SGL_OUT(tw_ioctl->firmware_command.opcode__sgloffset)) {
case 2:
- tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl);
tw_ioctl->firmware_command.byte8.param.sgl[0].length = data_buffer_length_adjusted;
break;
case 3:
- tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl);
tw_ioctl->firmware_command.byte8.io.sgl[0].length = data_buffer_length_adjusted;
break;
case 5:
- passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl);
passthru->sg_list[0].length = data_buffer_length_adjusted;
break;
}
@@ -1017,12 +1017,12 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
}
/* Now copy the response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length - 1))
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length))
goto out2;
retval = 0;
out2:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), cpu_addr, dma_handle);
out:
mutex_unlock(&tw_dev->ioctl_lock);
mutex_unlock(&tw_mutex);
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index e8f3f081b7d8..120a087bdf3c 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -348,7 +348,7 @@ typedef struct TAG_TW_New_Ioctl {
unsigned int data_buffer_length;
unsigned char padding [508];
TW_Command firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_New_Ioctl;
/* GetParam descriptor */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 955cb69a5418..03e71e3d5e5b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -2,9 +2,10 @@
menu "SCSI device support"
config SCSI_MOD
- tristate
- default y if SCSI=n || SCSI=y
- default m if SCSI=m
+ tristate
+ default y if SCSI=n || SCSI=y
+ default m if SCSI=m
+ depends on BLOCK
config RAID_ATTRS
tristate "RAID Transport Class"
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 928099163f0f..f2f3405cdec5 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -194,7 +194,7 @@ struct ahd_linux_iocell_opts
#define AIC79XX_PRECOMP_INDEX 0
#define AIC79XX_SLEWRATE_INDEX 1
#define AIC79XX_AMPLITUDE_INDEX 2
-static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+static struct ahd_linux_iocell_opts aic79xx_iocell_info[] __ro_after_init =
{
AIC79XX_DEFAULT_IOOPTS,
AIC79XX_DEFAULT_IOOPTS,
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 9aafe0002ab1..05e1a63e00c3 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1366,9 +1366,9 @@ csio_show_hw_state(struct device *dev,
struct csio_hw *hw = csio_lnode_to_hw(ln);
if (csio_is_hw_ready(hw))
- return snprintf(buf, PAGE_SIZE, "ready\n");
- else
- return snprintf(buf, PAGE_SIZE, "not ready\n");
+ return sysfs_emit(buf, "ready\n");
+
+ return sysfs_emit(buf, "not ready\n");
}
/* Device reset */
@@ -1430,7 +1430,7 @@ csio_show_dbg_level(struct device *dev,
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
+ return sysfs_emit(buf, "%x\n", ln->params.log_level);
}
/* Store debug level */
@@ -1476,7 +1476,7 @@ csio_show_num_reg_rnodes(struct device *dev,
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
+ return sysfs_emit(buf, "%d\n", ln->num_reg_rnodes);
}
static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 53d91bf9c12a..c07d2e3b4bcf 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -254,7 +254,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
@@ -282,7 +282,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else {
struct cpl_t6_act_open_req *req =
(struct cpl_t6_act_open_req *)skb->head;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index e7be95ee7d64..cd1324ec742d 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -132,7 +132,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
break;
case SISL_AFU_RC_OUT_OF_DATA_BUFS:
/* Retry */
- scp->result = (DID_ALLOC_FAILURE << 16);
+ scp->result = (DID_ERROR << 16);
break;
default:
scp->result = (DID_ERROR << 16);
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
index ff2ad9b38575..dd3437412ffc 100644
--- a/drivers/scsi/esas2r/atioctl.h
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -831,6 +831,7 @@ struct __packed atto_hba_trace {
u32 total_length;
u32 trace_mask;
u8 reserved2[48];
+ u8 contents[];
};
#define ATTO_FUNC_SCSI_PASS_THRU 0x04
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 08f4e43c7d9e..e003d923acbf 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -947,10 +947,9 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
break;
}
- memcpy(trc + 1,
+ memcpy(trc->contents,
a->fw_coredump_buff + offset,
len);
-
hi->data_length = len;
} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
memset(a->fw_coredump_buff, 0,
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 39e16eab47aa..ddc048069af2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2233,7 +2233,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
fip->probe_tries++;
- wait = prandom_u32() % FIP_VN_PROBE_WAIT;
+ wait = prandom_u32_max(FIP_VN_PROBE_WAIT);
} else
wait = FIP_VN_RLIM_INT;
mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
@@ -3125,7 +3125,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
fcoe_all_vn2vn, 0);
fip->port_ka_time = jiffies +
msecs_to_jiffies(FIP_VN_BEACON_INT +
- (prandom_u32() % FIP_VN_BEACON_FUZZ));
+ prandom_u32_max(FIP_VN_BEACON_FUZZ));
}
if (time_before(fip->port_ka_time, next_time))
next_time = fip->port_ka_time;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 24c83bc4f5dc..9aebf4a26b13 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -649,6 +649,7 @@ extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
int enable);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
gfp_t gfp_flags);
+extern void hisi_sas_phy_bcast(struct hisi_sas_phy *phy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
struct hisi_sas_slot *slot);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 33af5b8dede2..699b07abb6b0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1341,6 +1341,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct asd_sas_port *_sas_port = NULL;
int phy_no;
@@ -1369,6 +1370,12 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
}
}
+ /*
+	 * Ensure any bcast events are processed prior to the async nexus
+	 * reset calls made from hisi_sas_clear_nexus_ha() ->
+	 * hisi_sas_async_I_T_nexus_reset().
+ */
+ sas_drain_work(sas_ha);
}
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
@@ -1527,9 +1534,9 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return rc;
}
+ clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_done(hisi_hba);
- clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
dev_info(dev, "controller reset complete\n");
return 0;
@@ -1816,12 +1823,14 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
ASYNC_DOMAIN_EXCLUSIVE(async);
- int i;
+ int i, ret;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
- if (!r.done)
- return TMF_RESP_FUNC_FAILED;
+ if (!r.done) {
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
@@ -1838,7 +1847,9 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
async_synchronize_full_domain(&async);
hisi_sas_release_tasks(hisi_hba);
- return TMF_RESP_FUNC_COMPLETE;
+ ret = TMF_RESP_FUNC_COMPLETE;
+out:
+ return ret;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1982,6 +1993,22 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
+void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
+{
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct sas_ha_struct *sha = &hisi_hba->sha;
+
+ if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ return;
+
+ if (test_bit(SAS_HA_FROZEN, &sha->state))
+ return;
+
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);
+
void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
int i;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 349546bacb2b..d643c5a49aa9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1412,9 +1412,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
- if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ hisi_sas_phy_bcast(phy);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index c37027276162..cded42f4ca44 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2811,15 +2811,12 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ hisi_sas_phy_bcast(phy);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index d716e5632d0f..d56b4bfd2767 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1626,15 +1626,12 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ hisi_sas_phy_bcast(phy);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -2786,7 +2783,6 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
struct hisi_hba *hisi_hba = shost_priv(shost);
int ret = hisi_sas_slave_configure(sdev);
struct device *dev = hisi_hba->dev;
- unsigned int max_sectors;
if (ret)
return ret;
@@ -2802,12 +2798,6 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
}
}
- /* Set according to IOMMU IOVA caching limit */
- max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
- (PAGE_SIZE * 32) >> SECTOR_SHIFT);
-
- blk_queue_max_hw_sectors(sdev->request_queue, max_sectors);
-
return 0;
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a47bcce3c9c7..f8e832b1bc46 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6233,8 +6233,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
offset = (i + 1) % HPSA_NRESERVED_CMDS;
continue;
}
- set_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ set_bit(i, h->cmd_pool_bits);
break; /* it's ours now. */
}
hpsa_cmd_partial_init(h, i, c);
@@ -6261,8 +6260,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
int i;
i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ clear_bit(i, h->cmd_pool_bits);
}
}
@@ -8030,7 +8028,7 @@ out_disable:
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
- kfree(h->cmd_pool_bits);
+ bitmap_free(h->cmd_pool_bits);
h->cmd_pool_bits = NULL;
if (h->cmd_pool) {
dma_free_coherent(&h->pdev->dev,
@@ -8052,9 +8050,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
- h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
- sizeof(unsigned long),
- GFP_KERNEL);
+ h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL);
h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->cmd_pool),
&h->cmd_pool_dhandle, GFP_KERNEL);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index f18b770626e6..7e8903718245 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1044,10 +1044,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
req->channel = scp->device->channel;
req->target = scp->device->id;
req->lun = scp->device->lun;
- req->header.size = cpu_to_le32(
- sizeof(struct hpt_iop_request_scsi_command)
- - sizeof(struct hpt_iopsg)
- + sg_count * sizeof(struct hpt_iopsg));
+ req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count));
memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
hba->ops->post_req(hba, _req);
@@ -1397,8 +1394,8 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
host->max_cmd_len = 16;
- req_size = sizeof(struct hpt_iop_request_scsi_command)
- + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
+ req_size = struct_size((struct hpt_iop_request_scsi_command *)0,
+ sg_list, hba->max_sg_descriptors);
if ((req_size & 0x1f) != 0)
req_size = (req_size + 0x1f) & ~0x1f;
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 363d5a16243f..394ef6aa469e 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -228,7 +228,7 @@ struct hpt_iop_request_scsi_command {
u8 pad1;
u8 cdb[16];
__le32 dataxfer_length;
- struct hpt_iopsg sg_list[1];
+ struct hpt_iopsg sg_list[];
};
struct hpt_iop_request_ioctl_command {
@@ -237,7 +237,7 @@ struct hpt_iop_request_ioctl_command {
__le32 inbuf_size;
__le32 outbuf_size;
__le32 bytes_returned;
- u8 buf[1];
+ u8 buf[];
/* out data should be put at buf[(inbuf_size+3)&~3] */
};
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index eee1a24f7e15..e8770310a64b 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -444,7 +444,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
break;
/*
- * Can transition from this state to to unconfiguring
+ * Can transition from this state to unconfiguring
* or err disconnect.
*/
case ERR_DISCONNECT_RECONNECT:
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index f585d6e5fab9..375261d67619 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -1166,7 +1166,7 @@ static void tulip_scsi(struct initio_host * host)
return;
}
if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
- if ((scb = host->active) != NULL)
+ if (host->active)
initio_next_state(host);
return;
}
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 29b1bd755afe..5fb1f364e815 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -595,6 +595,8 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
+ mutex_init(&tcp_sw_conn->sock_lock);
+
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto free_conn;
@@ -629,11 +631,15 @@ free_conn:
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
- struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct socket *sock = tcp_sw_conn->sock;
+ /*
+ * The iscsi transport class will make sure we are not called in
+ * parallel with start, stop, bind, and destroy. However, this can be
+ * called twice if userspace does a stop then a destroy.
+ */
if (!sock)
return;
@@ -649,9 +655,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_suspend_rx(conn);
- spin_lock_bh(&session->frwd_lock);
+ mutex_lock(&tcp_sw_conn->sock_lock);
tcp_sw_conn->sock = NULL;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
sockfd_put(sock);
}
@@ -703,7 +709,6 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -723,10 +728,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
if (err)
goto free_socket;
- spin_lock_bh(&session->frwd_lock);
+ mutex_lock(&tcp_sw_conn->sock_lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -763,8 +768,15 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ if (!tcp_sw_conn->sock) {
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ return -ENOTCONN;
+ }
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
+ mutex_unlock(&tcp_sw_conn->sock_lock);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -779,8 +791,8 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
struct sockaddr_in6 addr;
struct socket *sock;
int rc;
@@ -790,21 +802,36 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_LOCAL_PORT:
spin_lock_bh(&conn->session->frwd_lock);
- if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ if (!conn->session->leadconn) {
spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
- sock = tcp_sw_conn->sock;
- sock_hold(sock->sk);
+ /*
+ * The conn has been set up and bound, so just grab a ref
+ * in case a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&conn->session->frwd_lock);
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock) {
+ rc = -ENOTCONN;
+ goto sock_unlock;
+ }
+
if (param == ISCSI_PARAM_LOCAL_PORT)
rc = kernel_getsockname(sock,
(struct sockaddr *)&addr);
else
rc = kernel_getpeername(sock,
(struct sockaddr *)&addr);
- sock_put(sock->sk);
+sock_unlock:
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
@@ -842,17 +869,21 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
}
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
- sock = tcp_sw_conn->sock;
- if (!sock) {
- spin_unlock_bh(&session->frwd_lock);
- return -ENOTCONN;
- }
- sock_hold(sock->sk);
+ /*
+ * The conn has been set up and bound, so just grab a ref
+ * in case a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&session->frwd_lock);
- rc = kernel_getsockname(sock,
- (struct sockaddr *)&addr);
- sock_put(sock->sk);
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock)
+ rc = -ENOTCONN;
+ else
+ rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 850a018aefb9..68e14a344904 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,9 @@ struct iscsi_sw_tcp_send {
struct iscsi_sw_tcp_conn {
struct socket *sock;
+ /* Taken when accessing the sock from the netlink/sysfs interface */
+ struct mutex sock_lock;
+
struct work_struct recvwork;
bool queue_recv;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fa2209080cc2..5ce251830104 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -67,7 +67,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
pr_notice("executing SMP task failed:%d\n", res);
break;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e6a083d098a1..9ad233b40a9e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -68,8 +68,6 @@ struct lpfc_sli2_slim;
#define LPFC_MIN_TGT_QDEPTH 10
#define LPFC_MAX_TGT_QDEPTH 0xFFFF
-#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
- collection. */
/*
* Following time intervals are used of adjusting SCSI device
* queue depths when there are driver resource error or Firmware
@@ -405,6 +403,7 @@ struct lpfc_trunk_link {
link1,
link2,
link3;
+ u32 phy_lnk_speed;
};
/* Format of congestion module parameters */
@@ -732,8 +731,6 @@ struct lpfc_vport {
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
- uint8_t stat_data_enabled;
- uint8_t stat_data_blocked;
struct list_head rcv_buffer_list;
unsigned long rcv_buffer_time_stamp;
uint32_t vport_flag;
@@ -1436,13 +1433,6 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- /* data structure used for latency data collection */
-#define LPFC_NO_BUCKET 0
-#define LPFC_LINEAR_BUCKET 1
-#define LPFC_POWER2_BUCKET 2
- uint8_t bucket_type;
- uint32_t bucket_base;
- uint32_t bucket_step;
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
@@ -1564,16 +1554,13 @@ struct lpfc_hba {
/* cgn_reg_signal and cgn_init_reg_signal use
* enum fc_edc_cg_signal_cap_types
*/
- u16 cgn_fpin_frequency;
+ u16 cgn_fpin_frequency; /* In units of msecs */
#define LPFC_FPIN_INIT_FREQ 0xffff
u32 cgn_sig_freq;
u32 cgn_acqe_cnt;
/* RX monitor handling for CMF */
- struct rxtable_entry *rxtable; /* RX_monitor information */
- atomic_t rxtable_idx_head;
-#define LPFC_RXMONITOR_TABLE_IN_USE (LPFC_MAX_RXMONITOR_ENTRY + 73)
- atomic_t rxtable_idx_tail;
+ struct lpfc_rx_info_monitor *rx_monitor;
atomic_t rx_max_read_cnt; /* Maximum read bytes */
uint64_t rx_block_cnt;
@@ -1610,10 +1597,11 @@ struct lpfc_hba {
char os_host_name[MAXHOSTNAMELEN];
- /* SCSI host template information - for physical port */
- struct scsi_host_template port_template;
- /* SCSI host template information - for all vports */
- struct scsi_host_template vport_template;
+ /* LD Signaling */
+ u32 degrade_activate_threshold;
+ u32 degrade_deactivate_threshold;
+ u32 fec_degrade_interval;
+
atomic_t dbg_log_idx;
atomic_t dbg_log_cnt;
atomic_t dbg_log_dmping;
@@ -1622,7 +1610,7 @@ struct lpfc_hba {
#define LPFC_MAX_RXMONITOR_ENTRY 800
#define LPFC_MAX_RXMONITOR_DUMP 32
-struct rxtable_entry {
+struct rx_info_entry {
uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
uint64_t total_bytes; /* Total no of read bytes requested */
uint64_t rcv_bytes; /* Total no of read bytes completed */
@@ -1637,6 +1625,13 @@ struct rxtable_entry {
uint32_t timer_interval;
};
+struct lpfc_rx_info_monitor {
+ struct rx_info_entry *ring; /* info organized in a circular buffer */
+ u32 head_idx, tail_idx; /* index to head/tail of ring */
+ spinlock_t lock; /* spinlock for ring */
+ u32 entries; /* number of entries / size of ring */
+};
+
static inline struct Scsi_Host *
lpfc_shost_from_vport(struct lpfc_vport *vport)
{
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 09cf2cd0ae60..ef1481326fd7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4093,333 +4093,6 @@ lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
*/
static DEVICE_ATTR_RO(lpfc_static_vport);
-/**
- * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @attr: Unused.
- * @buf: Data buffer.
- * @count: Size of the data buffer.
- *
- * This function get called when a user write to the lpfc_stat_data_ctrl
- * sysfs file. This function parse the command written to the sysfs file
- * and take appropriate action. These commands are used for controlling
- * driver statistical data collection.
- * Following are the command this function handles.
- *
- * setbucket <bucket_type> <base> <step>
- * = Set the latency buckets.
- * destroybucket = destroy all the buckets.
- * start = start data collection
- * stop = stop data collection
- * reset = reset the collected data
- **/
-static ssize_t
-lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-#define LPFC_MAX_DATA_CTRL_LEN 1024
- static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
- unsigned long i;
- char *str_ptr, *token;
- struct lpfc_vport **vports;
- struct Scsi_Host *v_shost;
- char *bucket_type_str, *base_str, *step_str;
- unsigned long base, step, bucket_type;
-
- if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
- if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
- return -EINVAL;
-
- strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
- str_ptr = &bucket_data[0];
- /* Ignore this token - this is command token */
- token = strsep(&str_ptr, "\t ");
- if (!token)
- return -EINVAL;
-
- bucket_type_str = strsep(&str_ptr, "\t ");
- if (!bucket_type_str)
- return -EINVAL;
-
- if (!strncmp(bucket_type_str, "linear", strlen("linear")))
- bucket_type = LPFC_LINEAR_BUCKET;
- else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
- bucket_type = LPFC_POWER2_BUCKET;
- else
- return -EINVAL;
-
- base_str = strsep(&str_ptr, "\t ");
- if (!base_str)
- return -EINVAL;
- base = simple_strtoul(base_str, NULL, 0);
-
- step_str = strsep(&str_ptr, "\t ");
- if (!step_str)
- return -EINVAL;
- step = simple_strtoul(step_str, NULL, 0);
- if (!step)
- return -EINVAL;
-
- /* Block the data collection for every vport */
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(v_shost->host_lock);
- /* Block and reset data collection */
- vports[i]->stat_data_blocked = 1;
- if (vports[i]->stat_data_enabled)
- lpfc_vport_reset_stat_data(vports[i]);
- spin_unlock_irq(v_shost->host_lock);
- }
-
- /* Set the bucket attributes */
- phba->bucket_type = bucket_type;
- phba->bucket_base = base;
- phba->bucket_step = step;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
-
- /* Unblock data collection */
- spin_lock_irq(v_shost->host_lock);
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(v_shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
- vports[i]->stat_data_blocked = 1;
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- phba->bucket_type = LPFC_NO_BUCKET;
- phba->bucket_base = 0;
- phba->bucket_step = 0;
- return strlen(buf);
- }
-
- if (!strncmp(buf, "start", strlen("start"))) {
- /* If no buckets configured return error */
- if (phba->bucket_type == LPFC_NO_BUCKET)
- return -EINVAL;
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_alloc_bucket(vport);
- vport->stat_data_enabled = 1;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "stop", strlen("stop"))) {
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled == 0) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "reset", strlen("reset"))) {
- if ((phba->bucket_type == LPFC_NO_BUCKET)
- || !vport->stat_data_enabled)
- return strlen(buf);
- spin_lock_irq(shost->host_lock);
- vport->stat_data_blocked = 1;
- lpfc_vport_reset_stat_data(vport);
- vport->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- return -EINVAL;
-}
-
-
-/**
- * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @attr: Unused.
- * @buf: Data buffer.
- *
- * This function is the read call back function for
- * lpfc_stat_data_ctrl sysfs file. This function report the
- * current statistical data collection state.
- **/
-static ssize_t
-lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int index = 0;
- int i;
- char *bucket_type;
- unsigned long bucket_value;
-
- switch (phba->bucket_type) {
- case LPFC_LINEAR_BUCKET:
- bucket_type = "linear";
- break;
- case LPFC_POWER2_BUCKET:
- bucket_type = "power2";
- break;
- default:
- bucket_type = "No Bucket";
- break;
- }
-
- sprintf(&buf[index], "Statistical Data enabled :%d, "
- "blocked :%d, Bucket type :%s, Bucket base :%d,"
- " Bucket step :%d\nLatency Ranges :",
- vport->stat_data_enabled, vport->stat_data_blocked,
- bucket_type, phba->bucket_base, phba->bucket_step);
- index = strlen(buf);
- if (phba->bucket_type != LPFC_NO_BUCKET) {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- if (phba->bucket_type == LPFC_LINEAR_BUCKET)
- bucket_value = phba->bucket_base +
- phba->bucket_step * i;
- else
- bucket_value = phba->bucket_base +
- (1 << i) * phba->bucket_step;
-
- if (index + 10 > PAGE_SIZE)
- break;
- sprintf(&buf[index], "%08ld ", bucket_value);
- index = strlen(buf);
- }
- }
- sprintf(&buf[index], "\n");
- return strlen(buf);
-}
-
-/*
- * Sysfs attribute to control the statistical data collection.
- */
-static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
-
-/*
- * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
- */
-
-/*
- * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
- * for each target.
- */
-#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
-#define MAX_STAT_DATA_SIZE_PER_TARGET \
- STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
-
-
-/**
- * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
- * @filp: sysfs file
- * @kobj: Pointer to the kernel object
- * @bin_attr: Attribute object
- * @buf: Buffer pointer
- * @off: File offset
- * @count: Buffer size
- *
- * This function is the read call back function for lpfc_drvr_stat_data
- * sysfs file. This function export the statistical data to user
- * applications.
- **/
-static ssize_t
-sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *dev = container_of(kobj, struct device,
- kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int i = 0, index = 0;
- unsigned long nport_index;
- struct lpfc_nodelist *ndlp = NULL;
- nport_index = (unsigned long)off /
- MAX_STAT_DATA_SIZE_PER_TARGET;
-
- if (!vport->stat_data_enabled || vport->stat_data_blocked
- || (phba->bucket_type == LPFC_NO_BUCKET))
- return 0;
-
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!ndlp->lat_data)
- continue;
-
- if (nport_index > 0) {
- nport_index--;
- continue;
- }
-
- if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
- > count)
- break;
-
- if (!ndlp->lat_data)
- continue;
-
- /* Print the WWN */
- sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
-
- index = strlen(buf);
-
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- sprintf(&buf[index], "%010u,",
- ndlp->lat_data[i].cmd_count);
- index = strlen(buf);
- }
- sprintf(&buf[index], "\n");
- index = strlen(buf);
- }
- spin_unlock_irq(shost->host_lock);
- return index;
-}
-
-static struct bin_attribute sysfs_drvr_stat_data_attr = {
- .attr = {
- .name = "lpfc_drvr_stat_data",
- .mode = S_IRUSR,
- },
- .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
- .read = sysfs_drvr_stat_data_read,
- .write = NULL,
-};
-
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
@@ -6273,7 +5946,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_xlane_priority.attr,
&dev_attr_lpfc_sg_seg_cnt.attr,
&dev_attr_lpfc_max_scsicmpl_time.attr,
- &dev_attr_lpfc_stat_data_ctrl.attr,
&dev_attr_lpfc_aer_support.attr,
&dev_attr_lpfc_aer_state_cleanup.attr,
&dev_attr_lpfc_sriov_nr_virtfn.attr,
@@ -6332,7 +6004,6 @@ static struct attribute *lpfc_vport_attrs[] = {
&dev_attr_npiv_info.attr,
&dev_attr_lpfc_enable_da_id.attr,
&dev_attr_lpfc_max_scsicmpl_time.attr,
- &dev_attr_lpfc_stat_data_ctrl.attr,
&dev_attr_lpfc_static_vport.attr,
&dev_attr_cmf_info.attr,
NULL,
@@ -6545,17 +6216,14 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
- error = sysfs_create_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
-
/* Virtual ports do not need ctrl_reg and mbox */
- if (error || vport->port_type == LPFC_NPIV_PORT)
- goto out;
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return 0;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_ctlreg_attr);
if (error)
- goto out_remove_stat_attr;
+ goto out;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_mbox_attr);
@@ -6565,9 +6233,6 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
return 0;
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
-out_remove_stat_attr:
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
out:
return error;
}
@@ -6580,8 +6245,7 @@ void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
+
/* Virtual ports do not need ctrl_reg and mbox */
if (vport->port_type == LPFC_NPIV_PORT)
return;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 9be3bb01a8ec..ac0c7ccf2eae 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1977,8 +1977,6 @@ lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
- int rc;
-
if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
@@ -1988,8 +1986,7 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
phba->vpi_ids[phba->pport->vpi]);
return -EINVAL;
}
- rc = lpfc_issue_reg_vfi(phba->pport);
- return rc;
+ return lpfc_issue_reg_vfi(phba->pport);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bcad91204328..d2d207791056 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -78,6 +78,7 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
void lpfc_free_iocb_list(struct lpfc_hba *phba);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, int count, int idx);
+int lpfc_read_lds_params(struct lpfc_hba *phba);
uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
void lpfc_cmf_signal_init(struct lpfc_hba *phba);
void lpfc_cmf_start(struct lpfc_hba *phba);
@@ -92,6 +93,14 @@ void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
void lpfc_unblock_requests(struct lpfc_hba *phba);
void lpfc_block_requests(struct lpfc_hba *phba);
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries);
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor);
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry);
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries);
void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -454,6 +463,7 @@ extern const struct attribute_group *lpfc_hba_groups[];
extern const struct attribute_group *lpfc_vport_groups[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_nvme;
+extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 13dfe285493d..75fd2bfc212b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1509,7 +1509,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_sli_ct_request *CTrsp;
int did;
struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_nodelist *ns_ndlp = NULL;
+ struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp;
uint32_t fc4_data_0, fc4_data_1;
u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
u32 ulp_word4 = get_job_word4(phba, rspiocb);
@@ -1522,15 +1522,12 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ulp_status, ulp_word4, did);
/* Ignore response if link flipped after this request was made */
- if ((uint32_t) cmdiocb->event_tag != phba->fc_eventTag) {
+ if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"9046 Event tag mismatch. Ignoring NS rsp\n");
goto out;
}
- /* Preserve the nameserver node to release the reference. */
- ns_ndlp = cmdiocb->ndlp;
-
if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
@@ -2504,420 +2501,298 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
}
}
-/* Routines for all individual HBA attributes */
-static int
-lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_fdmi_attr_u32 *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ ae->value_u32 = cpu_to_be32(attrval);
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NODENAME);
return size;
}
-static int
-lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+
+static inline int
+lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_wwn *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWNs are assumed to be bytestreams - Big Endian presentation */
+ memcpy(ae->name, wwn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- /* This string MUST be consistent with other FC platforms
- * supported by Broadcom.
- */
- strncpy(ae->un.AttrString,
- "Emulex Corporation",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
+ struct lpfc_name *wwnn, struct lpfc_name *wwpn)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fullwwn *ae = attr;
+ u8 *nname = ae->nname;
+ u8 *pname = ae->pname;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWNs are assumed to be bytestreams - Big Endian presentation */
+ memcpy(nname, wwnn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
+ memcpy(pname, wwpn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- strncpy(ae->un.AttrString, phba->SerialNumber,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_string *ae = attr;
+ int len, size;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ /*
+ * We are trusting the caller that if an FDMI string field
+ * is capped at 64 bytes, the caller passes in a string of
+ * 64 bytes or less.
+ */
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+ strncpy(ae->value_string, attrstring, sizeof(ae->value_string));
+ len = strnlen(ae->value_string, sizeof(ae->value_string));
+ /* round string length to a 32bit boundary. Ensure there's a NULL */
len += (len & 3) ? (4 - (len & 3)) : 4;
+ /* size is Type/Len (4 bytes) plus string length */
size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL);
+
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
return size;
}
-static int
-lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+/* Bitfields for FC4 Types that can be reported */
+#define ATTR_FC4_CT 0x00000001
+#define ATTR_FC4_FCP 0x00000002
+#define ATTR_FC4_NVME 0x00000004
+
+static inline int
+lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fc4types *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
+ if (typemask & ATTR_FC4_FCP)
+ ae->value_types[2] = 0x01; /* Type 0x8 - FCP */
+
+ if (typemask & ATTR_FC4_CT)
+ ae->value_types[7] = 0x01; /* Type 0x20 - CT */
+
+ if (typemask & ATTR_FC4_NVME)
+ ae->value_types[6] = 0x01; /* Type 0x28 - NVME */
- strncpy(ae->un.AttrString, phba->ModelDesc,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
return size;
}
+/* Routines for all individual HBA attributes */
static int
-lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- lpfc_vpd_t *vp = &phba->vpd;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t i, j, incr, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- /* Convert JEDEC ID to ascii for hardware version */
- incr = vp->rev.biuRev;
- for (i = 0; i < 8; i++) {
- j = (incr & 0xf);
- if (j <= 9)
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x30 +
- (uint8_t) j);
- else
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x61 +
- (uint8_t) (j - 10));
- incr = (incr >> 4);
- }
- size = FOURBYTES + 8;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ /* This string MUST be consistent with other FC platforms
+ * supported by Broadcom.
+ */
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
+ "Emulex Corporation");
+}
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+static int
+lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
+{
+ struct lpfc_hba *phba = vport->phba;
- strncpy(ae->un.AttrString, lpfc_release_version,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
+ phba->SerialNumber);
}
static int
-lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- else
- strncpy(ae->un.AttrString, phba->OptionROMVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
+ phba->ModelDesc);
}
static int
-lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ lpfc_vpd_t *vp = &phba->vpd;
+ char buf[16] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
- init_utsname()->sysname,
- init_utsname()->release,
- init_utsname()->version);
+ return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
+}
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
+ lpfc_release_version);
}
static int
-lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_decode_firmware_rev(phba, buf, 1);
- ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ buf);
+ }
+
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ phba->OptionROMVersion);
}
static int
-lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_decode_firmware_rev(phba, buf, 1);
- len = lpfc_vport_symbolic_node_name(vport,
- ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
+ snprintf(buf, sizeof(buf), "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version);
- /* Nothing is defined for this currently */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_INFO);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
-
- /* Each driver instance corresponds to a single port */
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NUM_PORTS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
+ LPFC_MAX_CT_SIZE);
}
static int
-lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));
- memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FABRIC_WWNN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
}
static int
-lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
+}
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+static int
+lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
+{
+ /* Each driver instance corresponds to a single port */
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
+}
- strlcat(ae->un.AttrString, phba->BIOSVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
+ &vport->fabric_nodename);
}
static int
-lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
- ae = &ad->AttrValue;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
+ phba->BIOSVersion);
+}
+static int
+lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
+{
/* Driver doesn't have access to this information */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
}
static int
-lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "EMULEX",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_ID);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
}
-/* Routines for all individual PORT attributes */
+/*
+ * Routines for all individual PORT attributes
+ */
+
static int
-lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ u32 fc4types;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if Firmware supports NVME and on physical port */
if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
phba->sli4_hba.pc_sli4_params.nvme)
- ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
+ u32 speeds = 0;
u32 tcfg;
u8 i, cnt;
- ae = &ad->AttrValue;
-
- ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
cnt = 0;
if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -2929,539 +2804,314 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
if (cnt > 2) { /* 4 lane trunk group */
if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+ speeds |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+ speeds |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+ speeds |= HBA_PORTSPEED_64GFC;
} else if (cnt) { /* 2 lane trunk group */
if (phba->lmt & LMT_128Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+ speeds |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+ speeds |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+ speeds |= HBA_PORTSPEED_64GFC;
if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
+ speeds |= HBA_PORTSPEED_32GFC;
} else {
if (phba->lmt & LMT_256Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+ speeds |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_128Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+ speeds |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+ speeds |= HBA_PORTSPEED_64GFC;
if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
+ speeds |= HBA_PORTSPEED_32GFC;
if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_16GFC;
+ speeds |= HBA_PORTSPEED_16GFC;
if (phba->lmt & LMT_10Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_10GFC;
+ speeds |= HBA_PORTSPEED_10GFC;
if (phba->lmt & LMT_8Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_8GFC;
+ speeds |= HBA_PORTSPEED_8GFC;
if (phba->lmt & LMT_4Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_4GFC;
+ speeds |= HBA_PORTSPEED_4GFC;
if (phba->lmt & LMT_2Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
+ speeds |= HBA_PORTSPEED_2GFC;
if (phba->lmt & LMT_1Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
+ speeds |= HBA_PORTSPEED_1GFC;
}
} else {
/* FCoE links support only one speed */
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
}
static int
-lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
+ u32 speeds = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_1GFC;
+ speeds = HBA_PORTSPEED_1GFC;
break;
case LPFC_LINK_SPEED_2GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_2GFC;
+ speeds = HBA_PORTSPEED_2GFC;
break;
case LPFC_LINK_SPEED_4GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_4GFC;
+ speeds = HBA_PORTSPEED_4GFC;
break;
case LPFC_LINK_SPEED_8GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_8GFC;
+ speeds = HBA_PORTSPEED_8GFC;
break;
case LPFC_LINK_SPEED_10GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_10GFC;
+ speeds = HBA_PORTSPEED_10GFC;
break;
case LPFC_LINK_SPEED_16GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_16GFC;
+ speeds = HBA_PORTSPEED_16GFC;
break;
case LPFC_LINK_SPEED_32GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_32GFC;
+ speeds = HBA_PORTSPEED_32GFC;
break;
case LPFC_LINK_SPEED_64GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_64GFC;
+ speeds = HBA_PORTSPEED_64GFC;
break;
case LPFC_LINK_SPEED_128GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_128GFC;
+ speeds = HBA_PORTSPEED_128GFC;
break;
case LPFC_LINK_SPEED_256GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_256GFC;
+ speeds = HBA_PORTSPEED_256GFC;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
} else {
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
}
static int
-lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
{
- struct serv_parm *hsp;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
+ struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;
- hsp = (struct serv_parm *)&vport->fc_sparam;
- ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
- (uint32_t) hsp->cmn.bbRcvSizeLsb;
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
+ (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t)hsp->cmn.bbRcvSizeLsb);
}
static int
-lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
+ shost->host_no);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
- "/sys/class/scsi_host/host%d", shost->host_no);
- len = strnlen((char *)ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);
- scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- vport->phba->os_host_name);
-
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));
- len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
}
static int
-lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
- ae = &ad->AttrValue;
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
- else
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NPORT);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
+ LPFC_FDMI_PORTTYPE_NLPORT :
+ LPFC_FDMI_PORTTYPE_NPORT);
}
static int
-lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
+ FC_COS_CLASS2 | FC_COS_CLASS3);
}
static int
-lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
+ &vport->fabric_portname);
}
static int
-lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ u32 fc4types;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if NVME is configured or not */
if (vport == phba->pport &&
phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- /* Link Up - operational */
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
+ LPFC_FDMI_PORTSTATE_ONLINE);
}
static int
-lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
vport->fdmi_num_disc = lpfc_find_map_node(vport);
- ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
+ vport->fdmi_num_disc);
}
static int
-lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
}
static int
-lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "Smart SAN Initiator",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SERVICE);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
+ "Smart SAN Initiator");
}
static int
-lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy((((uint8_t *)&ae->un.AttrString) +
- sizeof(struct lpfc_name)),
- &vport->fc_sparam.portName, sizeof(struct lpfc_name));
- size = FOURBYTES + (2 * sizeof(struct lpfc_name));
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_GUID);
- return size;
+ return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
+ &vport->fc_sparam.nodeName,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
+ "Smart SAN Version 2.0");
}
static int
-lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_MODEL);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
-
/* SRIOV (type 3) is not supported */
- if (vport->vpi)
- ae->un.AttrInt = cpu_to_be32(2); /* NPIV */
- else
- ae->un.AttrInt = cpu_to_be32(1); /* Physical */
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_PORT_INFO);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
+ (vport->vpi) ? 2 /* NPIV */ : 1 /* Physical */);
}
static int
-lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_QOS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
}
static int
-lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
}
static int
-lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- char mibrevision[16];
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
- sprintf(mibrevision, "ELXE2EM:%04d",
- phba->sli4_hba.pc_sli4_params.mi_ver);
- strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_VENDOR_MI);
- return size;
+ char buf[32] = { 0 };
+
+ sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);
+
+ return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
}
/* RHBA attribute jump table */
int (*lpfc_fdmi_hba_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */
lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */
@@ -3485,7 +3135,7 @@ int (*lpfc_fdmi_hba_action[])
/* RPA / RPRT attribute jump table */
int (*lpfc_fdmi_port_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */
lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */
@@ -3527,20 +3177,20 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int cmdcode, uint32_t new_mask)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_dmabuf *rq, *rsp;
struct lpfc_sli_ct_request *CtReq;
- struct ulp_bde64 *bpl;
+ struct ulp_bde64_le *bde;
uint32_t bit_pos;
- uint32_t size;
+ uint32_t size, addsz;
uint32_t rsp_size;
uint32_t mask;
struct lpfc_fdmi_reg_hba *rh;
struct lpfc_fdmi_port_entry *pe;
- struct lpfc_fdmi_reg_portattr *pab = NULL;
+ struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
struct lpfc_fdmi_attr_block *ab = NULL;
- int (*func)(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad);
- void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
+ int (*func)(struct lpfc_vport *vport, void *attrbuf);
+ void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
if (!ndlp)
return 0;
@@ -3549,25 +3199,29 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp)
+ rq = kmalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
goto fdmi_cmd_exit;
- mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
- if (!mp->virt)
- goto fdmi_cmd_free_mp;
+ rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
+ if (!rq->virt)
+ goto fdmi_cmd_free_rq;
/* Allocate buffer for Buffer ptr list */
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp)
- goto fdmi_cmd_free_mpvirt;
+ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ goto fdmi_cmd_free_rqvirt;
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
- if (!bmp->virt)
- goto fdmi_cmd_free_bmp;
+ rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
+ if (!rsp->virt)
+ goto fdmi_cmd_free_rsp;
- INIT_LIST_HEAD(&mp->list);
- INIT_LIST_HEAD(&bmp->list);
+ INIT_LIST_HEAD(&rq->list);
+ INIT_LIST_HEAD(&rsp->list);
+
+ /* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
+ memset(rq->virt, 0, LPFC_BPL_SIZE);
+ rsp_size = LPFC_BPL_SIZE;
/* FDMI request */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -3575,10 +3229,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
cmdcode, new_mask, vport->fdmi_port_mask,
vport->fc_flag, vport->port_state);
- CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ CtReq = (struct lpfc_sli_ct_request *)rq->virt;
/* First populate the CT_IU preamble */
- memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
CtReq->RevisionId.bits.InId = 0;
@@ -3586,17 +3239,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
- rsp_size = LPFC_BPL_SIZE;
+
size = 0;
/* Next fill in the specific FDMI cmd information */
switch (cmdcode) {
case SLI_MGMT_RHAT:
case SLI_MGMT_RHBA:
- rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+ rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
/* HBA Identifier */
memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_fdmi_hba_ident);
if (cmdcode == SLI_MGMT_RHBA) {
/* Registered Port List */
@@ -3605,16 +3259,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&rh->rpl.pe.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
-
- /* point to the HBA attribute block */
- size = 2 * sizeof(struct lpfc_name) +
- FOURBYTES;
- } else {
- size = sizeof(struct lpfc_name);
+ size += sizeof(struct lpfc_fdmi_reg_port_list);
}
+
ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
ab->EntryCnt = 0;
- size += FOURBYTES;
+ size += FOURBYTES; /* add length of EntryCnt field */
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3625,11 +3276,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_hba_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)rh + size));
- ab->EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)rh + size));
+ if (addsz) {
+ ab->EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto hba_out;
}
@@ -3639,7 +3292,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
hba_out:
ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
/* Total size */
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_RPRT:
@@ -3650,22 +3303,29 @@ hba_out:
}
fallthrough;
case SLI_MGMT_RPA:
- pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID;
+ /* Store base ptr right after preamble */
+ base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un;
+
if (cmdcode == SLI_MGMT_RPRT) {
- rh = (struct lpfc_fdmi_reg_hba *)pab;
+ rh = (struct lpfc_fdmi_reg_hba *)base;
/* HBA Identifier */
memcpy(&rh->hi.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
pab = (struct lpfc_fdmi_reg_portattr *)
- ((uint8_t *)pab + sizeof(struct lpfc_name));
+ ((uint8_t *)base + sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_name);
+ } else {
+ pab = base;
}
memcpy((uint8_t *)&pab->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
- size += sizeof(struct lpfc_name) + FOURBYTES;
pab->ab.EntryCnt = 0;
+ /* add length of name and EntryCnt field */
+ size += sizeof(struct lpfc_name) + FOURBYTES;
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3676,11 +3336,13 @@ hba_out:
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_port_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)pab + size));
- pab->ab.EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)base + size));
+ if (addsz) {
+ pab->ab.EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto port_out;
}
@@ -3689,10 +3351,7 @@ hba_out:
}
port_out:
pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
- /* Total size */
- if (cmdcode == SLI_MGMT_RPRT)
- size += sizeof(struct lpfc_name);
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_GHAT:
@@ -3701,7 +3360,7 @@ port_out:
fallthrough;
case SLI_MGMT_DHBA:
case SLI_MGMT_DHAT:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3720,7 +3379,7 @@ port_out:
}
fallthrough;
case SLI_MGMT_DPA:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3733,31 +3392,32 @@ port_out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0298 FDMI cmdcode x%x not supported\n",
cmdcode);
- goto fdmi_cmd_free_bmpvirt;
+ goto fdmi_cmd_free_rspvirt;
}
CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
- bpl = (struct ulp_bde64 *)bmp->virt;
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.f.bdeSize = size;
+ bde = (struct ulp_bde64_le *)rsp->virt;
+ bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
+ bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
+ bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
+ ULP_BDE64_TYPE_SHIFT);
+ bde->type_size |= cpu_to_le32(size);
/*
* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
* to hold ndlp reference for the corresponding callback function.
*/
- if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
+ if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
return 0;
-fdmi_cmd_free_bmpvirt:
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-fdmi_cmd_free_bmp:
- kfree(bmp);
-fdmi_cmd_free_mpvirt:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
-fdmi_cmd_free_mp:
- kfree(mp);
+fdmi_cmd_free_rspvirt:
+ lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
+fdmi_cmd_free_rsp:
+ kfree(rsp);
+fdmi_cmd_free_rqvirt:
+ lpfc_mbuf_free(phba, rq->virt, rq->phys);
+fdmi_cmd_free_rq:
+ kfree(rq);
fdmi_cmd_exit:
/* Issue FDMI request failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -3912,6 +3572,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_sli_ct_request *ctrsp = outp->virt;
u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
struct app_id_object *app;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
u32 cmd, hash, bucket;
struct lpfc_vmid *vmp, *cur;
u8 *data = outp->virt;
@@ -3923,7 +3584,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
if (cmd != SLI_CTAS_DALLAPP_ID)
- return;
+ goto free_res;
}
/* Check for a CT LS_RJT response */
if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
@@ -3938,7 +3599,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* If DALLAPP_ID failed retry later */
if (cmd == SLI_CTAS_DALLAPP_ID)
vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
- return;
+ goto free_res;
}
}
@@ -3952,7 +3613,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
app->obj.entity_id_len);
if (app->obj.entity_id_len == 0 || app->port_id == 0)
- return;
+ goto free_res;
hash = lpfc_vmid_hash_fn(app->obj.entity_id,
app->obj.entity_id_len);
@@ -3999,6 +3660,9 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
"8857 Invalid command code\n");
}
+free_res:
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5037ea09a810..f5252e45a48a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -5156,7 +5156,7 @@ error_out:
static int
lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
{
- uint16_t ext_cnt, ext_size;
+ uint16_t ext_cnt = 0, ext_size = 0;
len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\nAvailable Extents Information:\n");
@@ -5531,7 +5531,7 @@ lpfc_rx_monitor_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
+ debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE);
if (!debug->buffer) {
kfree(debug);
goto out;
@@ -5552,57 +5552,18 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
struct lpfc_rx_monitor_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
char *buffer = debug->buffer;
- struct rxtable_entry *entry;
- int i, len = 0, head, tail, last, start;
-
- head = atomic_read(&phba->rxtable_idx_head);
- while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
- /* Table is getting updated */
- msleep(20);
- head = atomic_read(&phba->rxtable_idx_head);
- }
- tail = atomic_xchg(&phba->rxtable_idx_tail, head);
- if (!phba->rxtable || head == tail) {
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "Rxtable is empty\n");
- goto out;
- }
- last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY;
- start = tail;
-
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- " MaxBPI Tot_Data_CMF Tot_Data_Cmd "
- "Tot_Data_Cmpl Lat(us) Avg_IO Max_IO "
- "Bsy IO_cnt Info BWutil(ms)\n");
-get_table:
- for (i = start; i < last; i++) {
- entry = &phba->rxtable[i];
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "%3d:%12lld %12lld %12lld %12lld "
- "%7lldus %8lld %7lld "
- "%2d %4d %2d %2d(%2d)\n",
- i, entry->max_bytes_per_interval,
- entry->cmf_bytes,
- entry->total_bytes,
- entry->rcv_bytes,
- entry->avg_io_latency,
- entry->avg_io_size,
- entry->max_read_cnt,
- entry->cmf_busy,
- entry->io_cnt,
- entry->cmf_info,
- entry->timer_utilization,
- entry->timer_interval);
+ if (!phba->rx_monitor) {
+ scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE,
+ "Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer,
+ MAX_DEBUGFS_RX_INFO_SIZE,
+ LPFC_MAX_RXMONITOR_ENTRY);
}
- if (head != last) {
- start = 0;
- last = head;
- goto get_table;
- }
-out:
- return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+ return simple_read_from_buffer(buf, nbytes, ppos, buffer,
+ strlen(buffer));
}
static int
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 6dd361c1fd31..8d2e8d05bbc0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -282,7 +282,7 @@ struct lpfc_idiag {
void *ptr_private;
};
-#define MAX_DEBUGFS_RX_TABLE_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
+#define MAX_DEBUGFS_RX_INFO_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
struct lpfc_rx_monitor_debug {
char *i_private;
char *buffer;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 37a4b79010bf..f82615d87c4b 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -149,7 +149,6 @@ struct lpfc_nodelist {
uint32_t cmd_qdepth;
unsigned long last_change_time;
unsigned long *active_rrqs_xri_bitmap;
- struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
/* flags to keep ndlp alive until special conditions are met */
@@ -188,7 +187,6 @@ struct lpfc_node_rrq {
#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
-#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 9e69de9eb992..863b2125fed6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2200,10 +2200,6 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (!elsiocb)
return 1;
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
-
pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For PLOGI request, remainder of payload is service parameters */
@@ -3992,7 +3988,8 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* ELS cmd tag <ulpIoTag> completes */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"4676 Fabric EDC Rsp: "
"0x%02x, 0x%08x\n",
edc_rsp->acc_hdr.la_cmd,
@@ -4029,18 +4026,18 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
sizeof(struct fc_diag_lnkflt_desc)) {
- lpfc_printf_log(
- phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6462 Truncated Link Fault Diagnostic "
"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
desc_cnt, bytes_remain,
FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
- sizeof(struct fc_diag_cg_sig_desc));
+ sizeof(struct fc_diag_lnkflt_desc));
goto out;
}
plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
- lpfc_printf_log(
- phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
"4617 Link Fault Desc Data: 0x%08x 0x%08x "
"0x%08x 0x%08x 0x%08x\n",
be32_to_cpu(plnkflt->desc_tag),
@@ -4120,8 +4117,26 @@ out:
}
static void
-lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
+lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
{
+ struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv;
+
+ lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP);
+ lft->desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc));
+
+ lft->degrade_activate_threshold =
+ cpu_to_be32(phba->degrade_activate_threshold);
+ lft->degrade_deactivate_threshold =
+ cpu_to_be32(phba->degrade_deactivate_threshold);
+ lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval);
+}
+
+static void
+lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv;
+
/* We are assuming cgd was zero'ed before calling this routine */
/* Configure the congestion detection capability */
@@ -4165,6 +4180,23 @@ lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
}
+static bool
+lpfc_link_is_lds_capable(struct lpfc_hba *phba)
+{
+ if (!(phba->lmt & LMT_64Gb))
+ return false;
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return false;
+
+ if (phba->sli4_hba.conf_trunk) {
+ if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
+ return true;
+ } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
+ return true;
+ }
+ return false;
+}
+
/**
* lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
* @vport: pointer to a host virtual N_Port data structure.
@@ -4192,12 +4224,12 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_els_edc_req *edc_req;
- struct fc_diag_cg_sig_desc *cgn_desc;
+ struct fc_els_edc *edc_req;
+ struct fc_tlv_desc *tlv;
u16 cmdsize;
struct lpfc_nodelist *ndlp;
u8 *pcmd = NULL;
- u32 edc_req_size, cgn_desc_size;
+ u32 cgn_desc_size, lft_desc_size;
int rc;
if (vport->port_type == LPFC_NPIV_PORT)
@@ -4207,13 +4239,17 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
return -ENODEV;
- /* If HBA doesn't support signals, drop into RDF */
- if (!phba->cgn_init_reg_signal)
+ cgn_desc_size = (phba->cgn_init_reg_signal) ?
+ sizeof(struct fc_diag_cg_sig_desc) : 0;
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize = cgn_desc_size + lft_desc_size;
+
+ /* Skip EDC if no applicable descriptors */
+ if (!cmdsize)
goto try_rdf;
- edc_req_size = sizeof(struct fc_els_edc);
- cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
- cmdsize = edc_req_size + cgn_desc_size;
+ cmdsize += sizeof(struct fc_els_edc);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_EDC);
if (!elsiocb)
@@ -4222,15 +4258,19 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
/* Configure the payload for the supported Diagnostics capabilities. */
pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
- edc_req = (struct lpfc_els_edc_req *)pcmd;
- edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
- edc_req->edc.edc_cmd = ELS_EDC;
-
- cgn_desc = &edc_req->cgn_desc;
+ edc_req = (struct fc_els_edc *)pcmd;
+ edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
+ edc_req->edc_cmd = ELS_EDC;
+ tlv = edc_req->desc;
- lpfc_format_edc_cgn_desc(phba, cgn_desc);
+ if (cgn_desc_size) {
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ tlv = fc_tlv_next_desc(tlv);
+ }
- phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"4623 Xmit EDC to remote "
@@ -4676,47 +4716,52 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
- /* The driver has a VALID PLOGI but the rport has
- * rejected the PRLI - can't do it now. Delay
- * for 1 second and try again.
- *
- * However, if explanation is REQ_UNSUPPORTED there's
- * no point to retry PRLI.
+ /* Special case for PRLI LS_RJTs. Recall that lpfc
+ * uses a single routine to issue both PRLI FC4 types.
+ * If the PRLI is rejected because that FC4 type
+ * isn't really supported, don't retry and cause
+ * multiple transport registrations. Otherwise, parse
+ * the reason code/reason code explanation and take the
+ * appropriate action.
*/
- if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
- stat.un.b.lsRjtRsnCodeExp !=
- LSEXP_REQ_UNSUPPORTED) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
- break;
- }
-
- /* Legacy bug fix code for targets with PLOGI delays. */
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CMD_IN_PROGRESS) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS | LOG_NODE,
+ "0153 ELS cmd x%x LS_RJT by x%x. "
+ "RsnCode x%x RsnCodeExp x%x\n",
+ cmd, did, stat.un.b.lsRjtRsnCode,
+ stat.un.b.lsRjtRsnCodeExp);
+
+ switch (stat.un.b.lsRjtRsnCodeExp) {
+ case LSEXP_CANT_GIVE_DATA:
+ case LSEXP_CMD_IN_PROGRESS:
if (cmd == ELS_CMD_PLOGI) {
delay = 1000;
maxretry = 48;
}
retry = 1;
break;
- }
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CANT_GIVE_DATA) {
- if (cmd == ELS_CMD_PLOGI) {
+ case LSEXP_REQ_UNSUPPORTED:
+ case LSEXP_NO_RSRC_ASSIGN:
+ /* These explanation codes get no retry. */
+ if (cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI)
+ break;
+ fallthrough;
+ default:
+ /* Limit the delay and retry action to a limited
+ * cmd set. There are other ELS commands where
+ * a retry is not expected.
+ */
+ if (cmd == ELS_CMD_PLOGI ||
+ cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI) {
delay = 1000;
- maxretry = 48;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
}
- retry = 1;
- break;
- }
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
break;
}
+
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
@@ -4797,13 +4842,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_REQ_UNSUPPORTED) {
- if (cmd == ELS_CMD_PRLI) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
- retry = 0;
+ if (cmd == ELS_CMD_PRLI)
goto out_retry;
- }
}
break;
}
@@ -5784,14 +5824,21 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_els_edc_rsp *edc_rsp;
+ struct fc_els_edc_resp *edc_rsp;
+ struct fc_tlv_desc *tlv;
struct lpfc_iocbq *elsiocb;
IOCB_t *icmd, *cmd;
union lpfc_wqe128 *wqe;
+ u32 cgn_desc_size, lft_desc_size;
+ u16 cmdsize;
uint8_t *pcmd;
- int cmdsize, rc;
+ int rc;
- cmdsize = sizeof(struct lpfc_els_edc_rsp);
+ cmdsize = sizeof(struct fc_els_edc_resp);
+ cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize += cgn_desc_size + lft_desc_size;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
@@ -5813,15 +5860,19 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
pcmd = elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
- edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
- edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC;
- edc_rsp->edc_rsp.desc_list_len = cpu_to_be32(
- FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp));
- edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
- edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32(
+ edc_rsp = (struct fc_els_edc_resp *)pcmd;
+ edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC;
+ edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) +
+ cgn_desc_size + lft_desc_size);
+ edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
+ edc_rsp->lsri.desc_len = cpu_to_be32(
FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
- edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC;
- lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc);
+ edc_rsp->lsri.rqst_w0.cmd = ELS_EDC;
+ tlv = edc_rsp->desc;
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue EDC ACC: did:x%x flg:x%x refcnt %d",
@@ -6006,7 +6057,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
if (prli_fc4_req == PRLI_FCP_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
} else {
@@ -6069,7 +6120,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr->ConfmComplAllowed = 1;
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
@@ -9086,7 +9137,7 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t *ptr, dtag;
const char *dtag_nm;
int desc_cnt = 0, bytes_remain;
- bool rcv_cap_desc = false;
+ struct fc_diag_lnkflt_desc *plnkflt;
payload = cmdiocb->cmd_dmabuf->virt;
@@ -9094,7 +9145,8 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
bytes_remain = be32_to_cpu(edc_req->desc_len);
ptr = (uint32_t *)payload;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"3319 Rcv EDC payload len %d: x%x x%x x%x\n",
bytes_remain, be32_to_cpu(*ptr),
be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
@@ -9113,9 +9165,10 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* cycle through EDC diagnostic descriptors to find the
* congestion signaling capability descriptor
*/
- while (bytes_remain && !rcv_cap_desc) {
+ while (bytes_remain) {
if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6464 Truncated TLV hdr on "
"Diagnostic descriptor[%d]\n",
desc_cnt);
@@ -9128,16 +9181,27 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
sizeof(struct fc_diag_lnkflt_desc)) {
- lpfc_printf_log(
- phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6465 Truncated Link Fault Diagnostic "
"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
desc_cnt, bytes_remain,
FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
- sizeof(struct fc_diag_cg_sig_desc));
+ sizeof(struct fc_diag_lnkflt_desc));
goto out;
}
- /* No action for Link Fault descriptor for now */
+ plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
+ "4626 Link Fault Desc Data: x%08x len x%x "
+ "da x%x dd x%x interval x%x\n",
+ be32_to_cpu(plnkflt->desc_tag),
+ be32_to_cpu(plnkflt->desc_len),
+ be32_to_cpu(
+ plnkflt->degrade_activate_threshold),
+ be32_to_cpu(
+ plnkflt->degrade_deactivate_threshold),
+ be32_to_cpu(plnkflt->fec_degrade_interval));
break;
case ELS_DTAG_CG_SIGNAL_CAP:
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
@@ -9164,11 +9228,11 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_least_capable_settings(
phba, (struct fc_diag_cg_sig_desc *)tlv);
- rcv_cap_desc = true;
break;
default:
dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
- lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6467 unknown Diagnostic "
"Descriptor[%d]: tag x%x (%s)\n",
desc_cnt, dtag, dtag_nm);
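Both the EDC build path above and this receive path step through a list of FC TLV descriptors, trusting the advertised descriptor length only after checking it against the bytes remaining. A condensed sketch of that walk, assuming the fc_tlv_desc helpers from the scsi/fc/fc_els.h header already used in the hunks above; the callback parameter is hypothetical:

/* Illustrative sketch: visit each well-formed TLV descriptor in a payload. */
static int example_walk_edc_descs(struct fc_tlv_desc *tlv, u32 bytes_remain,
				  void (*handle)(struct fc_tlv_desc *tlv))
{
	int cnt = 0;

	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
		handle(tlv);
		cnt++;
		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		tlv = fc_tlv_next_desc(tlv);
	}
	return cnt;	/* number of well-formed descriptors visited */
}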
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2645def612e6..d38ebd7281b9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1242,6 +1242,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->trunk_link.link1.state = 0;
phba->trunk_link.link2.state = 0;
phba->trunk_link.link3.state = 0;
+ phba->trunk_link.phy_lnk_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
phba->sli4_hba.link_state.logical_speed =
LPFC_LINK_SPEED_UNKNOWN;
}
@@ -1353,8 +1355,13 @@ lpfc_linkup_port(struct lpfc_vport *vport)
FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ if (phba->defer_flogi_acc_flag)
+ vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ else
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI |
+ FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
@@ -1392,7 +1399,6 @@ lpfc_linkup(struct lpfc_hba *phba)
/* reinitialize initial HBA flag */
phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
- phba->defer_flogi_acc_flag = false;
return 0;
}
@@ -2150,8 +2156,8 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* This function makes a running random selection decision on FCF record to
* use through a sequence of @fcf_cnt eligible FCF records with equal
* probability. To perform integer manipulation of random numbers with
- * size unit32_t, the lower 16 bits of the 32-bit random number returned
- * from prandom_u32() are taken as the random random number generated.
+ * size uint32_t, a 16-bit random number returned from get_random_u16() is
+ * taken as the random number generated.
*
* Returns true when outcome is for the newly read FCF record should be
* chosen; otherwise, return false when outcome is for keeping the previously
@@ -2163,7 +2169,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
uint32_t rand_num;
/* Get 16-bit uniform random number */
- rand_num = 0xFFFF & prandom_u32();
+ rand_num = get_random_u16();
/* Decision with probability 1/fcf_cnt */
if ((fcf_cnt * rand_num) < 0xFFFF)
@@ -2964,7 +2970,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, stop the roundrobin failover process */
@@ -3069,7 +3075,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, no need to proceed */
@@ -3790,6 +3796,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->cmf_active_mode != LPFC_CFG_OFF)
lpfc_cmf_signal_init(phba);
+ if (phba->lmt & LMT_64Gb)
+ lpfc_read_lds_params(phba);
+
} else if (attn_type == LPFC_ATT_LINK_DOWN ||
attn_type == LPFC_ATT_UNEXP_WWPN) {
phba->fc_stat.LinkDown++;
@@ -4389,8 +4398,11 @@ out:
rc = lpfc_issue_els_edc(vport, 0);
lpfc_printf_log(phba, KERN_INFO,
LOG_INIT | LOG_ELS | LOG_DISCOVERY,
- "4220 EDC issue error x%x, Data: x%x\n",
+ "4220 Issue EDC status x%x Data x%x\n",
rc, phba->cgn_init_reg_signal);
+ } else if (phba->lmt & LMT_64Gb) {
+ /* may send link fault capability descriptor */
+ lpfc_issue_els_edc(vport, 0);
} else {
lpfc_issue_els_rdf(vport, 0);
}
@@ -4788,22 +4800,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
new_state == NLP_STE_UNMAPPED_NODE)
lpfc_nlp_reg_node(vport, ndlp);
- if ((new_state == NLP_STE_MAPPED_NODE) &&
- (vport->stat_data_enabled)) {
- /*
- * A new target is discovered, if there is no buffer for
- * statistical data collection allocate buffer.
- */
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_KERNEL);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0286 lpfc_nlp_state_cleanup failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
/*
* If the node just added to Mapped list was an FCP target,
* but the remote port registration failed or assigned a target
@@ -6648,7 +6644,6 @@ lpfc_nlp_release(struct kref *kref)
ndlp->fc4_xpt_flags = 0;
/* free ndlp memory for final ndlp release */
- kfree(ndlp->lat_data);
if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
mempool_free(ndlp->active_rrqs_xri_bitmap,
ndlp->phba->active_rrq_pool);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 071983e2cdfe..5c283936ff08 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -703,6 +703,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_OUT_OF_RESOURCE 0x29
#define LSEXP_CANT_GIVE_DATA 0x2A
#define LSEXP_REQ_UNSUPPORTED 0x2C
+#define LSEXP_NO_RSRC_ASSIGN 0x52
uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
} b;
} un;
@@ -1441,30 +1442,56 @@ struct lpfc_vmid_gallapp_ident_list {
/* Definitions for HBA / Port attribute entries */
-/* Attribute Entry */
-struct lpfc_fdmi_attr_entry {
- union {
- uint32_t AttrInt;
- uint8_t AttrTypes[32];
- uint8_t AttrString[256];
- struct lpfc_name AttrWWN;
- } un;
+/* Attribute Entry Structures */
+
+struct lpfc_fdmi_attr_u32 {
+ __be16 type;
+ __be16 len;
+ __be32 value_u32;
};
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
- /* Structure is in Big Endian format */
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- /* Marks start of Value (ATTRIBUTE_ENTRY) */
- struct lpfc_fdmi_attr_entry AttrValue;
-} __packed;
+struct lpfc_fdmi_attr_wwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by compiler
+ */
+ u8 name[8];
+};
+
+struct lpfc_fdmi_attr_fullwwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by compiler
+ */
+ u8 nname[8];
+ u8 pname[8];
+};
+
+struct lpfc_fdmi_attr_fc4types {
+ __be16 type;
+ __be16 len;
+ u8 value_types[32];
+};
+
+struct lpfc_fdmi_attr_string {
+ __be16 type;
+ __be16 len;
+ char value_string[256];
+};
+
+/* Maximum FDMI attribute length is Type+Len (4 bytes) + 256 byte string */
+#define FDMI_MAX_ATTRLEN sizeof(struct lpfc_fdmi_attr_string)
/*
* HBA Attribute Block
*/
struct lpfc_fdmi_attr_block {
uint32_t EntryCnt; /* Number of HBA attribute entries */
- struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
+ /* Variable Length Attribute Entry TLV's follow */
};
/*
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4527fef23ae7..5288fc69908a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -738,6 +738,7 @@ struct lpfc_register {
#define lpfc_sliport_eqdelay_id_WORD word0
#define LPFC_SEC_TO_USEC 1000000
#define LPFC_SEC_TO_MSEC 1000
+#define LPFC_MSECS_TO_SECS(msecs) ((msecs) / 1000)
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
@@ -3483,9 +3484,10 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x12
-#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_DUAL_DUMP 0x1e
+#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_ENABLE_MI 0x21
+#define LPFC_SET_LD_SIGNAL 0x23
#define LPFC_SET_ENABLE_CMF 0x24
struct lpfc_mbx_set_feature {
struct mbox_header header;
@@ -3516,13 +3518,17 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_cmf_SHIFT 0
#define lpfc_mbx_set_feature_cmf_MASK 0x00000001
#define lpfc_mbx_set_feature_cmf_WORD word6
+#define lpfc_mbx_set_feature_lds_qry_SHIFT 0
+#define lpfc_mbx_set_feature_lds_qry_MASK 0x00000001
+#define lpfc_mbx_set_feature_lds_qry_WORD word6
+#define LPFC_QUERY_LDS_OP 1
#define lpfc_mbx_set_feature_mi_SHIFT 0
#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff
#define lpfc_mbx_set_feature_mi_WORD word6
#define lpfc_mbx_set_feature_milunq_SHIFT 16
#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff
#define lpfc_mbx_set_feature_milunq_WORD word6
- uint32_t word7;
+ u32 word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
#define lpfc_mbx_set_feature_UERP_WORD word7
@@ -3536,6 +3542,8 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0
#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff
#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8
+ u32 word9;
+ u32 word10;
};
@@ -4313,7 +4321,7 @@ struct lpfc_acqe_cgn_signal {
struct lpfc_acqe_sli {
uint32_t event_data1;
uint32_t event_data2;
- uint32_t reserved;
+ uint32_t event_data3;
uint32_t trailer;
#define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1
#define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2
@@ -4326,6 +4334,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
+#define LPFC_SLI_EVENT_TYPE_RD_SIGNAL 0x12
};
/*
@@ -4798,6 +4807,9 @@ struct cmf_sync_wqe {
#define cmf_sync_cqid_WORD word11
uint32_t read_bytes;
uint32_t word13;
+#define cmf_sync_period_SHIFT 16
+#define cmf_sync_period_MASK 0x0000ffff
+#define cmf_sync_period_WORD word13
uint32_t word14;
uint32_t word15;
};
@@ -5046,22 +5058,6 @@ struct lpfc_grp_hdr {
{ FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \
}
-/* EDC supports two descriptors. When allocated, it is the
- * size of this structure plus each supported descriptor.
- */
-struct lpfc_els_edc_req {
- struct fc_els_edc edc; /* hdr up to descriptors */
- struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
-};
-
-/* Minimum structure defines for the EDC response.
- * Balance is in buffer.
- */
-struct lpfc_els_edc_rsp {
- struct fc_els_edc_resp edc_rsp; /* hdr up to descriptors */
- struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
-};
-
/* Used for logging FPIN messages */
#define LPFC_FPIN_WWPN_LINE_SZ 128
#define LPFC_FPIN_WWPN_LINE_CNT 6
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 55a1ad6eed03..b535f1fd3010 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -325,8 +325,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
prog_id_word = pmboxq->u.mb.un.varWords[7];
/* Decode the Option rom version word to a readable string */
- if (prg->dist < 4)
- dist = dist_char[prg->dist];
+ dist = dist_char[prg->dist];
if ((prg->dist == 3) && (prg->num == 0))
snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
@@ -2258,6 +2257,101 @@ lpfc_handle_latt_err_exit:
return;
}
+static void
+lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
+{
+ int i, j;
+
+ while (length > 0) {
+ /* Look for Serial Number */
+ if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->SerialNumber[j++] = vpd[(*pindex)++];
+ if (j == 31)
+ break;
+ }
+ phba->SerialNumber[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
+ phba->vpd_flag |= VPD_MODEL_DESC;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelDesc[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ModelDesc[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
+ phba->vpd_flag |= VPD_MODEL_NAME;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelName[j++] = vpd[(*pindex)++];
+ if (j == 79)
+ break;
+ }
+ phba->ModelName[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
+ phba->vpd_flag |= VPD_PROGRAM_TYPE;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ProgramType[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ProgramType[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
+ phba->vpd_flag |= VPD_PORT;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3 + i);
+ while (i--) {
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_GET)) {
+ j++;
+ (*pindex)++;
+ } else
+ phba->Port[j++] = vpd[(*pindex)++];
+ if (j == 19)
+ break;
+ }
+ if ((phba->sli_rev != LPFC_SLI_REV4) ||
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_NON))
+ phba->Port[j] = 0;
+ continue;
+ } else {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ *pindex += i;
+ length -= (3 + i);
+ }
+ }
+}
+
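lpfc_fill_vpd() consumes the length-delimited VPD area as a stream of records: a two-character keyword, a one-byte value length, then the value itself; unrecognized keywords are skipped by advancing past 3 + length bytes. A hypothetical fragment, with invented values, of the layout it expects:

/*
 * Hypothetical VPD stream as consumed by lpfc_fill_vpd():
 *
 *   'S' 'N' 0x0c "FC1234567890"    -> phba->SerialNumber
 *   'V' '1' 0x0e "Emulex Adapter"  -> phba->ModelDesc  (VPD_MODEL_DESC)
 *   'V' '2' 0x08 "LPe00000"        -> phba->ModelName  (VPD_MODEL_NAME)
 *   'V' '4' 0x04 "0000"            -> phba->Port       (VPD_PORT)
 *   'X' 'Y' 0x02 ".."              -> unknown keyword, skipped
 */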
/**
* lpfc_parse_vpd - Parse VPD (Vital Product Data)
* @phba: pointer to lpfc hba data structure.
@@ -2277,7 +2371,7 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
uint8_t lenlo, lenhi;
int Length;
- int i, j;
+ int i;
int finished = 0;
int index = 0;
@@ -2310,101 +2404,10 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
Length = ((((unsigned short)lenhi) << 8) + lenlo);
if (Length > len - index)
Length = len - index;
- while (Length > 0) {
- /* Look for Serial Number */
- if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->SerialNumber[j++] = vpd[index++];
- if (j == 31)
- break;
- }
- phba->SerialNumber[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
- phba->vpd_flag |= VPD_MODEL_DESC;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelDesc[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ModelDesc[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
- phba->vpd_flag |= VPD_MODEL_NAME;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelName[j++] = vpd[index++];
- if (j == 79)
- break;
- }
- phba->ModelName[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
- phba->vpd_flag |= VPD_PROGRAM_TYPE;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ProgramType[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ProgramType[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
- phba->vpd_flag |= VPD_PORT;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_GET)) {
- j++;
- index++;
- } else
- phba->Port[j++] = vpd[index++];
- if (j == 19)
- break;
- }
- if ((phba->sli_rev != LPFC_SLI_REV4) ||
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_NON))
- phba->Port[j] = 0;
- continue;
- }
- else {
- index += 2;
- i = vpd[index];
- index += 1;
- index += i;
- Length -= (3 + i);
- }
- }
- finished = 0;
- break;
+
+ lpfc_fill_vpd(phba, vpd, Length, &index);
+ finished = 0;
+ break;
case 0x78:
finished = 1;
break;
@@ -4614,6 +4617,17 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
return rol64(wwn, 32);
}
+static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ return LPFC_MAX_SG_TABLESIZE;
+ else
+ return phba->cfg_scsi_seg_cnt;
+ else
+ return phba->cfg_sg_seg_cnt;
+}
+
/**
* lpfc_vmid_res_alloc - Allocates resources for VMID
* @phba: pointer to lpfc hba data structure.
@@ -4716,42 +4730,26 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Seed template for SCSI host registration */
if (dev == &phba->pcidev->dev) {
- template = &phba->port_template;
-
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
/* Seed physical port template */
- memcpy(template, &lpfc_template, sizeof(*template));
+ template = &lpfc_template;
if (use_no_reset_hba)
/* template is for a no reset SCSI Host */
template->eh_host_reset_handler = NULL;
- /* Template for all vports this physical port creates */
- memcpy(&phba->vport_template, &lpfc_template,
- sizeof(*template));
- phba->vport_template.shost_groups = lpfc_vport_groups;
- phba->vport_template.eh_bus_reset_handler = NULL;
- phba->vport_template.eh_host_reset_handler = NULL;
- phba->vport_template.vendor_id = 0;
-
- /* Initialize the host templates with updated value */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- template->sg_tablesize = phba->cfg_scsi_seg_cnt;
- phba->vport_template.sg_tablesize =
- phba->cfg_scsi_seg_cnt;
- } else {
- template->sg_tablesize = phba->cfg_sg_seg_cnt;
- phba->vport_template.sg_tablesize =
- phba->cfg_sg_seg_cnt;
- }
-
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
} else {
/* NVMET is for physical port only */
- memcpy(template, &lpfc_template_nvme,
- sizeof(*template));
+ template = &lpfc_template_nvme;
}
} else {
- template = &phba->vport_template;
+ /* Seed vport template */
+ template = &lpfc_vport_template;
+
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
}
shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
@@ -4784,11 +4782,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-
- if (phba->cfg_xpsgl && !phba->nvmet_support)
- shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
- else
- shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
@@ -4819,7 +4812,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
rc = lpfc_vmid_res_alloc(phba, vport);
if (rc)
- goto out;
+ goto out_put_shost;
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
@@ -4837,16 +4830,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
- goto out_put_shost;
+ goto out_free_vmid;
spin_lock_irq(&phba->port_list_lock);
list_add_tail(&vport->listentry, &phba->port_list);
spin_unlock_irq(&phba->port_list_lock);
return vport;
-out_put_shost:
+out_free_vmid:
kfree(vport->vmid);
bitmap_free(vport->vmid_priority_range);
+out_put_shost:
scsi_host_put(shost);
out:
return NULL;
@@ -5569,38 +5563,12 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
- struct rxtable_entry *entry;
- int cnt = 0, head, tail, last, start;
-
- head = atomic_read(&phba->rxtable_idx_head);
- tail = atomic_read(&phba->rxtable_idx_tail);
- if (!phba->rxtable || head == tail) {
- lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
- "4411 Rxtable is empty\n");
- return;
- }
- last = tail;
- start = head;
-
- /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
- while (start != last) {
- if (start)
- start--;
- else
- start = LPFC_MAX_RXMONITOR_ENTRY - 1;
- entry = &phba->rxtable[start];
+ if (!phba->rx_monitor) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
- "Lat %lld ASz %lld Info %02d BWUtil %d "
- "Int %d slot %d\n",
- cnt, entry->max_bytes_per_interval,
- entry->total_bytes, entry->rcv_bytes,
- entry->avg_io_latency, entry->avg_io_size,
- entry->cmf_info, entry->timer_utilization,
- entry->timer_interval, start);
- cnt++;
- if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
- return;
+ "4411 Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
+ LPFC_MAX_RXMONITOR_DUMP);
}
}
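lpfc_cgn_dump_rxmonitor() above and the debugfs reader earlier in this patch now pull formatted output from lpfc_rx_monitor_report(), while the CMF timer in the next hunk feeds samples in through lpfc_rx_monitor_record(); the ring itself is implemented outside this diff. A bare-bones sketch of what recording into such a ring could look like; only struct rx_info_entry and LPFC_MAX_RXMONITOR_ENTRY come from the patch, the structure and function below are illustrative:

/* Illustrative sketch of a locked ring buffer for rx_info_entry samples. */
struct example_rx_monitor {
	struct rx_info_entry *ring;	/* LPFC_MAX_RXMONITOR_ENTRY slots */
	u32 head_idx;			/* next slot to overwrite */
	u32 entries;			/* ring capacity */
	spinlock_t lock;		/* serializes writers and the reporter */
};

static void example_rx_monitor_record(struct example_rx_monitor *mon,
				      struct rx_info_entry *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&mon->lock, flags);
	memcpy(&mon->ring[mon->head_idx], entry, sizeof(*entry));
	mon->head_idx = (mon->head_idx + 1) % mon->entries;
	spin_unlock_irqrestore(&mon->lock, flags);
}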
@@ -6007,9 +5975,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
{
struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
cmf_timer);
- struct rxtable_entry *entry;
+ struct rx_info_entry entry;
uint32_t io_cnt;
- uint32_t head, tail;
uint32_t busy, max_read;
uint64_t total, rcv, lat, mbpi, extra, cnt;
int timer_interval = LPFC_CMF_INTERVAL;
@@ -6129,40 +6096,30 @@ lpfc_cmf_timer(struct hrtimer *timer)
}
/* Save rxmonitor information for debug */
- if (phba->rxtable) {
- head = atomic_xchg(&phba->rxtable_idx_head,
- LPFC_RXMONITOR_TABLE_IN_USE);
- entry = &phba->rxtable[head];
- entry->total_bytes = total;
- entry->cmf_bytes = total + extra;
- entry->rcv_bytes = rcv;
- entry->cmf_busy = busy;
- entry->cmf_info = phba->cmf_active_info;
+ if (phba->rx_monitor) {
+ entry.total_bytes = total;
+ entry.cmf_bytes = total + extra;
+ entry.rcv_bytes = rcv;
+ entry.cmf_busy = busy;
+ entry.cmf_info = phba->cmf_active_info;
if (io_cnt) {
- entry->avg_io_latency = div_u64(lat, io_cnt);
- entry->avg_io_size = div_u64(rcv, io_cnt);
+ entry.avg_io_latency = div_u64(lat, io_cnt);
+ entry.avg_io_size = div_u64(rcv, io_cnt);
} else {
- entry->avg_io_latency = 0;
- entry->avg_io_size = 0;
+ entry.avg_io_latency = 0;
+ entry.avg_io_size = 0;
}
- entry->max_read_cnt = max_read;
- entry->io_cnt = io_cnt;
- entry->max_bytes_per_interval = mbpi;
+ entry.max_read_cnt = max_read;
+ entry.io_cnt = io_cnt;
+ entry.max_bytes_per_interval = mbpi;
if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
- entry->timer_utilization = phba->cmf_last_ts;
+ entry.timer_utilization = phba->cmf_last_ts;
else
- entry->timer_utilization = ms;
- entry->timer_interval = ms;
+ entry.timer_utilization = ms;
+ entry.timer_interval = ms;
phba->cmf_last_ts = 0;
- /* Increment rxtable index */
- head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
- tail = atomic_read(&phba->rxtable_idx_tail);
- if (head == tail) {
- tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
- atomic_set(&phba->rxtable_idx_tail, tail);
- }
- atomic_set(&phba->rxtable_idx_head, head);
+ lpfc_rx_monitor_record(phba->rx_monitor, &entry);
}
if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
@@ -6232,6 +6189,7 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
{
uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
+ u8 cnt = 0;
phba->sli4_hba.link_state.speed =
lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
@@ -6250,26 +6208,36 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
phba->trunk_link.link1.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
phba->trunk_link.link2.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
phba->trunk_link.link3.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
+ cnt++;
}
+ if (cnt)
+ phba->trunk_link.phy_lnk_speed =
+ phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
+ else
+ phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
+
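For illustration only (assuming, as elsewhere in this handler, that logical_speed is carried in Mbps): a trunk with all four member ports configured (cnt = 4) and a logical speed of 128000 would report phy_lnk_speed = 128000 / (4 * 1000) = 32, roughly 32 Gb/s per member link, while cnt = 0 leaves the per-link speed marked LPFC_LINK_SPEED_UNKNOWN.
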
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2910 Async FC Trunking Event - Speed:%d\n"
"\tLogical speed:%d "
@@ -6347,7 +6315,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
LPFC_FC_LA_TYPE_LINK_DOWN)
phba->sli4_hba.link_state.logical_speed = 0;
- else if (!phba->sli4_hba.conf_trunk)
+ else if (!phba->sli4_hba.conf_trunk)
phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
@@ -6465,7 +6433,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"2901 Async SLI event - Type:%d, Event Data: x%08x "
"x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- acqe_sli->reserved, acqe_sli->trailer);
+ acqe_sli->event_data3, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -6494,7 +6462,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
temp_event_data.event_code = LPFC_NORMAL_TEMP;
temp_event_data.data = (uint32_t)acqe_sli->event_data1;
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
"3191 Normal Temperature:%d Celsius - Port Name %c\n",
acqe_sli->event_data1, port_name);
@@ -6672,6 +6640,15 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
}
}
break;
+ case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
+ /* May be accompanied by a temperature event */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
+ "2902 Remote Degrade Signaling: x%08x x%08x "
+ "x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ acqe_sli->event_data3);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Unrecognized SLI event, type: 0x%x",
@@ -7085,6 +7062,12 @@ lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
spin_unlock_irq(&phba->hbalock);
}
+static const char * const lpfc_cmf_mode_to_str[] = {
+ "OFF",
+ "MANAGED",
+ "MONITOR",
+};
+
/**
* lpfc_cgn_params_parse - Process a FW cong parm change event
* @phba: pointer to lpfc hba data structure.
@@ -7104,6 +7087,7 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
{
struct lpfc_cgn_info *cp;
uint32_t crc, oldmode;
+ char acr_string[4] = {0};
/* Make sure the FW has encoded the correct magic number to
* validate the congestion parameter in FW memory.
@@ -7180,9 +7164,6 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
lpfc_issue_els_edc(phba->pport, 0);
break;
case LPFC_CFG_MONITOR:
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4661 Switch from MANAGED to "
- "`MONITOR mode\n");
phba->cmf_max_bytes_per_interval =
phba->cmf_link_byte_count;
@@ -7201,14 +7182,26 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
lpfc_issue_els_edc(phba->pport, 0);
break;
case LPFC_CFG_MANAGED:
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4662 Switch from MONITOR to "
- "MANAGED mode\n");
lpfc_cmf_signal_init(phba);
break;
}
break;
}
+ if (oldmode != LPFC_CFG_OFF ||
+ oldmode != phba->cgn_p.cgn_param_mode) {
+ if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
+ scnprintf(acr_string, sizeof(acr_string), "%u",
+ phba->cgn_p.cgn_param_level0);
+ else
+ scnprintf(acr_string, sizeof(acr_string), "NA");
+
+ dev_info(&phba->pcidev->dev, "%d: "
+ "4663 CMF: Mode %s acr %s\n",
+ phba->brd_no,
+ lpfc_cmf_mode_to_str
+ [phba->cgn_p.cgn_param_mode],
+ acr_string);
+ }
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
"4669 FW cgn parm buf wrong magic 0x%x "
@@ -8315,8 +8308,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
&phba->pcidev->dev,
phba->cfg_sg_dma_buf_size,
i, 0);
- if (!phba->lpfc_sg_dma_buf_pool)
+ if (!phba->lpfc_sg_dma_buf_pool) {
+ rc = -ENOMEM;
goto out_free_bsmbx;
+ }
phba->lpfc_cmd_rsp_buf_pool =
dma_pool_create("lpfc_cmd_rsp_buf_pool",
@@ -8324,8 +8319,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp),
i, 0);
- if (!phba->lpfc_cmd_rsp_buf_pool)
+ if (!phba->lpfc_cmd_rsp_buf_pool) {
+ rc = -ENOMEM;
goto out_free_sg_dma_buf;
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -12416,7 +12413,7 @@ lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
for (i = 0; i < phba->cfg_irq_chann; i++) {
eqhdl = lpfc_get_eq_hdl(i);
- eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->irq = LPFC_IRQ_EMPTY;
eqhdl->phba = phba;
}
}
@@ -12789,7 +12786,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
__lpfc_cpuhp_remove(phba);
@@ -13053,9 +13050,17 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_DRIVER_HANDLER_NAME"%d", index);
eqhdl->idx = index;
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
- &lpfc_sli4_hba_intr_handler, 0,
- name, eqhdl);
+ rc = pci_irq_vector(phba->pcidev, index);
+ if (rc < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0489 MSI-X fast-path (%d) "
+ "pci_irq_vec failed (%d)\n", index, rc);
+ goto cfg_fail_out;
+ }
+ eqhdl->irq = rc;
+
+ rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -13063,8 +13068,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
goto cfg_fail_out;
}
- eqhdl->irq = pci_irq_vector(phba->pcidev, index);
-
if (aff_mask) {
/* If found a neighboring online cpu, set affinity */
if (cpu_select < nr_cpu_ids)
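These interrupt-setup hunks (and the MSI and INTx paths in the hunks that follow) now validate the pci_irq_vector() lookup and only store the result on success, which is also why eqhdl->irq is widened from u16 to int later in this series: a negative errno cannot be represented in a u16. Below is a self-contained user-space sketch of the check-before-store pattern; lookup_vector() is a hypothetical stand-in for pci_irq_vector(), and the vector numbers are made up.

#include <errno.h>
#include <stdio.h>

#define NR_VECTORS 4

/* Hypothetical stand-in for pci_irq_vector(): returns an IRQ number for a
 * vector index, or a negative errno if the index is out of range. */
static int lookup_vector(unsigned int index)
{
	static const int vectors[NR_VECTORS] = { 34, 35, 36, 37 };

	if (index >= NR_VECTORS)
		return -EINVAL;
	return vectors[index];
}

struct eq_handle {
	int irq;	/* int, not u16: must be able to carry an error/unset state */
};

int main(void)
{
	struct eq_handle eqhdl = { .irq = -1 };
	int rc;

	/* Validate the lookup before recording (or requesting) the IRQ. */
	rc = lookup_vector(2);
	if (rc < 0) {
		fprintf(stderr, "vector lookup failed (%d)\n", rc);
		return 1;
	}
	eqhdl.irq = rc;
	printf("using irq %d\n", eqhdl.irq);
	return 0;
}
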
@@ -13181,7 +13184,14 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
}
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ rc = pci_irq_vector(phba->pcidev, 0);
+ if (rc < 0) {
+ pci_free_irq_vectors(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0496 MSI pci_irq_vec failed (%d)\n", rc);
+ return rc;
+ }
+ eqhdl->irq = rc;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
@@ -13208,8 +13218,8 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
* MSI-X -> MSI -> IRQ.
*
* Return codes
- * 0 - successful
- * other values - error
+ * Interrupt mode (2, 1, 0) - successful
+ * LPFC_INTR_ERROR - error
**/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
@@ -13254,7 +13264,14 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
intr_mode = 0;
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ retval = pci_irq_vector(phba->pcidev, 0);
+ if (retval < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0502 INTR pci_irq_vec failed (%d)\n",
+ retval);
+ return LPFC_INTR_ERROR;
+ }
+ eqhdl->irq = retval;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 4d455da9cd69..b39cefcd8703 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,7 +35,7 @@
#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
#define LOG_LIBDFC 0x00002000 /* Libdfc events */
#define LOG_VPORT 0x00004000 /* NPIV events */
-#define LOG_SECURITY 0x00008000 /* Security events */
+#define LOG_LDS_EVENT 0x00008000 /* Link Degrade Signaling events */
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 870e53b8f81d..89cbeba06aea 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -344,9 +344,12 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
phba->cgn_i = NULL;
}
- /* Free RX table */
- kfree(phba->rxtable);
- phba->rxtable = NULL;
+ /* Free RX Monitor */
+ if (phba->rx_monitor) {
+ lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ }
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 938a5e435943..7a1563564df7 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -112,62 +112,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
#define LPFC_INVALID_REFTAG ((u32)-1)
/**
- * lpfc_update_stats - Update statistical data for the command completion
- * @vport: The virtual port on which this call is executing.
- * @lpfc_cmd: lpfc scsi command object pointer.
- *
- * This function is called when there is a command completion and this
- * function updates the statistical data for the command completion.
- **/
-static void
-lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
- struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- unsigned long flags;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- unsigned long latency;
- int i;
-
- if (!vport->stat_data_enabled ||
- vport->stat_data_blocked ||
- (cmd->result))
- return;
-
- latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
- rdata = lpfc_cmd->rdata;
- pnode = rdata->pnode;
-
- spin_lock_irqsave(shost->host_lock, flags);
- if (!pnode ||
- !pnode->lat_data ||
- (phba->bucket_type == LPFC_NO_BUCKET)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return;
- }
-
- if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
- i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
- phba->bucket_step;
- /* check array subscript bounds */
- if (i < 0)
- i = 0;
- else if (i >= LPFC_MAX_BUCKET_COUNT)
- i = LPFC_MAX_BUCKET_COUNT - 1;
- } else {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
- if (latency <= (phba->bucket_base +
- ((1<<i)*phba->bucket_step)))
- break;
- }
-
- pnode->lat_data[i].cmd_count++;
- spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
-/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
* @phba: The Hba for which this call is being executed.
*
@@ -4335,8 +4279,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
cmd->retries, scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
-
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4617,7 +4559,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -6853,3 +6794,30 @@ struct scsi_host_template lpfc_template = {
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
+
+struct scsi_host_template lpfc_vport_template = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .proc_name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
+ .eh_should_retry_cmd = fc_eh_should_retry_cmd,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = NULL,
+ .eh_host_reset_handler = NULL,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .shost_groups = lpfc_vport_groups,
+ .max_sectors = 0xFFFFFFFF,
+ .vendor_id = 0,
+ .change_queue_depth = scsi_change_queue_depth,
+ .track_queue_depth = 1,
+};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 3836d7f6a575..eae56944f31b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -126,10 +126,6 @@ struct fcp_cmnd {
};
-struct lpfc_scsicmd_bkt {
- uint32_t cmd_count;
-};
-
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 608016725db9..99d06dc7ddf6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1916,6 +1916,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
unsigned long iflags;
u32 ret_val;
u32 atot, wtot, max;
+ u16 warn_sync_period = 0;
/* First address any alarm / warning activity */
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
@@ -1970,10 +1971,14 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
lpfc_acqe_cgn_frequency;
bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
+ warn_sync_period = lpfc_acqe_cgn_frequency;
} else {
/* We hit a FPIN warning condition */
bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
+ if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
+ warn_sync_period =
+ LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
}
}
@@ -1989,6 +1994,7 @@ initpath:
bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
@@ -2850,6 +2856,7 @@ void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
uint16_t rpi, vpi;
@@ -2862,6 +2869,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!(phba->pport->load_flag & FC_UNLOADING) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ if (mp) {
+ pmb->ctx_buf = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -6202,6 +6215,9 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
LPFC_MBOXQ_t *mbox;
+ *extnt_count = 0;
+ *extnt_size = 0;
+
mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -6817,8 +6833,13 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
phba->sli4_hba.pc_sli4_params.mi_ver);
break;
+ case LPFC_SET_LD_SIGNAL:
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
+ mbox->u.mqe.un.set_feature.param_len = 16;
+ bf_set(lpfc_mbx_set_feature_lds_qry,
+ &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
+ break;
case LPFC_SET_ENABLE_CMF:
- bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
mbox->u.mqe.un.set_feature.param_len = 4;
bf_set(lpfc_mbx_set_feature_cmf,
@@ -7814,6 +7835,62 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
static void
+lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ union lpfc_sli4_cfg_shdr *shdr;
+ u32 shdr_status, shdr_add_status;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
+ "4622 SET_FEATURE (x%x) mbox failed, "
+ "status x%x add_status x%x, mbx status x%x\n",
+ LPFC_SET_LD_SIGNAL, shdr_status,
+ shdr_add_status, pmb->u.mb.mbxStatus);
+ phba->degrade_activate_threshold = 0;
+ phba->degrade_deactivate_threshold = 0;
+ phba->fec_degrade_interval = 0;
+ goto out;
+ }
+
+ phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
+ phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
+ phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
+ "4624 Success: da x%x dd x%x interval x%x\n",
+ phba->degrade_activate_threshold,
+ phba->degrade_deactivate_threshold,
+ phba->fec_degrade_interval);
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+int
+lpfc_read_lds_params(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
@@ -7960,6 +8037,172 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
}
/**
+ * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entries: Number of rx_info_entry objects to allocate in ring
+ *
+ * Return:
+ * 0 - Success
+ * ENOMEM - Failure to kmalloc
+ **/
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries)
+{
+ rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
+ GFP_KERNEL);
+ if (!rx_monitor->ring)
+ return -ENOMEM;
+
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_lock_init(&rx_monitor->lock);
+ rx_monitor->entries = entries;
+
+ return 0;
+}
+
+/**
+ * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ **/
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
+{
+ spin_lock(&rx_monitor->lock);
+ kfree(rx_monitor->ring);
+ rx_monitor->ring = NULL;
+ rx_monitor->entries = 0;
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_unlock(&rx_monitor->lock);
+}
+
+/**
+ * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entry: Pointer to rx_info_entry
+ *
+ * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
+ * deep copy of rx_info_entry not a shallow copy of the rx_info_entry ptr.
+ *
+ * This is called from lpfc_cmf_timer, which is in timer/softirq context.
+ *
+ * If the ring is full, the oldest entry is overwritten (best-effort FIFO order).
+ **/
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+
+ spin_lock(ring_lock);
+ memcpy(&ring[*tail_idx], entry, sizeof(*entry));
+ *tail_idx = (*tail_idx + 1) % ring_size;
+
+ /* Best effort of FIFO saved data */
+ if (*tail_idx == *head_idx)
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ spin_unlock(ring_lock);
+}
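The record path above implements a fixed-size head/tail ring: the entry is deep-copied into the tail slot, tail advances, and if tail catches up with head the oldest entry is dropped. A minimal user-space sketch of that behaviour follows; struct sample, ring_record and RING_ENTRIES are illustrative stand-ins, not the driver's types.

#include <stdio.h>
#include <string.h>

#define RING_ENTRIES 4		/* small ring so the overwrite is easy to see */

struct sample {			/* stand-in for struct rx_info_entry */
	unsigned long long total_bytes;
};

struct ring {
	struct sample slot[RING_ENTRIES];
	unsigned int head;	/* oldest readable entry */
	unsigned int tail;	/* next slot to write */
};

/* Deep-copy *entry into the ring; if full, drop the oldest (best-effort FIFO). */
static void ring_record(struct ring *r, const struct sample *entry)
{
	memcpy(&r->slot[r->tail], entry, sizeof(*entry));
	r->tail = (r->tail + 1) % RING_ENTRIES;
	if (r->tail == r->head)			/* ring full: advance head */
		r->head = (r->head + 1) % RING_ENTRIES;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };

	for (unsigned long long i = 1; i <= 6; i++) {
		struct sample s = { .total_bytes = i * 1000 };

		ring_record(&r, &s);
	}
	/* Six records into a four-slot ring: the oldest entries were overwritten,
	 * so only the newest three remain (4000, 5000, 6000). */
	for (unsigned int idx = r.head; idx != r.tail; idx = (idx + 1) % RING_ENTRIES)
		printf("%llu\n", r.slot[idx].total_bytes);
	return 0;
}
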
+
+/**
+ * lpfc_rx_monitor_report - Read out rx_monitor's ring
+ * @phba: Pointer to lpfc_hba object
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @buf: Pointer to char buffer that will contain rx monitor info data
+ * @buf_len: Length of buf, including the null terminator
+ * @max_read_entries: Maximum number of entries to read out of ring
+ *
+ * Used to dump/read what's in rx_monitor's ring buffer.
+ *
+ * If buf is NULL || buf_len == 0, then it is implied that we want to log the
+ * information to kmsg instead of filling out buf.
+ *
+ * Return:
+ * Number of entries read out of the ring
+ **/
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ struct rx_info_entry *entry;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+ u32 cnt = 0;
+ char tmp[DBG_LOG_STR_SZ] = {0};
+ bool log_to_kmsg = (!buf || !buf_len) ? true : false;
+
+ if (!log_to_kmsg) {
+ /* clear the buffer to be sure */
+ memset(buf, 0, buf_len);
+
+ scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
+ "%-8s%-8s%-8s%-16s\n",
+ "MaxBPI", "Tot_Data_CMF",
+ "Tot_Data_Cmd", "Tot_Data_Cmpl",
+ "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
+ "IO_cnt", "Info", "BWutil(ms)");
+ }
+
+ /* Needs to be _bh because record is called from timer interrupt
+ * context
+ */
+ spin_lock_bh(ring_lock);
+ while (*head_idx != *tail_idx) {
+ entry = &ring[*head_idx];
+
+ /* Read out this entry's data. */
+ if (!log_to_kmsg) {
+ /* If !log_to_kmsg, then store to buf. */
+ scnprintf(tmp, sizeof(tmp),
+ "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
+ "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
+ *head_idx, entry->max_bytes_per_interval,
+ entry->cmf_bytes, entry->total_bytes,
+ entry->rcv_bytes, entry->avg_io_latency,
+ entry->avg_io_size, entry->max_read_cnt,
+ entry->cmf_busy, entry->io_cnt,
+ entry->cmf_info, entry->timer_utilization,
+ entry->timer_interval);
+
+ /* Check for buffer overflow */
+ if ((strlen(buf) + strlen(tmp)) >= buf_len)
+ break;
+
+ /* Append entry's data to buffer */
+ strlcat(buf, tmp, buf_len);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4410 %02u: MBPI %llu Xmit %llu "
+ "Cmpl %llu Lat %llu ASz %llu Info %02u "
+ "BWUtil %u Int %u slot %u\n",
+ cnt, entry->max_bytes_per_interval,
+ entry->total_bytes, entry->rcv_bytes,
+ entry->avg_io_latency,
+ entry->avg_io_size, entry->cmf_info,
+ entry->timer_utilization,
+ entry->timer_interval, *head_idx);
+ }
+
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ /* Don't feed more than max_read_entries */
+ cnt++;
+ if (cnt >= max_read_entries)
+ break;
+ }
+ spin_unlock_bh(ring_lock);
+
+ return cnt;
+}
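lpfc_rx_monitor_report consumes entries from head toward tail, formats each one either into a caller-supplied bounded buffer or to the kernel log, stops when the buffer would overflow or max_read_entries is reached, and returns how many entries were read. A small portable sketch of that drain loop is below; it assumes snprintf offsets are an acceptable stand-in for the scnprintf/strlcat pair used in the driver, and the names (ring_report, struct sample) are illustrative only.

#include <stdio.h>
#include <string.h>

#define RING_ENTRIES 8

struct sample {
	unsigned int seq;
	unsigned long long total_bytes;
};

/* Drain up to max_read entries from a head/tail ring into a bounded text
 * buffer: consume from head, stop when the buffer would overflow or the
 * entry cap is reached, and return the number of entries read out. */
static unsigned int ring_report(struct sample *ring, unsigned int *head,
				unsigned int *tail, char *buf, size_t buf_len,
				unsigned int max_read)
{
	size_t used = 0;
	unsigned int cnt = 0;

	while (*head != *tail && cnt < max_read) {
		struct sample *e = &ring[*head];
		int n = snprintf(buf + used, buf_len - used, "%03u: bytes %llu\n",
				 e->seq, e->total_bytes);

		if (n < 0 || (size_t)n >= buf_len - used)
			break;		/* would overflow: leave entry for a later read */
		used += (size_t)n;
		*head = (*head + 1) % RING_ENTRIES;
		cnt++;
	}
	return cnt;
}

int main(void)
{
	struct sample ring[RING_ENTRIES];
	unsigned int head = 0, tail = 5;
	char buf[128];

	for (unsigned int i = 0; i < 5; i++)
		ring[i] = (struct sample){ .seq = i, .total_bytes = (i + 1) * 512ULL };

	unsigned int n = ring_report(ring, &head, &tail, buf, sizeof(buf), 3);

	printf("read %u entries:\n%s", n, buf);
	return 0;
}
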
+
+/**
* lpfc_cmf_setup - Initialize idle_stat tracking
* @phba: Pointer to HBA context object.
*
@@ -8133,19 +8376,29 @@ no_cmf:
phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
/* Allocate RX Monitor Buffer */
- if (!phba->rxtable) {
- phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
- sizeof(struct rxtable_entry),
- GFP_KERNEL);
- if (!phba->rxtable) {
+ if (!phba->rx_monitor) {
+ phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
+ GFP_KERNEL);
+
+ if (!phba->rx_monitor) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2644 Failed to alloc memory "
"for RX Monitor Buffer\n");
return -ENOMEM;
}
+
+ /* Instruct the rx_monitor object to instantiate its ring */
+ if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
+ LPFC_MAX_RXMONITOR_ENTRY)) {
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2645 Failed to alloc memory "
+ "for RX Monitor's Ring\n");
+ return -ENOMEM;
+ }
}
- atomic_set(&phba->rxtable_idx_head, 0);
- atomic_set(&phba->rxtable_idx_tail, 0);
+
return 0;
}
@@ -10322,12 +10575,10 @@ static int
__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
- int rc;
struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
lpfc_prep_embed_io(phba, lpfc_cmd);
- rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
- return rc;
+ return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
}
void
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1ddad5b170a6..cbb1aa1cf025 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -489,7 +489,7 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
- uint16_t irq;
+ int irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
@@ -611,6 +611,8 @@ struct lpfc_vector_map_info {
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
+#define LPFC_IRQ_EMPTY 0xffffffff
+
/* Multi-XRI pool */
#define XRI_BATCH 8
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 63eba9928e4b..192d5630a44d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.5"
+#define LPFC_DRIVER_VERSION "14.2.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index f64ced04b912..ed1d7f7b88a3 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -245,9 +245,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
/* allocate the per cpu variable for holding */
/* the last access time stamp only if VMID is enabled */
if (!vmp->last_io_time)
- vmp->last_io_time = __alloc_percpu(sizeof(u64),
- __alignof__(struct
- lpfc_vmid));
+ vmp->last_io_time = alloc_percpu_gfp(u64, GFP_ATOMIC);
if (!vmp->last_io_time) {
hash_del(&vmp->hnode);
vmp->flag = LPFC_VMID_SLOT_FREE;
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e7efb025ed50..4d171f5c213f 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -809,74 +809,3 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
kfree(vports);
}
-
-/**
- * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
- * @vport: Pointer to vport object.
- *
- * This function resets the statistical data for the vport. This function
- * is called with the host_lock held
- **/
-void
-lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->lat_data)
- memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
- sizeof(struct lpfc_scsicmd_bkt));
- }
-}
-
-
-/**
- * lpfc_alloc_bucket - Allocate data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * This function allocates data buffer required for all the FC
- * nodes of the vport to collect statistical data.
- **/
-void
-lpfc_alloc_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
-
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_ATOMIC);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0287 lpfc_alloc_bucket failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
- }
-}
-
-/**
- * lpfc_free_bucket - Free data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * Th function frees statistical data buffer of all the FC
- * nodes of the vport.
- **/
-void
-lpfc_free_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
- }
-}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index f4b8528dd2e7..fa60c146c169 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -115,8 +115,4 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
-void lpfc_vport_reset_stat_data(struct lpfc_vport *);
-void lpfc_alloc_bucket(struct lpfc_vport *);
-void lpfc_free_bucket(struct lpfc_vport *);
-
#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 157c3bdb50be..132de68c14e9 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -3979,7 +3979,7 @@ megaraid_mbox_app_hndl_show(struct device *dev, struct device_attribute *attr, c
app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
- return snprintf(buf, 8, "%u\n", app_hndl);
+ return sysfs_emit(buf, "%u\n", app_hndl);
}
@@ -4048,7 +4048,7 @@ megaraid_mbox_ld_show(struct device *dev, struct device_attribute *attr, char *b
}
}
- return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
+ return sysfs_emit(buf, "%d %d %d %d\n", scsi_id, logical_drv,
ldid_map, app_hndl);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 4ae85d590801..9be4ba61a076 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4021,10 +4021,8 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
u32 mfiStatus;
u32 fw_state;
- if ((mfiStatus = instance->instancet->check_reset(instance,
- instance->reg_set)) == 1) {
+ if (instance->instancet->check_reset(instance, instance->reg_set) == 1)
return IRQ_HANDLED;
- }
mfiStatus = instance->instancet->clear_intr(instance);
if (mfiStatus == 0) {
@@ -5155,9 +5153,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
fusion->current_map_sz = ventura_map_sz;
fusion->max_map_sz = ventura_map_sz;
} else {
- fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *
- (instance->fw_supported_vd_count - 1));
+ fusion->old_map_sz =
+ struct_size((struct MR_FW_RAID_MAP *)0, ldSpanMap,
+ instance->fw_supported_vd_count);
fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
fusion->max_map_sz =
@@ -5790,10 +5788,10 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
{
int i;
struct fusion_context *fusion = instance->ctrl_context;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz = struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0, seq,
+ MAX_PHYSICAL_DEVICES);
instance->use_seqnum_jbod_fp =
instance->support_seqnum_jbod_fp;
@@ -7968,7 +7966,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
struct Scsi_Host *host;
struct megasas_instance *instance;
struct fusion_context *fusion;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
@@ -8040,9 +8038,9 @@ skip_firing_dcmds:
if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) *
- (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz =
+ struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0,
+ seq, MAX_PHYSICAL_DEVICES);
for (i = 0; i < 2 ; i++) {
if (fusion->ld_map[i])
dma_free_coherent(&instance->pdev->dev,
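The megaraid hunks above replace the open-coded sizeof(struct ...) + (count - 1) * sizeof(element) arithmetic with struct_size(), and the header changes below convert the one-element trailing arrays into C99 flexible array members, so the "- 1" correction disappears. Here is a standalone sketch of the same sizing, with offsetof spelling out what struct_size() computes (the kernel helper in <linux/overflow.h> additionally guards against arithmetic overflow); the struct and field names are simplified stand-ins for MR_PD_CFG_SEQ_NUM_SYNC.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins with a flexible array member instead of the old
 * one-element array. */
struct pd_cfg_seq {
	uint16_t seq_num;
	uint16_t dev_handle;
};

struct pd_cfg_seq_num_sync {
	uint32_t size;
	uint32_t count;
	struct pd_cfg_seq seq[];	/* flexible array member */
};

int main(void)
{
	unsigned int n = 256;		/* e.g. MAX_PHYSICAL_DEVICES */

	/* Equivalent of struct_size(sync, seq, n): header plus n trailing
	 * elements. No "- 1" is needed once seq[] is a flexible array member. */
	size_t sz = offsetof(struct pd_cfg_seq_num_sync, seq) +
		    n * sizeof(struct pd_cfg_seq);

	struct pd_cfg_seq_num_sync *sync = calloc(1, sz);

	if (!sync)
		return 1;
	sync->count = n;
	printf("allocation size: %zu bytes\n", sz);
	free(sync);
	return 0;
}
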
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 83f69c33b01a..da1cad1ee123 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -326,9 +326,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
else if (instance->supportmax256vd)
expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
else
- expected_size =
- (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
+ expected_size = struct_size((struct MR_FW_RAID_MAP *)0,
+ ldSpanMap,
+ le16_to_cpu(pDrvRaidMap->ldCount));
if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 09c5fe37754c..6650f8c8e9b0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1310,7 +1310,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
- pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
+ pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES);
cmd = megasas_get_cmd(instance);
if (!cmd) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index ce84f811e5e1..49e9a9048ee7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -942,7 +942,7 @@ struct MR_FW_RAID_MAP {
u8 reserved2[7];
struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
- struct MR_LD_SPAN_MAP ldSpanMap[1];
+ struct MR_LD_SPAN_MAP ldSpanMap[];
};
struct IO_REQUEST_INFO {
@@ -1053,7 +1053,7 @@ struct MR_FW_RAID_MAP_DYNAMIC {
struct MR_RAID_MAP_DESC_TABLE
raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
/* Variable Size buffer containing all data */
- u32 raid_map_desc_data[1];
+ u32 raid_map_desc_data[];
}; /* Dynamically sized RAID Map structure */
#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
@@ -1148,7 +1148,7 @@ typedef struct LOG_BLOCK_SPAN_INFO {
struct MR_FW_RAID_MAP_ALL {
struct MR_FW_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
} __attribute__ ((packed));
struct MR_DRV_RAID_MAP {
@@ -1182,7 +1182,7 @@ struct MR_DRV_RAID_MAP {
devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
- struct MR_LD_SPAN_MAP ldSpanMap[1];
+ struct MR_LD_SPAN_MAP ldSpanMap[];
};
@@ -1193,7 +1193,7 @@ struct MR_DRV_RAID_MAP {
struct MR_DRV_RAID_MAP_ALL {
struct MR_DRV_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
} __packed;
@@ -1249,7 +1249,7 @@ struct MR_PD_CFG_SEQ {
struct MR_PD_CFG_SEQ_NUM_SYNC {
__le32 size;
__le32 count;
- struct MR_PD_CFG_SEQ seq[1];
+ struct MR_PD_CFG_SEQ seq[];
} __packed;
/* stream detection */
diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
index f5cdbe48c150..ef86ca46646b 100644
--- a/drivers/scsi/mpi3mr/Makefile
+++ b/drivers/scsi/mpi3mr/Makefile
@@ -3,3 +3,4 @@ obj-m += mpi3mr.o
mpi3mr-y += mpi3mr_os.o \
mpi3mr_fw.o \
mpi3mr_app.o \
+ mpi3mr_transport.o
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 4cd9f24e544c..0a2af48915a5 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2017-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2017-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_CNFG_H
#define MPI30_CNFG_H 1
@@ -100,6 +99,7 @@ struct mpi3_config_page_header {
#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xf0)
#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4)
#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0f)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
#define MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
#define MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
#define MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
@@ -135,6 +135,16 @@ struct mpi3_config_page_header {
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_MASK (0x04000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_SHIFT (26)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_MASK (0x02000000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_SHIFT (25)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_MASK (0x01000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_SHIFT (24)
+#define MPI3_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_WITHIN (0x00200000)
+#define MPI3_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
#define MPI3_SAS_PHYINFO_REASON_MASK (0x000f0000)
#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
@@ -210,7 +220,7 @@ struct mpi3_man_page0 {
u8 board_rework_day;
u8 board_rework_month;
__le16 board_rework_year;
- __le64 board_revision;
+ u8 board_revision[8];
u8 e_pack_fru[16];
u8 product_name[256];
};
@@ -226,6 +236,15 @@ struct mpi3_man_page1 {
};
#define MPI3_MAN1_PAGEVERSION (0x00)
+struct mpi3_man_page2 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 reserved0c[3];
+ u8 oem_board_tracer_number[32];
+};
+#define MPI3_MAN2_PAGEVERSION (0x00)
+#define MPI3_MAN2_FLAGS_TRACER_PRESENT (0x01)
struct mpi3_man5_phy_entry {
__le64 ioc_wwid;
__le64 device_name;
@@ -338,6 +357,8 @@ struct mpi3_man7_receptacle_info {
#define MPI3_MAN7_LOCATION_INTERNAL (0x01)
#define MPI3_MAN7_LOCATION_EXTERNAL (0x02)
#define MPI3_MAN7_LOCATION_VIRTUAL (0x03)
+#define MPI3_MAN7_LOCATION_HOST (0x04)
+#define MPI3_MAN7_CONNECTOR_TYPE_NO_INFO (0x00)
#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10)
#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00)
#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10)
@@ -369,7 +390,8 @@ struct mpi3_man8_phy_info {
__le32 reserved0c;
};
-#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_HOST_PHY (0xff)
+#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_NOT_ASSOCIATED (0xff)
+#define MPI3_MAN8_PHY_INFO_CONNECTOR_LANE_NOT_ASSOCIATED (0xff)
#ifndef MPI3_MAN8_PHY_INFO_MAX
#define MPI3_MAN8_PHY_INFO_MAX (1)
#endif
@@ -536,6 +558,10 @@ struct mpi3_man11_bkplane_spec_non_ubm_format {
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_MASK (0x00c0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_4 (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_2 (0x0040)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_1 (0x0080)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010)
@@ -825,19 +851,16 @@ struct mpi3_man_page21 {
};
#define MPI3_MAN21_PAGEVERSION (0x00)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_MASK (0x80)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_ENABLED (0x80)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_DISABLED (0x00)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x60)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x20)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x40)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x08)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x08)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x01)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x01)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x00000060)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00000000)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x00000020)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x00000040)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x00000008)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00000000)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x00000008)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x00000001)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00000000)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x00000001)
#ifndef MPI3_MAN_PROD_SPECIFIC_MAX
#define MPI3_MAN_PROD_SPECIFIC_MAX (1)
#endif
@@ -995,7 +1018,12 @@ struct mpi3_io_unit_page5 {
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003)
-#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAA_SSD_SHIFT (0)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_MASK (0x0c)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_NOT_SUPPORTED (0x00)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_OS_CONTROLLED (0x04)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_APP_CONTROLLED (0x08)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_BLOCKED (0x0c)
#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
@@ -1027,7 +1055,8 @@ struct mpi3_io_unit_page8 {
u8 slots_available;
u8 current_key_encryption_algo;
u8 key_digest_hash_algo;
- __le32 reserved10[2];
+ union mpi3_version_union current_svn;
+ __le32 reserved14;
__le32 current_key[128];
union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX];
};
@@ -1036,6 +1065,7 @@ struct mpi3_io_unit_page8 {
#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
+#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
struct mpi3_io_unit_page9 {
@@ -1045,9 +1075,14 @@ struct mpi3_io_unit_page9 {
__le16 reserved0e;
};
-#define MPI3_IOUNIT9_PAGEVERSION (0x00)
-#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x01)
-#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+#define MPI3_IOUNIT9_PAGEVERSION (0x00)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_MASK (0x00000006)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_SHIFT (1)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_NONE (0x00000000)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_RECEPTACLE (0x00000002)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004)
+#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001)
+#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
struct mpi3_io_unit_page10 {
struct mpi3_config_page_header header;
u8 flags;
@@ -1090,6 +1125,57 @@ struct mpi3_io_unit_page11 {
struct mpi3_iounit11_profile profile[MPI3_IOUNIT11_PROFILE_MAX];
};
#define MPI3_IOUNIT11_PAGEVERSION (0x00)
+#ifndef MPI3_IOUNIT12_BUCKET_MAX
+#define MPI3_IOUNIT12_BUCKET_MAX (1)
+#endif
+struct mpi3_iounit12_bucket {
+ u8 coalescing_depth;
+ u8 coalescing_timeout;
+ __le16 io_count_low_boundary;
+ __le32 reserved04;
+};
+struct mpi3_io_unit_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c[4];
+ u8 num_buckets;
+ u8 reserved1d[3];
+ struct mpi3_iounit12_bucket bucket[MPI3_IOUNIT12_BUCKET_MAX];
+};
+#define MPI3_IOUNIT12_PAGEVERSION (0x00)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_MASK (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_SHIFT (8)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_8 (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_16 (0x00000100)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_32 (0x00000200)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_64 (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_MASK (0x00000003)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_DISABLED (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_500US (0x00000001)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_1MS (0x00000002)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_2MS (0x00000003)
+#ifndef MPI3_IOUNIT13_FUNC_MAX
+#define MPI3_IOUNIT13_FUNC_MAX (1)
+#endif
+struct mpi3_iounit13_allowed_function {
+ __le16 sub_function;
+ u8 function_code;
+ u8 fuction_flags;
+};
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_ADMIN_BLOCKED (0x04)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_OOB_BLOCKED (0x02)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_CHECK_SUBFUNCTION_ENABLED (0x01)
+struct mpi3_io_unit_page13 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_functions;
+ u8 reserved0d[3];
+ struct mpi3_iounit13_allowed_function allowed_function[MPI3_IOUNIT13_FUNC_MAX];
+};
+#define MPI3_IOUNIT13_PAGEVERSION (0x00)
+#define MPI3_IOUNIT13_FLAGS_ADMIN_BLOCKED (0x0002)
+#define MPI3_IOUNIT13_FLAGS_OOB_BLOCKED (0x0001)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@@ -1182,6 +1268,7 @@ struct mpi3_driver_page0 {
__le32 reserved18;
};
#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008)
#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
@@ -1906,19 +1993,30 @@ struct mpi3_pcie_io_unit_page1 {
};
#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x80)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x40)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x30)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_MASK (0xe0000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_DEASSERT (0x20000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_ASSERT (0x40000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_BACKPLANE_ERROR (0x60000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_MASK (0x1c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_DEASSERT (0x04000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_ASSERT (0x08000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_BACKPLANE_ERROR (0x0c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x00000080)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x00000040)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x00000030)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x10)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x20)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0f)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x02)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x03)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x04)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x05)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x06)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x00000010)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x00000020)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0000000f)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_USE_BACKPLANE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x00000002)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x00000003)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x00000004)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x00000005)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x00000006)
#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c)
#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2)
#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03)
@@ -2169,10 +2267,7 @@ struct mpi3_device0_vd_format {
#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002)
#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_MASK (0x0003)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_NONE (0x0000)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_HOST (0x0001)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_IOC (0x0002)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
union mpi3_device0_dev_spec_format {
struct mpi3_device0_sas_sata_format sas_sata_format;
struct mpi3_device0_pcie_format pcie_format;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index c29b87de8e18..64c58815988a 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2018-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2018-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_IMAGE_H
#define MPI30_IMAGE_H 1
@@ -63,6 +62,9 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d)
#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_RMC (0x20434d52)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index aac11c58cca9..3c03610ecfa6 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -1,13 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_INIT_H
#define MPI30_INIT_H 1
struct mpi3_scsi_io_cdb_eedp32 {
u8 cdb[20];
- __be32 primary_reference_tag;
+ __be32 primary_reference_tag;
__le16 primary_application_tag;
__le16 primary_application_tag_mask;
__le32 transfer_length;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 214e4c65e576..1c6c6730df5c 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_IOC_H
#define MPI30_IOC_H 1
@@ -158,6 +157,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
+#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000)
struct mpi3_mgmt_passthrough_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -637,6 +637,23 @@ struct mpi3_event_data_diag_buffer_status_change {
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
+#define MPI3_PEL_CLASS_DEBUG (0x00)
+#define MPI3_PEL_CLASS_PROGRESS (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
+#define MPI3_PEL_CLASS_WARNING (0x03)
+#define MPI3_PEL_CLASS_CRITICAL (0x04)
+#define MPI3_PEL_CLASS_FATAL (0x05)
+#define MPI3_PEL_CLASS_FAULT (0x06)
#define MPI3_PEL_CLEARTYPE_CLEAR (0x00)
#define MPI3_PEL_WAITTIME_INFINITE_WAIT (0x00)
#define MPI3_PEL_ACTION_GET_SEQNUM (0x01)
@@ -924,6 +941,7 @@ struct mpi3_ci_download_reply {
};
#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_FAILURE (0x40)
#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
index 901dbd788940..b7a5df01120d 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*
*/
#ifndef MPI30_PCI_H
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
index 298d895e374b..e587f77ccd68 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_SAS_H
#define MPI30_SAS_H 1
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index ba05ea57af25..9b76b9632751 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_TRANSPORT_H
#define MPI30_TRANSPORT_H 1
@@ -19,8 +18,8 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (23)
-#define MPI3_VERSION_DEV (1)
+#define MPI3_VERSION_UNIT (26)
+#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
__le16 producer_index;
@@ -212,6 +211,7 @@ struct mpi3_default_reply_descriptor {
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS (0x3000)
+#define MPI3_REPLY_DESCRIPT_REQUEST_QUEUE_ID_INVALID (0xffff)
struct mpi3_address_reply_descriptor {
__le64 reply_frame_address;
__le16 request_queue_ci;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0935b2e80662..def4c5e15cd8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -39,6 +39,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+#include <scsi/scsi_transport_sas.h>
#include "mpi/mpi30_transport.h"
#include "mpi/mpi30_cnfg.h"
@@ -55,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.0.0.69.0"
-#define MPI3MR_DRIVER_RELDATE "16-March-2022"
+#define MPI3MR_DRIVER_VERSION "8.2.0.3.0"
+#define MPI3MR_DRIVER_RELDATE "08-September-2022"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -97,9 +98,11 @@ extern atomic64_t event_counter;
#define MPI3MR_HOSTTAG_PEL_ABORT 3
#define MPI3MR_HOSTTAG_PEL_WAIT 4
#define MPI3MR_HOSTTAG_BLK_TMS 5
+#define MPI3MR_HOSTTAG_CFG_CMDS 6
+#define MPI3MR_HOSTTAG_TRANSPORT_CMDS 7
#define MPI3MR_NUM_DEVRMCMD 16
-#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1)
+#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_TRANSPORT_CMDS + 1)
#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
MPI3MR_NUM_DEVRMCMD - 1)
@@ -115,6 +118,7 @@ extern atomic64_t event_counter;
/* command/controller interaction timeout definitions in seconds */
#define MPI3MR_INTADMCMD_TIMEOUT 60
#define MPI3MR_PORTENABLE_TIMEOUT 300
+#define MPI3MR_PORTENABLE_POLL_INTERVAL 5
#define MPI3MR_ABORTTM_TIMEOUT 60
#define MPI3MR_RESETTM_TIMEOUT 60
#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
@@ -126,6 +130,10 @@ extern atomic64_t event_counter;
#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */
+#define MPI3MR_DEFAULT_CFG_PAGE_SZ 1024 /* in bytes */
+
+#define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME 10
+
#define MPI3MR_SCMD_TIMEOUT (60 * HZ)
#define MPI3MR_EH_SCMD_TIMEOUT (60 * HZ)
@@ -274,6 +282,8 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_SYSFS = 23,
MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
MPI3MR_RESET_FROM_FIRMWARE = 27,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
};
/* Queue type definitions */
@@ -421,12 +431,14 @@ struct op_reply_qinfo {
* struct mpi3mr_intr_info - Interrupt cookie information
*
* @mrioc: Adapter instance reference
+ * @os_irq: irq number
* @msix_index: MSIx index
* @op_reply_q: Associated operational reply queue
* @name: Dev name for the irq claiming device
*/
struct mpi3mr_intr_info {
struct mpi3mr_ioc *mrioc;
+ int os_irq;
u16 msix_index;
struct op_reply_qinfo *op_reply_q;
char name[MPI3MR_NAME_LENGTH];
@@ -457,16 +469,138 @@ struct mpi3mr_throttle_group_info {
atomic_t pend_large_data_sz;
};
+/* HBA port flags */
+#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
+
+/**
+ * struct mpi3mr_hba_port - HBA's port information
+ * @port_id: Port number
+ * @flags: HBA port flags
+ */
+struct mpi3mr_hba_port {
+ struct list_head list;
+ u8 port_id;
+ u8 flags;
+};
+
+/**
+ * struct mpi3mr_sas_port - Internal SAS port information
+ * @port_list: List of ports belonging to a SAS node
+ * @num_phys: Number of phys associated with port
+ * @marked_responding: used while refreshing the sas ports
+ * @lowest_phy: lowest phy ID of current sas port
+ * @phy_mask: phy_mask of current sas port
+ * @hba_port: HBA port entry
+ * @remote_identify: Attached device identification
+ * @rphy: SAS transport layer rphy object
+ * @port: SAS transport layer port object
+ * @phy_list: mpi3mr_sas_phy objects belonging to this port
+ */
+struct mpi3mr_sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ u8 marked_responding;
+ int lowest_phy;
+ u32 phy_mask;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct mpi3mr_sas_phy - Internal SAS Phy information
+ * @port_siblings: List of phys belonging to a port
+ * @identify: Phy identification
+ * @remote_identify: Attached device identification
+ * @phy: SAS transport layer Phy object
+ * @phy_id: Unique phy id within a port
+ * @handle: Firmware device handle for this phy
+ * @attached_handle: Firmware device handle for attached device
+ * @phy_belongs_to_port: Flag to indicate phy belongs to port
+ * @hba_port: HBA port entry
+ */
+struct mpi3mr_sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+ struct mpi3mr_hba_port *hba_port;
+};
+
+/**
+ * struct mpi3mr_sas_node - SAS host/expander information
+ * @list: List of sas nodes in a controller
+ * @parent_dev: Parent device class
+ * @num_phys: Number of phys belonging to sas_node
+ * @sas_address: SAS address of sas_node
+ * @handle: Firmware device handle for this sas_host/expander
+ * @sas_address_parent: SAS address of parent expander or host
+ * @enclosure_handle: Firmware handle of enclosure of this node
+ * @device_info: Capabilities of this sas_host/expander
+ * @non_responding: used to refresh the expander devices during reset
+ * @host_node: Flag to indicate this is a host_node
+ * @hba_port: HBA port entry
+ * @phy: A list of phys that make up this sas_host/expander
+ * @sas_port_list: List of internal ports of this node
+ * @rphy: sas_rphy object of this expander node
+ */
+struct mpi3mr_sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 non_responding;
+ u8 host_node;
+ struct mpi3mr_hba_port *hba_port;
+ struct mpi3mr_sas_phy *phy;
+ struct list_head sas_port_list;
+ struct sas_rphy *rphy;
+};
+
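/*
 * A minimal illustrative sketch (not part of the patch above) of how
 * the SAS node bookkeeping introduced here is meant to be traversed:
 * walking mrioc->sas_expander_list under mrioc->sas_node_lock to find
 * a node by firmware handle. The declared helper
 * __mpi3mr_expander_find_by_handle() presumably does something along
 * these lines; the function name below is hypothetical.
 */
static struct mpi3mr_sas_node *
example_expander_find_by_handle(struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_sas_node *sas_expander;

	/* caller is assumed to hold mrioc->sas_node_lock */
	list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list)
		if (sas_expander->handle == handle)
			return sas_expander;
	return NULL;
}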
+/**
+ * struct mpi3mr_enclosure_node - enclosure information
+ * @list: List of enclosures
+ * @pg0: Enclosure page 0;
+ */
+struct mpi3mr_enclosure_node {
+ struct list_head list;
+ struct mpi3_enclosure_page0 pg0;
+};
+
/**
* struct tgt_dev_sas_sata - SAS/SATA device specific
* information cached from firmware given data
*
* @sas_address: World wide unique SAS address
+ * @sas_address_parent: SAS address of parent expander or host
* @dev_info: Device information bits
+ * @phy_id: Phy identifier provided in device page 0
+ * @attached_phy_id: Attached phy identifier provided in device page 0
+ * @sas_transport_attached: Is this device exposed to transport
+ * @pend_sas_rphy_add: Flag to check device is in process of add
+ * @hba_port: HBA port entry
+ * @rphy: SAS transport layer rphy object
*/
struct tgt_dev_sas_sata {
u64 sas_address;
+ u64 sas_address_parent;
u16 dev_info;
+ u8 phy_id;
+ u8 attached_phy_id;
+ u8 sas_transport_attached;
+ u8 pend_sas_rphy_add;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_rphy *rphy;
};
/**
@@ -531,12 +665,16 @@ union _form_spec_inf {
* @slot: Slot number
* @encl_handle: FW enclosure handle
* @perst_id: FW assigned Persistent ID
+ * @devpg0_flag: Device Page0 flag
* @dev_type: SAS/SATA/PCIE device type
* @is_hidden: Should be exposed to upper layers or not
* @host_exposed: Already exposed to host or not
+ * @io_unit_port: IO Unit port ID
+ * @non_stl: Is this device not to be attached with SAS TL
* @io_throttle_enabled: I/O throttling needed or not
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
+ * @enclosure_logical_id: Enclosure logical identifier
* @dev_spec: Device type specific information
* @ref_count: Reference count
*/
@@ -548,12 +686,16 @@ struct mpi3mr_tgt_dev {
u16 slot;
u16 encl_handle;
u16 perst_id;
+ u16 devpg0_flag;
u8 dev_type;
u8 is_hidden;
u8 host_exposed;
+ u8 io_unit_port;
+ u8 non_stl;
u8 io_throttle_enabled;
u16 q_depth;
u64 wwid;
+ u64 enclosure_logical_id;
union _form_spec_inf dev_spec;
struct kref ref_count;
};
@@ -679,6 +821,21 @@ struct mpi3mr_drv_cmd {
struct mpi3mr_drv_cmd *drv_cmd);
};
+/**
+ * struct dma_memory_desc - memory descriptor structure to store
+ * virtual address, dma address and size for any generic dma
+ * memory allocations in the driver.
+ *
+ * @size: buffer size
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct dma_memory_desc {
+ u32 size;
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
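/*
 * A minimal illustrative sketch (not part of the patch above) of the
 * intended use of struct dma_memory_desc with the standard DMA API.
 * The driver's own helpers added later in this diff
 * (mpi3mr_alloc_config_dma_memory()/mpi3mr_free_config_dma_memory())
 * wrap the same calls and additionally reuse the default config page
 * buffer for small sizes; the functions below are hypothetical.
 */
static int example_dma_desc_alloc(struct device *dev,
	struct dma_memory_desc *desc, u32 size)
{
	desc->size = size;
	desc->addr = dma_alloc_coherent(dev, desc->size, &desc->dma_addr,
					GFP_KERNEL);
	return desc->addr ? 0 : -ENOMEM;
}

static void example_dma_desc_free(struct device *dev,
	struct dma_memory_desc *desc)
{
	if (desc->addr) {
		dma_free_coherent(dev, desc->size, desc->addr,
				  desc->dma_addr);
		desc->addr = NULL;
	}
}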
/**
* struct chain_element - memory descriptor structure to store
@@ -756,6 +913,7 @@ struct scmd_priv {
* @num_op_reply_q: Number of operational reply queues
* @op_reply_qinfo: Operational reply queue info pointer
* @init_cmds: Command tracker for initialization commands
+ * @cfg_cmds: Command tracker for configuration requests
* @facts: Cached IOC facts data
* @op_reply_desc_sz: Operational reply descriptor size
* @num_reply_bufs: Number of reply buffers allocated
@@ -792,6 +950,7 @@ struct scmd_priv {
* @scan_started: Async scan started
* @scan_failed: Async scan failed
* @stop_drv_processing: Stop all command processing
+ * @device_refresh_on: Don't process the events until devices are refreshed
* @max_host_ios: Maximum host I/O count
* @chain_buf_count: Chain buffer count
* @chain_buf_pool: Chain buffer pool
@@ -854,6 +1013,17 @@ struct scmd_priv {
* @io_throttle_low: I/O size to stop throttle in 512b blocks
* @num_io_throttle_group: Maximum number of throttle groups
* @throttle_groups: Pointer to throttle group info structures
+ * @cfg_page: Default memory for configuration pages
+ * @cfg_page_dma: Configuration page DMA address
+ * @cfg_page_sz: Default configuration page memory size
+ * @sas_transport_enabled: SAS transport enabled or not
+ * @scsi_device_channel: Channel ID for SCSI devices
+ * @transport_cmds: Command tracker for SAS transport commands
+ * @sas_hba: SAS node for the controller
+ * @sas_expander_list: SAS node list of expanders
+ * @sas_node_lock: Lock to protect SAS node list
+ * @hba_port_table_list: List of HBA Ports
+ * @enclosure_list: List of Enclosure objects
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -904,6 +1074,7 @@ struct mpi3mr_ioc {
struct op_reply_qinfo *op_reply_qinfo;
struct mpi3mr_drv_cmd init_cmds;
+ struct mpi3mr_drv_cmd cfg_cmds;
struct mpi3mr_ioc_facts facts;
u16 op_reply_desc_sz;
@@ -948,6 +1119,7 @@ struct mpi3mr_ioc {
u8 scan_started;
u16 scan_failed;
u8 stop_drv_processing;
+ u8 device_refresh_on;
u16 max_host_ios;
spinlock_t tgtdev_lock;
@@ -1025,6 +1197,19 @@ struct mpi3mr_ioc {
u32 io_throttle_low;
u16 num_io_throttle_group;
struct mpi3mr_throttle_group_info *throttle_groups;
+
+ void *cfg_page;
+ dma_addr_t cfg_page_dma;
+ u16 cfg_page_sz;
+
+ u8 sas_transport_enabled;
+ u8 scsi_device_channel;
+ struct mpi3mr_drv_cmd transport_cmds;
+ struct mpi3mr_sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head hba_port_table_list;
+ struct list_head enclosure_list;
};
/**
@@ -1149,6 +1334,67 @@ int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd);
void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
u16 event_data_size);
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
extern const struct attribute_group *mpi3mr_host_groups[];
extern const struct attribute_group *mpi3mr_dev_groups[];
+
+extern struct sas_function_template mpi3mr_transport_functions;
+extern struct scsi_transport_template *mpi3mr_transport_template;
+
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz);
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz);
+
+u8 mpi3mr_is_expander_device(u16 device_info);
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port);
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle);
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id);
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc);
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port);
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy);
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add);
+void mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc);
+void mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc);
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
#endif /*MPI3MR_H_INCLUDED*/
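/*
 * A minimal illustrative sketch (not part of the patch above) of how
 * the configuration page helpers declared in this header are expected
 * to be called. MPI3_DEVICE_PGAD_FORM_HANDLE is assumed from the MPI
 * 3.0 headers and example_read_dev_pg0() is a hypothetical caller; the
 * point shown is that the caller passes the buffer size and must check
 * the returned ioc_status itself, as noted in the helper kernel-doc.
 */
static int example_read_dev_pg0(struct mpi3mr_ioc *mrioc, u16 dev_handle)
{
	struct mpi3_device_page0 dev_pg0;
	u16 ioc_status = 0;

	if (mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
	    sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, dev_handle))
		return -EIO;		/* request failed or timed out */

	if (ioc_status != MPI3_IOCSTATUS_SUCCESS)
		return -ENXIO;		/* firmware rejected the page read */

	return 0;
}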
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
index 2464c400a5a4..ee6edd8322e6 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_debug.h
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -23,9 +23,13 @@
#define MPI3_DEBUG_RESET 0x00000020
#define MPI3_DEBUG_SCSI_ERROR 0x00000040
#define MPI3_DEBUG_REPLY 0x00000080
+#define MPI3_DEBUG_CFG_ERROR 0x00000100
+#define MPI3_DEBUG_TRANSPORT_ERROR 0x00000200
#define MPI3_DEBUG_BSG_ERROR 0x00008000
#define MPI3_DEBUG_BSG_INFO 0x00010000
#define MPI3_DEBUG_SCSI_INFO 0x00020000
+#define MPI3_DEBUG_CFG_INFO 0x00040000
+#define MPI3_DEBUG_TRANSPORT_INFO 0x00080000
#define MPI3_DEBUG 0x01000000
#define MPI3_DEBUG_SG 0x02000000
@@ -122,6 +126,29 @@
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
+#define dprint_cfg_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_cfg_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+#define dprint_transport_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_transport_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
#endif /* MPT3SAS_DEBUG_H_INCLUDED */
/**
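/*
 * A minimal illustrative sketch (not part of the patch above): the new
 * dprint_cfg_*()/dprint_transport_*() macros are gated on bits of the
 * adapter's existing logging_level field, so enabling config-page
 * tracing only requires setting the corresponding MPI3_DEBUG_CFG_*
 * bits; example_enable_cfg_trace() is hypothetical.
 */
static inline void example_enable_cfg_trace(struct mpi3mr_ioc *mrioc)
{
	mrioc->logging_level |= MPI3_DEBUG_CFG_INFO | MPI3_DEBUG_CFG_ERROR;
	dprint_cfg_info(mrioc, "config page tracing enabled\n");
}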
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 0866dfd43318..0c4aabaefdcc 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -244,6 +244,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
desc = "Enclosure Device Status Change";
break;
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ desc = "Enclosure Added";
+ break;
case MPI3_EVENT_HARD_RESET_RECEIVED:
desc = "Hard Reset Received";
break;
@@ -299,6 +302,8 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
switch (host_tag) {
case MPI3MR_HOSTTAG_INITCMDS:
return &mrioc->init_cmds;
+ case MPI3MR_HOSTTAG_CFG_CMDS:
+ return &mrioc->cfg_cmds;
case MPI3MR_HOSTTAG_BSG_CMDS:
return &mrioc->bsg_cmds;
case MPI3MR_HOSTTAG_BLK_TMS:
@@ -307,6 +312,8 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
return &mrioc->pel_abort_cmd;
case MPI3MR_HOSTTAG_PEL_WAIT:
return &mrioc->pel_cmds;
+ case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
+ return &mrioc->transport_cmds;
case MPI3MR_HOSTTAG_INVALID:
if (def_reply && def_reply->function ==
MPI3_FUNCTION_EVENT_NOTIFICATION)
@@ -424,6 +431,9 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
return 0;
do {
+ if (mrioc->unrecoverable)
+ break;
+
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
if (reply_dma)
@@ -509,6 +519,9 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
+ if (mrioc->unrecoverable)
+ break;
+
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
op_req_q = &mrioc->req_qinfo[req_q_idx];
@@ -530,6 +543,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+#ifndef CONFIG_PREEMPT_RT
/*
* Exit completion loop to avoid CPU lockup
* Ensure remaining completion happens from threaded ISR.
@@ -538,7 +552,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
op_reply_q->enable_irq_poll = true;
break;
}
-
+#endif
} while (1);
writel(reply_ci,
@@ -569,7 +583,8 @@ int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
mrioc = (struct mpi3mr_ioc *)shost->hostdata;
- if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
+ if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
+ mrioc->unrecoverable))
return 0;
num_entries = mpi3mr_process_op_reply_q(mrioc,
@@ -607,18 +622,16 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
return IRQ_NONE;
}
+#ifndef CONFIG_PREEMPT_RT
+
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
struct mpi3mr_intr_info *intr_info = privdata;
- struct mpi3mr_ioc *mrioc;
- u16 midx;
int ret;
if (!intr_info)
return IRQ_NONE;
- mrioc = intr_info->mrioc;
- midx = intr_info->msix_index;
/* Call primary ISR routine */
ret = mpi3mr_isr_primary(irq, privdata);
@@ -633,7 +646,7 @@ static irqreturn_t mpi3mr_isr(int irq, void *privdata)
!atomic_read(&intr_info->op_reply_q->pend_ios))
return ret;
- disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
+ disable_irq_nosync(intr_info->os_irq);
return IRQ_WAKE_THREAD;
}
@@ -663,7 +676,7 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
/* Poll for pending IOs completions */
do {
- if (!mrioc->intr_enabled)
+ if (!mrioc->intr_enabled || mrioc->unrecoverable)
break;
if (!midx)
@@ -679,11 +692,13 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
(num_op_reply < mrioc->max_host_ios));
intr_info->op_reply_q->enable_irq_poll = false;
- enable_irq(pci_irq_vector(mrioc->pdev, midx));
+ enable_irq(intr_info->os_irq);
return IRQ_HANDLED;
}
+#endif
+
/**
* mpi3mr_request_irq - Request IRQ and register ISR
* @mrioc: Adapter instance reference
@@ -706,14 +721,20 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
mrioc->driver_name, mrioc->id, index);
+#ifndef CONFIG_PREEMPT_RT
retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+#else
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
+ NULL, IRQF_SHARED, intr_info->name, intr_info);
+#endif
if (retval) {
ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
intr_info->name, pci_irq_vector(pdev, index));
return retval;
}
+ intr_info->os_irq = pci_irq_vector(pdev, index);
return retval;
}
@@ -907,6 +928,8 @@ static const struct {
{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
+ { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
+ { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
};
/**
@@ -1130,6 +1153,13 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
return -EPERM;
}
+ if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
+ ioc_err(mrioc,
+ "critical error: multipath capability is enabled at the\n"
+ "\tcontroller while sas transport support is enabled at the\n"
+ "\tdriver, please reboot the system or reload the driver\n");
+
dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
if (mrioc->facts.max_devhandle % 8)
dev_handle_bitmap_sz++;
@@ -1194,6 +1224,14 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
msleep(100);
} while (--timeout);
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present while waiting to reset\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
+
ioc_state = mpi3mr_get_iocstate(mrioc);
ioc_info(mrioc,
"controller is in %s state after waiting to reset\n",
@@ -1251,6 +1289,13 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
mpi3mr_iocstate_name(ioc_state));
return 0;
}
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present at the bringup\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
msleep(100);
} while (--timeout);
@@ -1259,6 +1304,7 @@ out_failed:
ioc_err(mrioc,
"failed to bring to ready state, current state: %s\n",
mpi3mr_iocstate_name(ioc_state));
+out_device_not_present:
return retval;
}
@@ -2163,9 +2209,13 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
pi = 0;
op_req_q->pi = pi;
+#ifndef CONFIG_PREEMPT_RT
if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
> MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+#else
+ atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
+#endif
writel(op_req_q->pi,
&mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
@@ -2193,6 +2243,17 @@ void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
u32 ioc_status, host_diagnostic, timeout;
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "controller is unrecoverable\n");
+ return;
+ }
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "controller is not present\n");
+ return;
+ }
+
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
@@ -2384,9 +2445,21 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
u32 fault, host_diagnostic, ioc_status;
u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
- if (mrioc->reset_in_progress || mrioc->unrecoverable)
+ if (mrioc->reset_in_progress)
return;
+ if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
+ ioc_err(mrioc, "watchdog could not detect the controller\n");
+ mrioc->unrecoverable = 1;
+ }
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc,
+ "flush pending commands for unrecoverable controller\n");
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ return;
+ }
+
if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
@@ -2426,11 +2499,12 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
mrioc->diagsave_timeout = 0;
switch (fault) {
+ case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
- ioc_info(mrioc,
+ ioc_warn(mrioc,
"controller requires system power cycle, marking controller as unrecoverable\n");
mrioc->unrecoverable = 1;
- return;
+ goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
return;
case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
@@ -2853,6 +2927,10 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->bsg_cmds.reply)
goto out_failed;
+ mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->transport_cmds.reply)
+ goto out_failed;
+
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
GFP_KERNEL);
@@ -3362,10 +3440,13 @@ out_failed:
static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd)
{
- drv_cmd->state = MPI3MR_CMD_NOTUSED;
drv_cmd->callback = NULL;
- mrioc->scan_failed = drv_cmd->ioc_status;
mrioc->scan_started = 0;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ else
+ mrioc->scan_failed = drv_cmd->ioc_status;
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
}
/**
@@ -3447,6 +3528,7 @@ static const struct {
char *name;
} mpi3mr_capabilities[] = {
{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
+ { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" },
};
/**
@@ -3657,6 +3739,7 @@ static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
@@ -3727,6 +3810,14 @@ retry_init:
mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
MPI3MR_HOST_IOS_KDUMP);
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) {
+ mrioc->sas_transport_enabled = 1;
+ mrioc->scsi_device_channel = 1;
+ mrioc->shost->max_channel = 1;
+ mrioc->shost->transportt = mpi3mr_transport_template;
+ }
+
mrioc->reply_sz = mrioc->facts.reply_sz;
retval = mpi3mr_check_reset_dma_mask(mrioc);
@@ -3738,6 +3829,14 @@ retry_init:
mpi3mr_print_ioc_info(mrioc);
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page)
+ goto out_failed_noretry;
+
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+
retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
if (retval) {
ioc_err(mrioc,
@@ -3795,8 +3894,7 @@ retry_init:
if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
dprint_init(mrioc, "allocating memory for throttle groups\n");
sz = sizeof(struct mpi3mr_throttle_group_info);
- mrioc->throttle_groups = (struct mpi3mr_throttle_group_info *)
- kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+ mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
if (!mrioc->throttle_groups)
goto out_failed_noretry;
}
@@ -3845,8 +3943,12 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
int retval = 0;
u8 retry = 0;
struct mpi3_ioc_facts_data facts_data;
+ u32 pe_timeout, ioc_status;
retry_init:
+ pe_timeout =
+ (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
+
dprint_reset(mrioc, "bringing up the controller to ready state\n");
retval = mpi3mr_bring_ioc_ready(mrioc);
if (retval) {
@@ -3936,12 +4038,50 @@ retry_init:
goto out_failed;
}
+ mrioc->device_refresh_on = 1;
+ mpi3mr_add_event_wait_for_device_refresh(mrioc);
+
ioc_info(mrioc, "sending port enable\n");
- retval = mpi3mr_issue_port_enable(mrioc, 0);
+ retval = mpi3mr_issue_port_enable(mrioc, 1);
if (retval) {
ioc_err(mrioc, "failed to issue port enable\n");
goto out_failed;
}
+ do {
+ ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
+ if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
+ break;
+ if (!pci_device_is_present(mrioc->pdev))
+ mrioc->unrecoverable = 1;
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ }
+ } while (--pe_timeout);
+
+ if (!pe_timeout) {
+ ioc_err(mrioc, "port enable timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ } else if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+ } else
+ ioc_info(mrioc, "port enable completed successfully\n");
ioc_info(mrioc, "controller %s completed successfully\n",
(is_resume)?"resume":"re-initialization");
@@ -4042,6 +4182,8 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
sizeof(*mrioc->pel_cmds.reply));
memset(mrioc->pel_abort_cmd.reply, 0,
sizeof(*mrioc->pel_abort_cmd.reply));
+ memset(mrioc->transport_cmds.reply, 0,
+ sizeof(*mrioc->transport_cmds.reply));
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
memset(mrioc->dev_rmhs_cmds[i].reply, 0,
sizeof(*mrioc->dev_rmhs_cmds[i].reply));
@@ -4102,6 +4244,8 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
u16 i;
struct mpi3mr_intr_info *intr_info;
+ mpi3mr_free_enclosure_list(mrioc);
+
if (mrioc->sense_buf_pool) {
if (mrioc->sense_buf)
dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
@@ -4187,6 +4331,9 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
kfree(mrioc->chain_bitmap);
mrioc->chain_bitmap = NULL;
+ kfree(mrioc->transport_cmds.reply);
+ mrioc->transport_cmds.reply = NULL;
+
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
kfree(mrioc->dev_rmhs_cmds[i].reply);
mrioc->dev_rmhs_cmds[i].reply = NULL;
@@ -4355,13 +4502,17 @@ static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
*
* Return: Nothing.
*/
-static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
{
struct mpi3mr_drv_cmd *cmdptr;
u8 i;
cmdptr = &mrioc->init_cmds;
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->cfg_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
cmdptr = &mrioc->bsg_cmds;
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
cmdptr = &mrioc->host_tm_cmds;
@@ -4383,6 +4534,8 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
cmdptr = &mrioc->pel_abort_cmd;
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ cmdptr = &mrioc->transport_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}
/**
@@ -4681,6 +4834,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "controller reset is triggered by %s\n",
mpi3mr_reset_rc_name(reset_reason));
+ mrioc->device_refresh_on = 0;
mrioc->reset_in_progress = 1;
mrioc->stop_bsgs = 1;
mrioc->prev_reset_result = -1;
@@ -4739,6 +4893,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_flush_host_io(mrioc);
mpi3mr_cleanup_fwevt_list(mrioc);
mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
+
if (mrioc->prepare_for_reset) {
mrioc->prepare_for_reset = 0;
mrioc->prepare_for_reset_timeout_counter = 0;
@@ -4750,7 +4906,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mrioc->name, reset_reason);
goto out;
}
- ssleep(10);
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
out:
if (!retval) {
@@ -4762,7 +4918,8 @@ out:
mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
}
- mpi3mr_rfresh_tgtdevs(mrioc);
+ mrioc->device_refresh_on = 0;
+
mrioc->ts_update_counter = 0;
spin_lock_irqsave(&mrioc->watchdog_lock, flags);
if (mrioc->watchdog_work_q)
@@ -4776,9 +4933,11 @@ out:
} else {
mpi3mr_issue_reset(mrioc,
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ mrioc->device_refresh_on = 0;
mrioc->unrecoverable = 1;
mrioc->reset_in_progress = 0;
retval = -1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
}
mrioc->prev_reset_result = retval;
mutex_unlock(&mrioc->reset_mutex);
@@ -4786,3 +4945,836 @@ out:
((retval == 0) ? "successful" : "failed"));
return retval;
}
+
+
+/**
+ * mpi3mr_free_config_dma_memory - free memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: memory descriptor structure
+ *
+ * Check whether the size of the buffer specified by the memory
+ * descriptor is greater than the default page size; if so, then
+ * free the memory pointed to by the descriptor.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
+ dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
+ mem_desc->addr, mem_desc->dma_addr);
+ mem_desc->addr = NULL;
+ }
+}
+
+/**
+ * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: Memory descriptor to hold dma memory info
+ *
+ * This function allocates new dmaable memory or provides the
+ * default config page dmaable memory based on the memory size
+ * described by the descriptor.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if (mem_desc->size > mrioc->cfg_page_sz) {
+ mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
+ if (!mem_desc->addr)
+ return -ENOMEM;
+ } else {
+ mem_desc->addr = mrioc->cfg_page;
+ mem_desc->dma_addr = mrioc->cfg_page_dma;
+ memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
+ }
+ return 0;
+}
+
+/**
+ * mpi3mr_post_cfg_req - Issue config requests and wait
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting an MPI3 configuration request to
+ * the firmware. This blocks for the completion of the request for
+ * timeout seconds and, if the request times out, this function
+ * faults the controller with a proper reason code.
+ *
+ * On successful completion of the request this function returns
+ * appropriate ioc status from the firmware back to the caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->cfg_cmds.mutex);
+ if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending config request failed due to command in use\n");
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+ goto out;
+ }
+ mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->cfg_cmds.is_waiting = 1;
+ mrioc->cfg_cmds.callback = NULL;
+ mrioc->cfg_cmds.ioc_status = 0;
+ mrioc->cfg_cmds.ioc_loginfo = 0;
+
+ cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
+ cfg_req->function = MPI3_FUNCTION_CONFIG;
+
+ init_completion(&mrioc->cfg_cmds.done);
+ dprint_cfg_info(mrioc, "posting config request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
+ "mpi3_cfg_req");
+ retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting config request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
+ if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
+ ioc_err(mrioc, "config request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_cfg_err(mrioc,
+ "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
+
+out_unlock:
+ mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_process_cfg_req - config page request processor
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @cfg_hdr: Configuration page header
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ * @cfg_buf: Memory pointer to copy config page or header
+ * @cfg_buf_sz: Size of the memory to get config page or header
+ *
+ * This is the handler for config page read, write and config page
+ * header read operations.
+ *
+ * This function expects the cfg_req to be populated with page
+ * type, page number, action for the header read and with page
+ * address for all other operations.
+ *
+ * The cfg_hdr can be passed as NULL when reading the required header
+ * details; for read/write pages the cfg_hdr should point to a valid
+ * configuration page header.
+ *
+ * This allocates dmaable memory based on the size of the config
+ * buffer and sets the SGE of the cfg_req.
+ *
+ * For write actions, the config page data has to be passed in
+ * the cfg_buf and size of the data has to be mentioned in the
+ * cfg_buf_sz.
+ *
+ * For read/header actions, on successful completion of the
+ * request with a successful ioc_status, the data will be copied
+ * into the cfg_buf, limited to the minimum of the actual page size and
+ * cfg_buf_sz.
+ *
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req,
+ struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
+ void *cfg_buf, u32 cfg_buf_sz)
+{
+ struct dma_memory_desc mem_desc;
+ int retval = -1;
+ u8 invalid_action = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
+
+ if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
+ mem_desc.size = sizeof(struct mpi3_config_page_header);
+ else {
+ if (!cfg_hdr) {
+ ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number);
+ goto out;
+ }
+ switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
+ case MPI3_CONFIG_PAGEATTR_READ_ONLY:
+ if (cfg_req->action
+ != MPI3_CONFIG_ACTION_READ_CURRENT)
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
+ if ((cfg_req->action ==
+ MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
+ (cfg_req->action ==
+ MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_PERSISTENT:
+ default:
+ break;
+ }
+ if (invalid_action) {
+ ioc_err(mrioc,
+ "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number, cfg_hdr->page_attribute);
+ goto out;
+ }
+ mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
+ cfg_req->page_length = cfg_hdr->page_length;
+ cfg_req->page_version = cfg_hdr->page_version;
+ }
+ if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
+ goto out;
+
+ mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
+ mem_desc.dma_addr);
+
+ if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
+ (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer to be written\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+ if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
+ goto out;
+
+ retval = 0;
+ if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer read\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+out:
+ mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
+ return retval;
+}
+
+/**
+ * mpi3mr_cfg_get_dev_pg0 - Read current device page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @dev_pg0: Pointer to return device page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific device
+ * page0. The ioc_status holds the controller returned ioc_status.
+ * This routine doesn't check ioc_status to decide whether the
+ * page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(dev_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "device page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
+ (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
+ ioc_err(mrioc, "device page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg0: Pointer to return SAS Phy page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS Phy
+ * page0. The ioc_status holds the controller returned ioc_status.
+ * This routine doesn't check ioc_status to decide whether the
+ * page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas phy page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg1: Pointer to return SAS Phy page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS Phy
+ * page1. The ioc_status holds the controller returned ioc_status.
+ * This routine doesn't check ioc_status to decide whether the
+ * page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas phy page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg0: Pointer to return SAS Expander page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Expander page0. The ioc_status holds the controller returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
+ ioc_err(mrioc, "expander page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg1: Pointer to return SAS Expander page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Expander page1. The ioc_status holds the controller returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
+ ioc_err(mrioc, "expander page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @encl_pg0: Pointer to return Enclosure page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific Enclosure
+ * page0. The ioc_status holds the controller returned ioc_status.
+ * This routine doesn't check ioc_status to decide whether the
+ * page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(encl_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "enclosure page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
+ (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
+ ioc_err(mrioc, "enclosure page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page read of the SAS IO Unit
+ * page0. This routine checks ioc_status to decide whether the
+ * page read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page0 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page0 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page read of the SAS IO Unit
+ * page1. This routine checks ioc_status to decide whether the
+ * page read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for config page writes of SAS IO Unit
+ * page1. This routine checks ioc_status to decide whether the
+ * page write was successful or not. This will modify both the
+ * current and the persistent page.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write current failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
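A read-modify-write sketch pairing the page1 getter and setter above (illustrative only, not part of this patch; the function name is hypothetical and the exact per-phy flag names come from the MPI3 headers, so the modification step is left as a comment):

        static int example_set_phy_flags(struct mpi3mr_ioc *mrioc, u8 num_phys, u8 phy_id)
        {
                struct mpi3_sas_io_unit_page1 *pg1;
                u16 sz;
                int rc;

                sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
                    (num_phys * sizeof(struct mpi3_sas_io_unit1_phy_data));
                pg1 = kzalloc(sz, GFP_KERNEL);
                if (!pg1)
                        return -ENOMEM;
                rc = mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, pg1, sz);
                if (rc)
                        goto out;
                /* adjust the per-phy flags for pg1->phy_data[phy_id] here */
                rc = mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, pg1, sz);
        out:
                kfree(pg1);
                return rc;
        }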
+/**
+ * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
+ * @mrioc: Adapter instance reference
+ * @driver_pg1: Pointer to return Driver page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for config page reads of Driver page1.
+ * This routine checks ioc_status to decide whether the page
+ * read was successful or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(driver_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "driver page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
+ ioc_err(mrioc, "driver page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 9681c8bf24ed..f77ee4051b00 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -40,6 +40,8 @@ static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF)
+#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE)
+
/**
* mpi3mr_host_tag_for_scmd - Get host tag for a scmd
* @mrioc: Adapter instance reference
@@ -422,6 +424,8 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
tgt_priv->io_throttle_enabled = 0;
tgt_priv->io_divert = 0;
tgt_priv->throttle_group = NULL;
+ if (tgtdev->host_exposed)
+ atomic_set(&tgt_priv->block_io, 1);
}
}
}
@@ -579,6 +583,39 @@ void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
+ * @mrioc: Adapter instance reference
+ *
+ * This function waits for the currently running IO poll threads
+ * to exit and then flushes all host I/Os and any internal
+ * pending cmds. This is executed after the controller is marked
+ * as unrecoverable.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+ int i;
+
+ if (!mrioc->unrecoverable)
+ return;
+
+ if (mrioc->op_reply_qinfo) {
+ for (i = 0; i < mrioc->num_queues; i++) {
+ while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
+ udelay(500);
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ }
+ }
+ mrioc->flush_io_count = 0;
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+}
+
+/**
* mpi3mr_alloc_tgtdev - target device allocator
*
* Allocate target device instance and initialize the reference
@@ -796,7 +833,7 @@ static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
*
* Return: None.
*/
-static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
bool device_add)
{
ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
@@ -816,7 +853,7 @@ static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
*
* Return: 0 on success, non zero on failure.
*/
-static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
struct mpi3mr_tgt_dev *tgtdev)
{
struct mpi3mr_stgt_priv_data *tgt_priv;
@@ -825,22 +862,29 @@ static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
__func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
if (tgtdev->starget && tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
+ atomic_set(&tgt_priv->block_io, 0);
tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
}
- if (tgtdev->starget) {
- if (mrioc->current_event)
- mrioc->current_event->pending_at_sml = 1;
- scsi_remove_target(&tgtdev->starget->dev);
- tgtdev->host_exposed = 0;
- if (mrioc->current_event) {
- mrioc->current_event->pending_at_sml = 0;
- if (mrioc->current_event->discard) {
- mpi3mr_print_device_event_notice(mrioc, false);
- return;
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+ MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
+ if (tgtdev->starget) {
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+ scsi_remove_target(&tgtdev->starget->dev);
+ tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc,
+ false);
+ return;
+ }
}
}
- }
+ } else
+ mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
+
ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
__func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}
@@ -862,21 +906,25 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
int retval = 0;
struct mpi3mr_tgt_dev *tgtdev;
+ if (mrioc->reset_in_progress)
+ return -1;
+
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (!tgtdev) {
retval = -1;
goto out;
}
- if (tgtdev->is_hidden) {
+ if (tgtdev->is_hidden || tgtdev->host_exposed) {
retval = -1;
goto out;
}
- if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+ MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl){
tgtdev->host_exposed = 1;
if (mrioc->current_event)
mrioc->current_event->pending_at_sml = 1;
- scsi_scan_target(&mrioc->shost->shost_gendev, 0,
- tgtdev->perst_id,
+ scsi_scan_target(&mrioc->shost->shost_gendev,
+ mrioc->scsi_device_channel, tgtdev->perst_id,
SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
if (!tgtdev->starget)
tgtdev->host_exposed = 0;
@@ -887,7 +935,8 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
goto out;
}
}
- }
+ } else
+ mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
@@ -1018,18 +1067,29 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
{
u16 flags = 0;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
u8 prot_mask = 0;
tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
tgtdev->dev_type = dev_pg0->device_form;
+ tgtdev->io_unit_port = dev_pg0->io_unit_port;
tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
tgtdev->slot = le16_to_cpu(dev_pg0->slot);
tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
+ tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);
+
+ if (tgtdev->encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ tgtdev->encl_handle);
+ if (enclosure_dev)
+ tgtdev->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+
+ flags = tgtdev->devpg0_flag;
- flags = le16_to_cpu(dev_pg0->flags);
tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
if (is_added == true)
@@ -1045,6 +1105,8 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
scsi_tgt_priv_data->io_throttle_enabled =
tgtdev->io_throttle_enabled;
+ if (is_added == true)
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
}
switch (dev_pg0->access_status) {
@@ -1068,12 +1130,25 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
tgtdev->dev_spec.sas_sata_inf.sas_address =
le64_to_cpu(sasinf->sas_address);
+ tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
+ tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
+ sasinf->attached_phy_identifier;
if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
tgtdev->is_hidden = 1;
else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
tgtdev->is_hidden = 1;
+
+ if (((tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
+ && (tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
+ (tgtdev->parent_handle == 0xFFFF))
+ tgtdev->non_stl = 1;
+ if (tgtdev->dev_spec.sas_sata_inf.hba_port)
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
+ dev_pg0->io_unit_port;
break;
}
case MPI3_DEVICE_DEVFORM_PCIE:
@@ -1106,6 +1181,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
if (!mrioc->shost)
break;
prot_mask = scsi_host_get_prot(mrioc->shost);
@@ -1129,6 +1205,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
tgtdev->dev_spec.vd_inf.tg_high =
le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
@@ -1258,6 +1335,135 @@ out:
}
/**
+ * mpi3mr_free_enclosure_list - release enclosures
+ * @mrioc: Adapter instance reference
+ *
+ * Free memory allocated during enclosure add.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;
+
+ list_for_each_entry_safe(enclosure_dev,
+ enclosure_dev_next, &mrioc->enclosure_list, list) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ }
+}
+
+/**
+ * mpi3mr_enclosure_find_by_handle - enclosure search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the enclosure
+ *
+ * This searches for enclosure device based on handle, then returns the
+ * enclosure object.
+ *
+ * Return: Enclosure object reference or NULL
+ */
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;
+
+ list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
+ if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
+ continue;
+ r = enclosure_dev;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
+ * @mrioc: Adapter instance reference
+ * @encl_pg0: Enclosure page 0.
+ * @is_added: Added event or not
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
+{
+ char *reason_str = NULL;
+
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
+ return;
+
+ if (is_added)
+ reason_str = "enclosure added";
+ else
+ reason_str = "enclosure dev status changed";
+
+ ioc_info(mrioc,
+ "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
+ reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
+ (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
+ ioc_info(mrioc,
+ "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
+ le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
+ le16_to_cpu(encl_pg0->flags),
+ ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the Enclosure device status or
+ * Enclosure add events if logging is enabled and adds or
+ * removes the enclosure from the controller's internal list of
+ * enclosures.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
+ struct mpi3_enclosure_page0 *encl_pg0;
+ u16 encl_handle;
+ u8 added, present;
+
+ encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
+ added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
+ mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);
+
+
+ encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
+ present = ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
+
+ if (encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ encl_handle);
+ if (!enclosure_dev && present) {
+ enclosure_dev =
+ kzalloc(sizeof(struct mpi3mr_enclosure_node),
+ GFP_KERNEL);
+ if (!enclosure_dev)
+ return;
+ list_add_tail(&enclosure_dev->list,
+ &mrioc->enclosure_list);
+ }
+ if (enclosure_dev) {
+ if (!present) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ } else
+ memcpy(&enclosure_dev->pg0, encl_pg0,
+ sizeof(enclosure_dev->pg0));
+
+ }
+}
+
+/**
* mpi3mr_sastopochg_evt_debug - SASTopoChange details
* @mrioc: Adapter instance reference
* @event_data: SAS topology change list event data
@@ -1296,8 +1502,9 @@ mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "%s :sas topology change: (%s)\n",
__func__, status_str);
ioc_info(mrioc,
- "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
+ "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
__func__, le16_to_cpu(event_data->expander_dev_handle),
+ event_data->io_unit_port,
le16_to_cpu(event_data->enclosure_handle),
event_data->start_phy_num, event_data->num_entries);
for (i = 0; i < event_data->num_entries; i++) {
@@ -1355,9 +1562,30 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
int i;
u16 handle;
u8 reason_code;
+ u64 exp_sas_address = 0, parent_sas_address = 0;
+ struct mpi3mr_hba_port *hba_port = NULL;
struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_sas_node *sas_expander = NULL;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate, parent_phy_number;
mpi3mr_sastopochg_evt_debug(mrioc, event_data);
+ if (mrioc->sas_transport_enabled) {
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc,
+ event_data->io_unit_port);
+ if (le16_to_cpu(event_data->expander_dev_handle)) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
+ le16_to_cpu(event_data->expander_dev_handle));
+ if (sas_expander) {
+ exp_sas_address = sas_expander->sas_address;
+ hba_port = sas_expander->hba_port;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ parent_sas_address = exp_sas_address;
+ } else
+ parent_sas_address = mrioc->sas_hba.sas_address;
+ }
for (i = 0; i < event_data->num_entries; i++) {
if (fwevt->discard)
@@ -1379,12 +1607,37 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
mpi3mr_tgtdev_put(tgtdev);
break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ {
+ if (!mrioc->sas_transport_enabled || tgtdev->non_stl
+ || tgtdev->is_hidden)
+ break;
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ if (link_rate == prev_link_rate)
+ break;
+ if (!parent_sas_address)
+ break;
+ parent_phy_number = event_data->start_phy_num + i;
+ mpi3mr_update_links(mrioc, parent_sas_address, handle,
+ parent_phy_number, link_rate, hba_port);
+ break;
+ }
default:
break;
}
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
}
+
+ if (mrioc->sas_transport_enabled && (event_data->exp_status ==
+ MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
+ if (sas_expander)
+ mpi3mr_expander_remove(mrioc, exp_sas_address,
+ hba_port);
+ }
}
/**
@@ -1604,28 +1857,54 @@ static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
struct mpi3mr_fwevt *fwevt)
{
+ struct mpi3_device_page0 *dev_pg0 = NULL;
+ u16 perst_id, handle, dev_info;
+ struct mpi3_device0_sas_sata_format *sasinf = NULL;
+
mpi3mr_fwevt_del_from_list(mrioc, fwevt);
mrioc->current_event = fwevt;
if (mrioc->stop_drv_processing)
goto out;
+ if (mrioc->unrecoverable) {
+ dprint_event_bh(mrioc,
+ "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
+ fwevt->event_id);
+ goto out;
+ }
+
if (!fwevt->process_evt)
goto evt_ack;
switch (fwevt->event_id) {
case MPI3_EVENT_DEVICE_ADDED:
{
- struct mpi3_device_page0 *dev_pg0 =
- (struct mpi3_device_page0 *)fwevt->event_data;
- mpi3mr_report_tgtdev_to_host(mrioc,
- le16_to_cpu(dev_pg0->persistent_id));
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ handle = le16_to_cpu(dev_pg0->dev_handle);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ else if (mrioc->sas_transport_enabled &&
+ (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
+ sasinf = &dev_pg0->device_specific.sas_sata_format;
+ dev_info = le16_to_cpu(sasinf->device_info);
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_is_expander_device(dev_info))
+ mpi3mr_expander_add(mrioc, handle);
+ }
break;
}
case MPI3_EVENT_DEVICE_INFO_CHANGED:
{
- mpi3mr_devinfochg_evt_bh(mrioc,
- (struct mpi3_device_page0 *)fwevt->event_data);
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
break;
}
case MPI3_EVENT_DEVICE_STATUS_CHANGE:
@@ -1633,6 +1912,13 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
break;
}
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
+ break;
+ }
+
case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
{
mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
@@ -1662,6 +1948,22 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
}
break;
}
+ case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
+ {
+ while (mrioc->device_refresh_on)
+ msleep(500);
+
+ dprint_event_bh(mrioc,
+ "scan for non responding and newly added devices after soft reset started\n");
+ if (mrioc->sas_transport_enabled) {
+ mpi3mr_refresh_sas_ports(mrioc);
+ mpi3mr_refresh_expanders(mrioc);
+ }
+ mpi3mr_rfresh_tgtdevs(mrioc);
+ ioc_info(mrioc,
+ "scan for non responding and newly added devices after soft reset completed\n");
+ break;
+ }
default:
break;
}
@@ -1716,6 +2018,9 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
u16 perst_id = 0;
perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
+ return retval;
+
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (tgtdev) {
mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
@@ -2429,6 +2734,35 @@ static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
+ * @mrioc: Adapter instance reference
+ *
+ * Add a driver-specific event to make sure that the driver won't process
+ * events until all the devices are refreshed during soft reset.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ fwevt = mpi3mr_alloc_fwevt(0);
+ if (!fwevt) {
+ dprint_event_th(mrioc,
+ "failed to schedule bottom half handler for event(0x%02x)\n",
+ MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
+ return;
+ }
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = 0;
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
+
+/**
* mpi3mr_os_handle_events - Firmware event handler
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -2494,6 +2828,8 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
}
case MPI3_EVENT_DEVICE_INFO_CHANGED:
case MPI3_EVENT_LOG_DATA:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
{
process_evt_bh = 1;
break;
@@ -2508,7 +2844,6 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
break;
}
- case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
case MPI3_EVENT_SAS_DISCOVERY:
case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
@@ -3849,9 +4184,10 @@ static void mpi3mr_slave_destroy(struct scsi_device *sdev)
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
unsigned long flags;
struct scsi_target *starget;
+ struct sas_rphy *rphy = NULL;
if (!sdev->hostdata)
return;
@@ -3864,7 +4200,14 @@ static void mpi3mr_slave_destroy(struct scsi_device *sdev)
scsi_tgt_priv_data->num_luns--;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+
if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
tgt_dev->starget = NULL;
if (tgt_dev)
@@ -3929,16 +4272,23 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
struct scsi_target *starget;
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
unsigned long flags;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
starget = scsi_target(sdev);
shost = dev_to_shost(&starget->dev);
mrioc = shost_priv(shost);
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
if (!tgt_dev)
return -ENXIO;
@@ -3986,11 +4336,12 @@ static int mpi3mr_slave_alloc(struct scsi_device *sdev)
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
unsigned long flags;
struct scsi_target *starget;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
starget = scsi_target(sdev);
shost = dev_to_shost(&starget->dev);
@@ -3998,7 +4349,14 @@ static int mpi3mr_slave_alloc(struct scsi_device *sdev)
scsi_tgt_priv_data = starget->hostdata;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
if (tgt_dev) {
if (tgt_dev->starget == NULL)
@@ -4041,6 +4399,8 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
struct mpi3mr_tgt_dev *tgt_dev;
unsigned long flags;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
+ bool update_stgt_priv_data = false;
scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
if (!scsi_tgt_priv_data)
@@ -4049,8 +4409,25 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
starget->hostdata = scsi_tgt_priv_data;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
- if (tgt_dev && !tgt_dev->is_hidden) {
+
+ if (starget->channel == mrioc->scsi_device_channel) {
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && !tgt_dev->is_hidden)
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ } else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
+ (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ }
+
+ if (update_stgt_priv_data) {
scsi_tgt_priv_data->starget = starget;
scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
@@ -4064,8 +4441,7 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
scsi_tgt_priv_data->throttle_group =
tgt_dev->dev_spec.vd_inf.tg;
- } else
- retval = -ENXIO;
+ }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
return retval;
@@ -4254,6 +4630,16 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (atomic_read(&stgt_priv_data->block_io)) {
+ if (mrioc->stop_drv_processing) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+ retval = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
dev_handle = stgt_priv_data->dev_handle;
if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4266,16 +4652,6 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
- if (atomic_read(&stgt_priv_data->block_io)) {
- if (mrioc->stop_drv_processing) {
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- goto out;
- }
- retval = SCSI_MLQUEUE_DEVICE_BUSY;
- goto out;
- }
-
if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
is_pcie_dev = 1;
if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
@@ -4553,16 +4929,23 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&mrioc->tgtdev_lock);
spin_lock_init(&mrioc->watchdog_lock);
spin_lock_init(&mrioc->chain_buf_lock);
+ spin_lock_init(&mrioc->sas_node_lock);
INIT_LIST_HEAD(&mrioc->fwevt_list);
INIT_LIST_HEAD(&mrioc->tgtdev_list);
INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
+ INIT_LIST_HEAD(&mrioc->sas_expander_list);
+ INIT_LIST_HEAD(&mrioc->hba_port_table_list);
+ INIT_LIST_HEAD(&mrioc->enclosure_list);
mutex_init(&mrioc->reset_mutex);
mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
+ MPI3MR_HOSTTAG_TRANSPORT_CMDS);
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
@@ -4697,6 +5080,11 @@ static void mpi3mr_remove(struct pci_dev *pdev)
while (mrioc->reset_in_progress || mrioc->is_driver_loading)
ssleep(1);
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ }
+
mpi3mr_bsg_exit(mrioc);
mrioc->stop_drv_processing = 1;
mpi3mr_cleanup_fwevt_list(mrioc);
@@ -4706,7 +5094,11 @@ static void mpi3mr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
if (wq)
destroy_workqueue(wq);
- scsi_remove_host(shost);
+
+ if (mrioc->sas_transport_enabled)
+ sas_remove_host(shost);
+ else
+ scsi_remove_host(shost);
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
@@ -4763,22 +5155,21 @@ static void mpi3mr_shutdown(struct pci_dev *pdev)
mpi3mr_cleanup_resources(mrioc);
}
-#ifdef CONFIG_PM
/**
* mpi3mr_suspend - PCI power management suspend callback
- * @pdev: PCI device instance
- * @state: New power state
+ * @dev: Device struct
*
* Change the power state to the given value and cleanup the IOC
* by issuing MUR and shutdown notification
*
* Return: 0 always.
*/
-static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+mpi3mr_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct mpi3mr_ioc *mrioc;
- pci_power_t device_state;
if (!shost)
return 0;
@@ -4792,27 +5183,26 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
mpi3mr_stop_watchdog(mrioc);
mpi3mr_cleanup_ioc(mrioc);
- device_state = pci_choose_state(pdev, state);
- ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- pdev, pci_name(pdev), device_state);
- pci_save_state(pdev);
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
+ pdev, pci_name(pdev));
mpi3mr_cleanup_resources(mrioc);
- pci_set_power_state(pdev, device_state);
return 0;
}
/**
* mpi3mr_resume - PCI power management resume callback
- * @pdev: PCI device instance
+ * @dev: Device struct
*
* Restore the power state to D0 and reinitialize the controller
* and resume I/O operations to the target devices
*
* Return: 0 on success, non-zero on failure
*/
-static int mpi3mr_resume(struct pci_dev *pdev)
+static int __maybe_unused
+mpi3mr_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct mpi3mr_ioc *mrioc;
pci_power_t device_state = pdev->current_state;
@@ -4825,9 +5215,6 @@ static int mpi3mr_resume(struct pci_dev *pdev)
ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
mrioc->pdev = pdev;
mrioc->cpu_count = num_online_cpus();
r = mpi3mr_setup_resources(mrioc);
@@ -4838,18 +5225,21 @@ static int mpi3mr_resume(struct pci_dev *pdev)
}
mrioc->stop_drv_processing = 0;
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
mpi3mr_memset_buffers(mrioc);
r = mpi3mr_reinit_ioc(mrioc, 1);
if (r) {
ioc_err(mrioc, "resuming controller failed[%d]\n", r);
return r;
}
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
scsi_unblock_requests(shost);
+ mrioc->device_refresh_on = 0;
mpi3mr_start_watchdog(mrioc);
return 0;
}
-#endif
static const struct pci_device_id mpi3mr_pci_id_table[] = {
{
@@ -4860,16 +5250,15 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
+static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);
+
static struct pci_driver mpi3mr_pci_driver = {
.name = MPI3MR_DRIVER_NAME,
.id_table = mpi3mr_pci_id_table,
.probe = mpi3mr_probe,
.remove = mpi3mr_remove,
.shutdown = mpi3mr_shutdown,
-#ifdef CONFIG_PM
- .suspend = mpi3mr_suspend,
- .resume = mpi3mr_resume,
-#endif
+ .driver.pm = &mpi3mr_pm_ops,
};
static ssize_t event_counter_show(struct device_driver *dd, char *buf)
@@ -4885,18 +5274,33 @@ static int __init mpi3mr_init(void)
pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
MPI3MR_DRIVER_VERSION);
+ mpi3mr_transport_template =
+ sas_attach_transport(&mpi3mr_transport_functions);
+ if (!mpi3mr_transport_template) {
+ pr_err("%s failed to load due to sas transport attach failure\n",
+ MPI3MR_DRIVER_NAME);
+ return -ENODEV;
+ }
+
ret_val = pci_register_driver(&mpi3mr_pci_driver);
if (ret_val) {
pr_err("%s failed to load due to pci register driver failure\n",
MPI3MR_DRIVER_NAME);
- return ret_val;
+ goto err_pci_reg_fail;
}
ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
&driver_attr_event_counter);
if (ret_val)
- pci_unregister_driver(&mpi3mr_pci_driver);
+ goto err_event_counter;
+
+ return ret_val;
+
+err_event_counter:
+ pci_unregister_driver(&mpi3mr_pci_driver);
+err_pci_reg_fail:
+ sas_release_transport(mpi3mr_transport_template);
return ret_val;
}
@@ -4913,6 +5317,7 @@ static void __exit mpi3mr_exit(void)
driver_remove_file(&mpi3mr_pci_driver.driver,
&driver_attr_event_counter);
pci_unregister_driver(&mpi3mr_pci_driver);
+ sas_release_transport(mpi3mr_transport_template);
}
module_init(mpi3mr_init);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
new file mode 100644
index 000000000000..3fc897336b5e
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -0,0 +1,3291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander);
+
+/**
+ * mpi3mr_post_transport_req - Issue transport requests and wait
+ * @mrioc: Adapter instance reference
+ * @request: Properly populated MPI3 request
+ * @request_sz: Size of the MPI3 request
+ * @reply: Pointer to return MPI3 reply
+ * @reply_sz: Size of the MPI3 reply buffer
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting MPI3 requests from the SAS
+ * transport layer that uses transport command infrastructure.
+ * This blocks for the completion of the request for timeout
+ * seconds and, if the request times out, this function faults
+ * the controller with a proper reason code.
+ *
+ * On successful completion of the request this function returns
+ * appropriate ioc status from the firmware back to the caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_transport_req(struct mpi3mr_ioc *mrioc, void *request,
+ u16 request_sz, void *reply, u16 reply_sz, int timeout,
+ u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->transport_cmds.mutex);
+ if (mrioc->transport_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending transport request failed due to command in use\n");
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+ goto out;
+ }
+ mrioc->transport_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->transport_cmds.is_waiting = 1;
+ mrioc->transport_cmds.callback = NULL;
+ mrioc->transport_cmds.ioc_status = 0;
+ mrioc->transport_cmds.ioc_loginfo = 0;
+
+ init_completion(&mrioc->transport_cmds.done);
+ dprint_cfg_info(mrioc, "posting transport request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)
+ dprint_dump(request, request_sz, "transport_req");
+ retval = mpi3mr_admin_request_post(mrioc, request, request_sz, 1);
+ if (retval) {
+ ioc_err(mrioc, "posting transport request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->transport_cmds.done,
+ (timeout * HZ));
+ if (!(mrioc->transport_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT);
+ ioc_err(mrioc, "transport request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->transport_cmds.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_transport_err(mrioc,
+ "transport request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->transport_cmds.ioc_loginfo);
+
+ if ((reply) && (mrioc->transport_cmds.state & MPI3MR_CMD_REPLY_VALID))
+ memcpy((u8 *)reply, mrioc->transport_cmds.reply, reply_sz);
+
+out_unlock:
+ mrioc->transport_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * mpi3mr_report_manufacture - obtain SMP report_manufacture
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the expander device
+ * @edev: SAS transport layer sas_expander_device object
+ * @port_id: ID of the HBA port
+ *
+ * Fills in the sas_expander_device with manufacturing info.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct sas_expander_device *edev, u8 port_id)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc = 0;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u8 *tmp;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev,
+ data_out_sz + data_in_sz, &data_out_dma, GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ manufacture_reply = data_out + data_out_sz;
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) port_id;
+ mpi_request.sas_address = cpu_to_le64(sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending report manufacturer SMP request to sas_address(0x%016llx), port(%d)\n",
+ (unsigned long long)sas_address, port_id);
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "report manufacturer SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "report manufacturer - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct rep_manu_reply)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ strscpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strscpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strscpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strscpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ return rc;
+}
+
+/**
+ * __mpi3mr_expander_find_by_handle - expander search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the expander
+ *
+ * Context: The caller should acquire sas_node_lock
+ *
+ * This searches for expander device based on handle, then
+ * returns the sas_node object.
+ *
+ * Return: Expander sas_node object reference or NULL
+ */
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpi3mr_is_expander_device - if device is an expander
+ * @device_info: Bitfield providing information about the device
+ *
+ * Return: 1 if the device is an expander device, else 0.
+ */
+u8 mpi3mr_is_expander_device(u16 device_info)
+{
+ if ((device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) ==
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * mpi3mr_get_sas_address - retrieve sas_address for handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @sas_address: Address to hold sas address
+ *
+ * This function issues a device page0 read for a given device
+ * handle, gets the SAS address and returns it back.
+ *
+ * Return: 0 for success, non-zero for failure
+ */
+static int mpi3mr_get_sas_address(struct mpi3mr_ioc *mrioc, u16 handle,
+ u64 *sas_address)
+{
+ struct mpi3_device_page0 dev_pg0;
+ u16 ioc_status;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ *sas_address = 0;
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (le16_to_cpu(dev_pg0.flags) &
+ MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE)
+ *sas_address = mrioc->sas_hba.sas_address;
+ else if (dev_pg0.device_form == MPI3_DEVICE_DEVFORM_SAS_SATA) {
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ *sas_address = le64_to_cpu(sasinf->sas_address);
+ } else {
+ ioc_err(mrioc, "%s: device_form(%d) is not SAS_SATA\n",
+ __func__, dev_pg0.device_form);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device from the sas address and
+ * hba port pointer and then returns the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.hba_port == hba_port))
+ goto found_device;
+ return NULL;
+found_device:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device from the sas address and
+ * hba port pointer and then returns the mpi3mr_tgt_dev object.
+ *
+ * Context: This function will acquire tgtdev_lock and release
+ * it before returning the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+
+ if (!hba_port)
+ goto out;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc, sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+out:
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_remove_device_by_sas_address - remove the device
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device using the sas address and
+ * hba port pointer and then removes it from the OS.
+ *
+ * Return: None
+ */
+static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ u8 was_on_tgtdev_list = 0;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc,
+ sas_address, hba_port);
+ if (tgtdev) {
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ was_on_tgtdev_list = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (was_on_tgtdev_list) {
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr_and_rphy - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @rphy: SAS transport layer rphy object
+ *
+ * This searches for the target device from the sas address and
+ * rphy pointer and then returns the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.rphy == rphy))
+ goto found_device;
+ return NULL;
+found_device:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_expander_find_by_sas_address - sas expander search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander
+ * @hba_port: HBA port entry
+ *
+ * Return: A valid SAS expander node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *mpi3mr_expander_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander, *r = NULL;
+
+ if (!hba_port)
+ goto out;
+
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if ((sas_expander->sas_address != sas_address) ||
+ (sas_expander->hba_port != hba_port))
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * __mpi3mr_sas_node_find_by_sas_address - sas node search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander or sas host
+ * @hba_port: HBA port entry
+ * Context: Caller should acquire mrioc->sas_node_lock.
+ *
+ * If the SAS address indicates the device is directly attached
+ * to the controller (controller's SAS address) then the SAS
+ * node associated with the controller is returned back, else
+ * the SAS address and hba port are used to identify the exact
+ * expander and the associated sas_node object is returned. If
+ * there is no match, NULL is returned.
+ *
+ * Return: A valid SAS node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *__mpi3mr_sas_node_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+
+ if (mrioc->sas_hba.sas_address == sas_address)
+ return &mrioc->sas_hba;
+ return mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+}
+
+/**
+ * mpi3mr_parent_present - Is parent present for a phy
+ * @mrioc: Adapter instance reference
+ * @phy: SAS transport layer phy object
+ *
+ * Return: 0 if parent is present else non-zero
+ */
+static int mpi3mr_parent_present(struct mpi3mr_ioc *mrioc, struct sas_phy *phy)
+{
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ if (__mpi3mr_sas_node_find_by_sas_address(mrioc,
+ phy->identify.sas_address,
+ hba_port) == NULL) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return 0;
+}
+
+/**
+ * mpi3mr_convert_phy_link_rate - Convert MPI phy link rate
+ * @link_rate: link rate as defined in the MPI header
+ *
+ * Convert link_rate from mpi format into sas_transport layer
+ * form.
+ *
+ * Return: A valid SAS transport layer defined link rate
+ */
+static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI3_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_22_5:
+ rc = SAS_LINK_RATE_22_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ default:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
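The converted value is what the SAS transport layer stores in struct sas_phy; a short usage sketch (illustrative only, not part of this patch; link_rate is assumed to come from an MPI3 event or config page):

        /* phy is a struct sas_phy *; link_rate is the MPI3-encoded rate */
        phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(link_rate);
        if (phy->negotiated_linkrate == SAS_LINK_RATE_UNKNOWN)
                pr_debug("link rate 0x%x not recognized\n", link_rate);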
+/**
+ * mpi3mr_delete_sas_phy - Remove a single phy from port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mr_sas_phy->phy_id);
+
+ list_del(&mr_sas_phy->port_siblings);
+ mr_sas_port->num_phys--;
+ mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id);
+ if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * mpi3mr_add_sas_phy - Adding a single phy to a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "add: sas_address(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mr_sas_phy->phy_id);
+
+ list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id);
+ if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * mpi3mr_add_phy_to_an_existing_port - add phy to existing port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ * @sas_address: SAS address of device/expander where the phy
+ * needs to be added to
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_port *mr_sas_port;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ if (!hba_port)
+ return;
+
+ list_for_each_entry(mr_sas_port, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy == mr_sas_phy)
+ return;
+ }
+ mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy);
+ return;
+ }
+}
+
+/**
+ * mpi3mr_delete_sas_port - helper function to remove a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+ struct mpi3mr_hba_port *hba_port = mr_sas_port->hba_port;
+ enum sas_device_type device_type =
+ mr_sas_port->remote_identify.device_type;
+
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ if (device_type == SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc, sas_address,
+ hba_port);
+
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc, sas_address, hba_port);
+}
+
+/**
+ * mpi3mr_del_phy_from_an_existing_port - del phy from a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy != mr_sas_phy)
+ continue;
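+			/*
+			 * If this is the only phy on the port, tear down the
+			 * whole port (unless a controller reset is in
+			 * progress); otherwise just detach this phy.
+			 */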
+ if ((mr_sas_port->num_phys == 1) &&
+ !mrioc->reset_in_progress)
+ mpi3mr_delete_sas_port(mrioc, mr_sas_port);
+ else
+ mpi3mr_delete_sas_phy(mrioc, mr_sas_port,
+ mr_sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * mpi3mr_sas_port_sanity_check - sanity check while adding port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @sas_address: SAS address of device/expander
+ * @hba_port: HBA port entry
+ *
+ * Verifies whether the phys attached to a device with the given
+ * SAS address already belong to an existing sas port; if so,
+ * removes those phys from the sas port.
+ *
+ * Return: None.
+ */
+static void mpi3mr_sas_port_sanity_check(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ sas_address) || (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ if (mr_sas_node->phy[i].phy_belongs_to_port == 1)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ mr_sas_node, &mr_sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpi3mr_set_identify - set identify for phys and end devices
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @identify: SAS transport layer's identify info
+ *
+ * Populates sas identify info for a specific device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_set_identify(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct sas_identify *identify)
+{
+ struct mpi3_device_page0 device_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ u16 device_info;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0,
+ sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ sasinf = &device_pg0.device_specific.sas_sata_format;
+ device_info = le16_to_cpu(sasinf->device_info);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sasinf->sas_address);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sasinf->phy_num;
+
+ /* device_type */
+ switch (device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) {
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+	/* MPI3.0 doesn't have a define for SATA initiator, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+
+ /* target_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+	/* MPI3.0 has only a combined STP/SATA target define, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET)
+ identify->target_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_host_phy - report sas_host phy to SAS transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @phy_pg0: SAS phy page 0
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_host_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy, struct mpi3_sas_phy_page0 phy_pg0,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle = le16_to_cpu(phy_pg0.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
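+	/* the low nibble of the rate fields carries the minimum rate, the high nibble the maximum rate */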
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_expander_phy - report expander phy to transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @expander_pg1: SAS Expander page 1
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_expander_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy,
+ struct mpi3_sas_expander_page1 expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_hba_port - alloc hba port object
+ * @mrioc: Adapter instance reference
+ * @port_id: Port number
+ *
+ * Allocates memory for an hba port object, adds it to the
+ * controller's hba_port table list and returns it.
+ *
+ * Return: Pointer to the allocated mpi3mr_hba_port or NULL on failure.
+ */
+static struct mpi3mr_hba_port *
+mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id)
+{
+ struct mpi3mr_hba_port *hba_port;
+
+ hba_port = kzalloc(sizeof(struct mpi3mr_hba_port),
+ GFP_KERNEL);
+ if (!hba_port)
+ return NULL;
+ hba_port->port_id = port_id;
+ ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n",
+ hba_port, hba_port->port_id);
+ list_add_tail(&hba_port->list, &mrioc->hba_port_table_list);
+ return hba_port;
+}
+
+/**
+ * mpi3mr_get_hba_port_by_id - find hba port by id
+ * @mrioc: Adapter instance reference
+ * @port_id: Port ID to search
+ *
+ * Return: mpi3mr_hba_port reference for the matched port, or NULL
+ * if no matching (non-dirty) port is found.
+ */
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id)
+{
+ struct mpi3mr_hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (port->port_id != port_id)
+ continue;
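+		/* skip ports flagged dirty (stale entries marked during controller reset) */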
+ if (port->flags & MPI3MR_HBA_PORT_FLAG_DIRTY)
+ continue;
+ return port;
+ }
+
+ return NULL;
+}
+
+/**
+ * mpi3mr_update_links - refreshing SAS phy link changes
+ * @mrioc: Adapter instance reference
+ * @sas_address_parent: SAS address of parent expander or host
+ * @handle: Firmware device handle of attached device
+ * @phy_number: Phy number
+ * @link_rate: New link rate
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port)
+{
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct mpi3mr_sas_phy *mr_sas_phy;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ mr_sas_phy = &mr_sas_node->phy[phy_number];
+ mr_sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI3_SAS_NEG_LINK_RATE_1_5)) {
+ mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_phy->remote_identify);
+ mpi3mr_add_phy_to_an_existing_port(mrioc, mr_sas_node,
+ mr_sas_phy, mr_sas_phy->remote_identify.sas_address,
+ hba_port);
+ } else
+ memset(&mr_sas_phy->remote_identify, 0, sizeof(struct
+ sas_identify));
+
+ if (mr_sas_phy->phy)
+ mr_sas_phy->phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(link_rate);
+
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_phy->phy->dev,
+ "refresh: parent sas_address(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ (unsigned long long)sas_address_parent,
+ link_rate, phy_number, handle, (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+}
+
+/**
+ * mpi3mr_sas_host_refresh - refreshing sas host object contents
+ * @mrioc: Adapter instance reference
+ *
+ * This function refreshes the controller's phy information and
+ * updates the SAS transport layer with the updated information.
+ * It is executed for each device addition or device info change
+ * event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u8 link_rate;
+ u16 sz, port_id, attached_handle;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+
+ dprint_transport_info(mrioc,
+ "updating handles for sas_host(0x%016llx)\n",
+ (unsigned long long)mrioc->sas_hba.sas_address);
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
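+	/*
+	 * Refresh every IO unit phy; phys flagged as host or virtual
+	 * phys are not exposed to the SAS transport layer.
+	 */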
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ link_rate =
+ sas_io_unit_pg0->phy_data[i].negotiated_link_rate >> 4;
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (attached_handle && link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_update_links(mrioc, mrioc->sas_hba.sas_address,
+ attached_handle, i, link_rate,
+ mrioc->sas_hba.phy[i].hba_port);
+ }
+ out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_host_add - create sas host object
+ * @mrioc: Adapter instance reference
+ *
+ * This function creates the controller's phy information and
+ * updates the SAS transport layer with the information. It is
+ * executed for the first device addition or device info change
+ * event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u16 sz, num_phys = 1, port_id, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_enclosure_page0 encl_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ num_phys = sas_io_unit_pg0->num_phys;
+ kfree(sas_io_unit_pg0);
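+	/*
+	 * The first read above used a single-phy sized buffer only to
+	 * learn the phy count; reallocate and re-read the page at its
+	 * full size below.
+	 */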
+
+ mrioc->sas_hba.host_node = 1;
+ INIT_LIST_HEAD(&mrioc->sas_hba.sas_port_list);
+ mrioc->sas_hba.parent_dev = &mrioc->shost->shost_gendev;
+ mrioc->sas_hba.phy = kcalloc(num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!mrioc->sas_hba.phy)
+ return;
+
+ mrioc->sas_hba.num_phys = num_phys;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, i)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ mrioc->sas_hba.phy[i].phy_id = i;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_add_host_phy(mrioc, &mrioc->sas_hba.phy[i],
+ phy_pg0, mrioc->sas_hba.parent_dev);
+ }
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ mrioc->sas_hba.handle, ioc_status, __FILE__, __LINE__,
+ __func__);
+ goto out;
+ }
+ mrioc->sas_hba.enclosure_handle =
+ le16_to_cpu(dev_pg0.enclosure_handle);
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ mrioc->sas_hba.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ ioc_info(mrioc,
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ mrioc->sas_hba.handle,
+ (unsigned long long) mrioc->sas_hba.sas_address,
+ mrioc->sas_hba.num_phys);
+
+ if (mrioc->sas_hba.enclosure_handle) {
+ if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status,
+ &encl_pg0, sizeof(dev_pg0),
+ MPI3_ENCLOS_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.enclosure_handle)) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS))
+ mrioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(encl_pg0.enclosure_logical_id);
+ }
+
+out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_port_add - Expose the SAS device to the SAS TL
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the attached device
+ * @sas_address_parent: sas address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * This function creates a new sas port object for the given end
+ * device matching the sas address and hba_port, adds it to the
+ * sas_node's sas_port_list and exposes the attached sas device
+ * to the SAS transport layer through sas_rphy_add.
+ *
+ * Return: A valid mpi3mr_sas_port reference or NULL on failure.
+ */
+static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ u16 handle, u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy, *next;
+ struct mpi3mr_sas_port *mr_sas_port;
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct sas_rphy *rphy;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ int i;
+ struct sas_port *port;
+
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ mr_sas_port = kzalloc(sizeof(struct mpi3mr_sas_port), GFP_KERNEL);
+ if (!mr_sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&mr_sas_port->port_list);
+ INIT_LIST_HEAD(&mr_sas_port->phy_list);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!mr_sas_node) {
+ ioc_err(mrioc, "%s:could not find parent sas_address(0x%016llx)!\n",
+ __func__, (unsigned long long)sas_address_parent);
+ goto out_fail;
+ }
+
+ if ((mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_port->remote_identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mr_sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ mr_sas_port->hba_port = hba_port;
+ mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ mr_sas_port->remote_identify.sas_address, hba_port);
+
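+	/*
+	 * Gather all parent node phys that are attached to this remote
+	 * SAS address on the same HBA port.
+	 */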
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ mr_sas_port->remote_identify.sas_address) ||
+ (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << i);
+ }
+
+ if (!mr_sas_port->num_phys) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+
+ if (!tgtdev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 1;
+ }
+
+ if (!mr_sas_node->parent_dev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(mr_sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mr_sas_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&port->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ sas_port_add_phy(port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+ mr_sas_phy->hba_port = hba_port;
+ }
+
+ mr_sas_port->port = port;
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ rphy = sas_end_device_alloc(port);
+ tgtdev->dev_spec.sas_sata_inf.rphy = rphy;
+ } else {
+ rphy = sas_expander_alloc(port,
+ mr_sas_port->remote_identify.device_type);
+ }
+ rphy->identify = mr_sas_port->remote_identify;
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ if ((sas_rphy_add(rphy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ }
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 0;
+ tgtdev->dev_spec.sas_sata_inf.sas_transport_attached = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+ dev_info(&rphy->dev,
+ "%s: added: handle(0x%04x), sas_address(0x%016llx)\n",
+ __func__, handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address);
+
+ mr_sas_port->rphy = rphy;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&mr_sas_port->port_list, &mr_sas_node->sas_port_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, true);
+ }
+
+ /* fill in report manufacture */
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_report_manufacture(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy), hba_port->port_id);
+
+ return mr_sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mr_sas_phy, next, &mr_sas_port->phy_list,
+ port_siblings)
+ list_del(&mr_sas_phy->port_siblings);
+ kfree(mr_sas_port);
+ return NULL;
+}
+
+/**
+ * mpi3mr_sas_port_remove - remove port from the list
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of attached device
+ * @sas_address_parent: SAS address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * Removes the matching port object from the sas_node's
+ * sas_port_list and frees the associated memory.
+ *
+ * Return: None
+ */
+static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+ unsigned long flags;
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_node *mr_sas_node;
+ u8 found = 0;
+ struct mpi3mr_sas_phy *mr_sas_phy, *next_phy;
+ struct mpi3mr_hba_port *srch_port, *hba_port_next = NULL;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ found = 1;
+ list_del(&mr_sas_port->port_list);
+ goto out;
+ }
+
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
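+	/*
+	 * For a port hanging off the host node, also drop the matching
+	 * hba_port entry from the hba_port table list.
+	 */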
+ if (mr_sas_node->host_node) {
+ list_for_each_entry_safe(srch_port, hba_port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (srch_port != hba_port)
+ continue;
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ srch_port, srch_port->port_id);
+ list_del(&hba_port->list);
+ kfree(hba_port);
+ break;
+ }
+ }
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if (mr_sas_node->phy[i].remote_identify.sas_address ==
+ sas_address)
+ memset(&mr_sas_node->phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ list_for_each_entry_safe(mr_sas_phy, next_phy,
+ &mr_sas_port->phy_list, port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ mr_sas_phy->phy_belongs_to_port = 0;
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete_phy(mr_sas_port->port,
+ mr_sas_phy->phy);
+ list_del(&mr_sas_phy->port_siblings);
+ }
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete(mr_sas_port->port);
+ ioc_info(mrioc, "%s: removed sas_address(0x%016llx)\n",
+ __func__, (unsigned long long)sas_address);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, false);
+ }
+
+ kfree(mr_sas_port);
+}
+
+/**
+ * struct host_port - host port details
+ * @sas_address: SAS Address of the attached device
+ * @phy_mask: phy mask of host port
+ * @handle: Device Handle of attached device
+ * @iounit_port_id: IO unit port ID
+ * @used: Flag indicating whether this host port has already been
+ *        matched with a sas port from sas_port_list
+ * @lowest_phy: lowest phy ID of host port
+ */
+struct host_port {
+ u64 sas_address;
+ u32 phy_mask;
+ u16 handle;
+ u8 iounit_port_id;
+ u8 used;
+ u8 lowest_phy;
+};
+
+/**
+ * mpi3mr_update_mr_sas_port - update sas port objects during reset
+ * @mrioc: Adapter instance reference
+ * @h_port: host_port object
+ * @mr_sas_port: sas_port objects which needs to be updated
+ *
+ * Update the port ID of the sas port object. Also add phys that
+ * were newly added to the current sas port and remove phys that
+ * moved out of it.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy;
+ u32 phy_mask_xor;
+ u64 phys_to_be_added, phys_to_be_removed;
+ int i;
+
+ h_port->used = 1;
+ mr_sas_port->marked_responding = 1;
+
+ dev_info(&mr_sas_port->port->dev,
+ "sas_address(0x%016llx), old: port_id %d phy_mask 0x%x, new: port_id %d phy_mask:0x%x\n",
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port->port_id, mr_sas_port->phy_mask,
+ h_port->iounit_port_id, h_port->phy_mask);
+
+ mr_sas_port->hba_port->port_id = h_port->iounit_port_id;
+ mr_sas_port->hba_port->flags &= ~MPI3MR_HBA_PORT_FLAG_DIRTY;
+
+ /* Get the newly added phys bit map & removed phys bit map */
+ phy_mask_xor = mr_sas_port->phy_mask ^ h_port->phy_mask;
+ phys_to_be_added = h_port->phy_mask & phy_mask_xor;
+ phys_to_be_removed = mr_sas_port->phy_mask & phy_mask_xor;
+
+ /*
+ * Register these new phys to current mr_sas_port's port.
+	 * If these phys were previously registered with another port,
+	 * delete them from that port first.
+ */
+ for_each_set_bit(i, (ulong *) &phys_to_be_added, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ mpi3mr_add_phy_to_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ /* Delete the phys which are not part of current mr_sas_port's port. */
+ for_each_set_bit(i, (ulong *) &phys_to_be_removed, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ }
+}
+
+/**
+ * mpi3mr_refresh_sas_ports - update host's sas ports during reset
+ * @mrioc: Adapter instance reference
+ *
+ * Update the host's sas ports during reset by checking whether
+ * the sas ports are still intact. Add/remove phys if any hba
+ * phys moved into or out of a sas port. Also update the
+ * io_unit_port if it changed during reset.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
+{
+ struct host_port h_port[32];
+ int i, j, found, host_port_count = 0, port_idx;
+ u16 sz, attached_handle, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ struct mpi3mr_sas_port *mr_sas_port;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+	/* Build the table of host ports from the IO unit page0 phy data */
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (!attached_handle)
+ continue;
+ found = 0;
+ for (j = 0; j < host_port_count; j++) {
+ if (h_port[j].handle == attached_handle) {
+ h_port[j].phy_mask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ continue;
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ attached_handle))) {
+ dprint_reset(mrioc,
+ "failed to read dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ attached_handle, __FILE__, __LINE__, __func__);
+ continue;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ ioc_status, attached_handle,
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+
+ port_idx = host_port_count;
+ h_port[port_idx].sas_address = le64_to_cpu(sasinf->sas_address);
+ h_port[port_idx].handle = attached_handle;
+ h_port[port_idx].phy_mask = (1 << i);
+ h_port[port_idx].iounit_port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ h_port[port_idx].lowest_phy = sasinf->phy_num;
+ h_port[port_idx].used = 0;
+ host_port_count++;
+ }
+
+ if (!host_port_count)
+ goto out;
+
+ if (mrioc->logging_level & MPI3_DEBUG_RESET) {
+ ioc_info(mrioc, "Host port details before reset\n");
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ mr_sas_port->hba_port->port_id,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->phy_mask, mr_sas_port->lowest_phy);
+ }
+ mr_sas_port = NULL;
+ ioc_info(mrioc, "Host port details after reset\n");
+ for (i = 0; i < host_port_count; i++) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ h_port[i].iounit_port_id, h_port[i].sas_address,
+ h_port[i].phy_mask, h_port[i].lowest_phy);
+ }
+ }
+
+ /* mark all host sas port entries as dirty */
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ mr_sas_port->marked_responding = 0;
+ mr_sas_port->hba_port->flags |= MPI3MR_HBA_PORT_FLAG_DIRTY;
+ }
+
+ /* First check for matching lowest phy */
+ for (i = 0; i < host_port_count; i++) {
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].lowest_phy == mr_sas_port->lowest_phy) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+	/* In case the lowest phy got enabled or disabled during reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].phy_mask & mr_sas_port->phy_mask) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+	/* In case an expander cable was moved to another HBA port during reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_refresh_expanders - Refresh expander device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed post controller reset to identify any expander
+ * devices that went missing during the reset and remove them from
+ * the upper layers, and to expose any newly detected expander
+ * devices to the upper layers.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ u16 ioc_status, handle;
+ u64 sas_address;
+ int i;
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ sas_expander->non_responding = 1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ sas_expander = NULL;
+
+ handle = 0xffff;
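+	/*
+	 * Walk the firmware's expander list: starting from 0xffff the
+	 * GET_NEXT_HANDLE form returns the first expander, and each
+	 * iteration feeds the last handle back to fetch the next one.
+	 */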
+
+	/* Search for responding expander devices and add any newly discovered ones */
+ while (true) {
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(struct mpi3_sas_expander_page0),
+ MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ dprint_reset(mrioc,
+ "failed to read exp pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading exp pg0 for handle:(0x%04x), %s:%d/%s()!\n",
+ ioc_status, handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ handle = le16_to_cpu(expander_pg0.dev_handle);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, expander_pg0.io_unit_port);
+
+ if (!hba_port) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!sas_expander) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ sas_expander->non_responding = 0;
+ if (sas_expander->handle == handle)
+ continue;
+
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ }
+
+ /*
+ * Delete non responding expander devices and the corresponding
+ * hba_port if the non responding expander device's parent device
+ * is a host node.
+ */
+ sas_expander = NULL;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ if (sas_expander->non_responding) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_node_add - insert an expander to the list.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander sas node
+ * Context: This function will acquire sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ *
+ * Return: None.
+ */
+static void mpi3mr_expander_node_add(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &mrioc->sas_expander_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_add - Create expander object
+ * @mrioc: Adapter instance reference
+ * @handle: Expander firmware device handle
+ *
+ * This function creates an expander object, stores it in the
+ * sas_expander_list and exposes it to the SAS transport
+ * layer.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_enclosure_node *enclosure_dev;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ u16 ioc_status, parent_handle, temp_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ u8 port_id, link_rate;
+ struct mpi3mr_sas_port *mr_sas_port = NULL;
+ struct mpi3mr_hba_port *hba_port;
+ u32 phynum_handle;
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (mrioc->reset_in_progress)
+ return -1;
+
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(expander_pg0), MPI3_SAS_EXPAND_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ parent_handle = le16_to_cpu(expander_pg0.parent_dev_handle);
+ if (mpi3mr_get_sas_address(mrioc, parent_handle, &sas_address_parent)
+ != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ port_id = expander_pg0.io_unit_port;
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
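+	/*
+	 * If the parent is another expander, make sure that expander is
+	 * added first; if it already exists, refresh the parent phy that
+	 * connects to this child expander.
+	 */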
+ if (sas_address_parent != mrioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = mpi3mr_expander_add(mrioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ } else {
+ /*
+			 * When a parent expander is present, update its phy to
+			 * which the child expander is connected with the link
+			 * speed, attached dev handle and sas address.
+ */
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle =
+ (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ parent_handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc,
+ &ioc_status, &expander_pg1,
+ sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ temp_handle = le16_to_cpu(
+ expander_pg1.attached_dev_handle);
+ if (temp_handle != handle)
+ continue;
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ mpi3mr_update_links(mrioc, sas_address_parent,
+ handle, i, link_rate, hba_port);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node),
+ GFP_KERNEL);
+ if (!sas_expander)
+ return -1;
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.num_phys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+ sas_expander->hba_port = hba_port;
+
+ ioc_info(mrioc,
+ "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys) {
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mr_sas_port = mpi3mr_sas_port_add(mrioc, handle, sas_address_parent,
+ sas_expander->hba_port);
+ if (!mr_sas_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mr_sas_port->rphy->dev;
+ sas_expander->rphy = mr_sas_port->rphy;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle = (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].hba_port = hba_port;
+
+ if ((mpi3mr_add_expander_phy(mrioc, &sas_expander->phy[i],
+ expander_pg1, sas_expander->parent_dev))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ enclosure_dev =
+ mpi3mr_enclosure_find_by_handle(mrioc,
+ sas_expander->enclosure_handle);
+ if (enclosure_dev)
+ sas_expander->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+ }
+
+ mpi3mr_expander_node_add(mrioc, sas_expander);
+ return 0;
+
+out_fail:
+
+ if (mr_sas_port)
+ mpi3mr_sas_port_remove(mrioc,
+ sas_expander->sas_address,
+ sas_address_parent, sas_expander->hba_port);
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+ return rc;
+}
+
+/**
+ * mpi3mr_expander_node_remove - recursive removal of expander.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander device object
+ *
+ * Removes the expander object from the sas_expander_list, frees
+ * the associated memory and removes it from the SAS TL. If one
+ * of the attached devices is an expander, it is removed
+ * recursively as well.
+ *
+ * Return: None.
+ */
+static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ unsigned long flags;
+ u8 port_id;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mr_sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (mrioc->reset_in_progress)
+ return;
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ else if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ port_id = sas_expander->hba_port->port_id;
+ mpi3mr_sas_port_remove(mrioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent, sas_expander->hba_port);
+
+ ioc_info(mrioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address, port_id);
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * mpi3mr_expander_remove - Remove expander object
+ * @mrioc: Adapter instance reference
+ * @sas_address: Remove expander sas_address
+ * @hba_port: HBA port reference
+ *
+ * This function removes the expander object, stored in
+ * mrioc->sas_expander_list, from the SAS TL by calling
+ * mpi3mr_expander_node_remove().
+ *
+ * Return: None
+ */
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ unsigned long flags;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (sas_expander)
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+}
+
+/**
+ * mpi3mr_get_sas_negotiated_logical_linkrate - get linkrate
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function identifies whether the target device is
+ * attached directly or through an expander, issues the sas phy
+ * page0 or expander phy page1 accordingly and gets the link
+ * rate. If reading the pages fails, a link rate of 1.5 Gbps is
+ * returned.
+ *
+ * Return: logical link rate.
+ */
+static u8 mpi3mr_get_sas_negotiated_logical_linkrate(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u8 link_rate = MPI3_SAS_NEG_LINK_RATE_1_5, phy_number;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u32 phynum_handle;
+ u16 ioc_status;
+
+ phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
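+	/*
+	 * Expander attached devices report the link rate through expander
+	 * page1; directly attached devices through the controller's sas
+	 * phy page0.
+	 */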
+ if (!(tgtdev->devpg0_flag & MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)) {
+ phynum_handle = ((phy_number<<MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT)
+ | tgtdev->parent_handle);
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy_number)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+out:
+ return link_rate;
+}
+
+/**
+ * mpi3mr_report_tgtdev_to_sas_transport - expose dev to SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function exposes the target device after
+ * preparing host_phy, setting up link rate etc.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ int retval = 0;
+ u8 link_rate, parent_phy_number;
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+ u8 port_id;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return -1;
+
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_get_sas_address(mrioc, tgtdev->parent_handle,
+ &sas_address_parent) != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.sas_address_parent = sas_address_parent;
+
+ parent_phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ port_id = tgtdev->io_unit_port;
+
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.hba_port = hba_port;
+
+ link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev);
+
+ mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle,
+ parent_phy_number, link_rate, hba_port);
+
+ tgtdev->host_exposed = 1;
+ if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
+ sas_address_parent, hba_port)) {
+ tgtdev->host_exposed = 0;
+ retval = -1;
+ } else if ((!tgtdev->starget)) {
+ if (!mrioc->is_driver_loading)
+ mpi3mr_sas_port_remove(mrioc, sas_address,
+ sas_address_parent, hba_port);
+ tgtdev->host_exposed = 0;
+ retval = -1;
+ }
+ return retval;
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_sas_transport - remove from SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function removes the target device
+ *
+ * Return: None.
+ */
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return;
+
+ hba_port = tgtdev->dev_spec.sas_sata_inf.hba_port;
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ sas_address_parent = tgtdev->dev_spec.sas_sata_inf.sas_address_parent;
+ mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
+ hba_port);
+ tgtdev->host_exposed = 0;
+}
+
+/**
+ * mpi3mr_get_port_id_by_sas_phy - Get port ID of the given phy
+ * @phy: SAS transport layer phy object
+ *
+ * Return: Port number of the HBA port if valid, else 0xFF
+ */
+static inline u8 mpi3mr_get_port_id_by_sas_phy(struct sas_phy *phy)
+{
+ u8 port_id = 0xFF;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ if (hba_port)
+ port_id = hba_port->port_id;
+
+ return port_id;
+}
+
+/**
+ * mpi3mr_get_port_id_by_rphy - Get Port number from SAS rphy
+ *
+ * @mrioc: Adapter instance reference
+ * @rphy: SAS transport layer remote phy object
+ *
+ * Retrieves the HBA port number to which the device pointed by
+ * the rphy object is attached.
+ *
+ * Return: Valid port number on success else 0xFF.
+ */
+static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *rphy)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ if (!rphy)
+ return port_id;
+
+ if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list,
+ list) {
+ if (sas_expander->rphy == rphy) {
+ port_id = sas_expander->hba_port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ } else if (rphy->identify.device_type == SAS_END_DEVICE) {
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ port_id =
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ }
+ return port_id;
+}
+
+static inline struct mpi3mr_ioc *phy_to_mrioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+
+ return shost_priv(shost);
+}
+
+static inline struct mpi3mr_ioc *rphy_to_mrioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+
+ return shost_priv(shost);
+}
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
+
+/**
+ * mpi3mr_get_expander_phy_error_log - return expander phy error counters
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ *
+ * Return: 0 for success, non-zero for failure.
+ *
+ */
+static int mpi3mr_get_expander_phy_error_log(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma, data_in_dma;
+ u32 data_out_sz, data_in_sz, sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_error_log_request);
+ data_in_sz = sizeof(struct phy_error_log_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_error_log_reply = data_out + data_out_sz;
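+	/*
+	 * A single coherent buffer holds both the SMP request (data out)
+	 * and the reply (data in); the reply region starts right after
+	 * the request.
+	 */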
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy error log SMP request to sas_address(0x%016llx), phy_id(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy error log - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log - function_result(%d)\n",
+ phy_error_log_reply->function_result);
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_linkerrors - return phy error counters
+ * @phy: The SAS transport layer phy object
+ *
+ * This function retrieves the phy error log information of the
+ * HBA or expander to which the phy belongs.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_phy_page1 phy_pg1;
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_get_expander_phy_error_log(mrioc, phy);
+
+ memset(&phy_pg1, 0, sizeof(struct mpi3_sas_phy_page1));
+ /* get hba phy error logs */
+ if ((mpi3mr_cfg_get_sas_phy_pg1(mrioc, &ioc_status, &phy_pg1,
+ sizeof(struct mpi3_sas_phy_page1),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.invalid_dword_count);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.running_disparity_error_count);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.loss_dword_synch_count);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.phy_reset_problem_count);
+ return 0;
+}
+
+/**
+ * mpi3mr_transport_get_enclosure_identifier - Get Enclosure ID
+ * @rphy: The SAS transport layer remote phy object
+ * @identifier: Enclosure identifier to be returned
+ *
+ * Returns the enclosure id for the device pointed to by the remote
+ * phy object.
+ *
+ * Return: 0 on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_enclosure_identifier(struct sas_rphy *rphy,
+ u64 *identifier)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ *identifier =
+ tgtdev->enclosure_logical_id;
+ rc = 0;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_bay_identifier - Get bay ID
+ * @rphy: The SAS transport layer remote phy object
+ *
+ * Returns the slot id for the device pointed to by the remote phy
+ * object.
+ *
+ * Return: Valid slot ID on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ rc = tgtdev->slot;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
+
+/**
+ * mpi3mr_expander_phy_control - expander phy control
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ * @phy_operation: The phy operation to be executed
+ *
+ * Issues SMP passthru phy control request to execute a specific
+ * phy operation for a given expander device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_expander_phy_control(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u16 sz;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_control_request);
+ data_in_sz = sizeof(struct phy_control_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_control_reply = data_out + data_out_sz;
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy control SMP request to sas_address(0x%016llx), phy_id(%d) opcode(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ phy_operation);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy control SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+ dprint_transport_info(mrioc,
+ "phy control - function_result(%d)\n",
+ phy_control_reply->function_result);
+ rc = 0;
+ }
+ out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_reset - Reset a given phy
+ * @phy: The SAS transport layer phy object
+ * @hard_reset: Flag to indicate the type of reset
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_iounit_control_request mpi_request;
+ struct mpi3_iounit_control_reply mpi_reply;
+ u16 request_sz = sizeof(struct mpi3_iounit_control_request);
+ u16 reply_sz = sizeof(struct mpi3_iounit_control_reply);
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, request_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request.operation = MPI3_CTRL_OP_SAS_PHY_CONTROL;
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX] =
+ (hard_reset ? MPI3_CTRL_ACTION_HARD_RESET :
+ MPI3_CTRL_ACTION_LINK_RESET);
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX] =
+ phy->number;
+
+ dprint_transport_info(mrioc,
+ "sending phy reset request to sas_address(0x%016llx), phy_id(%d) hard_reset(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ hard_reset);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "phy reset request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+out:
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_enable - enable/disable phys
+ * @phy: The SAS transport layer phy object
+ * @enable: flag to enable/disable, enable phy when true
+ *
+ * This function enables/disables a given phy by executing the
+ * required configuration page changes or expander phy control command.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ u16 sz;
+ int rc = 0;
+ int i, discovery_active;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < mrioc->sas_hba.num_phys ; i++) {
+ if (sas_io_unit_pg0->phy_data[i].port_flags &
+ MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS) {
+ ioc_err(mrioc,
+ "discovery is active on port = %d, phy = %d\n"
+ "\tunable to enable/disable phys, try again later!\n",
+ sas_io_unit_pg0->phy_data[i].io_unit_port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if ((sas_io_unit_pg0->phy_data[phy->number].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (enable)
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ &= ~MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ |= MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_io_unit_pg1);
+ kfree(sas_io_unit_pg0);
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_speed - set phy min/max speed
+ * @phy: The SAS transport layer phy object
+ * @rates: Rates defined as in sas_phy_linkrates
+ *
+ * This function sets the link rates given in the rates
+ * argument for the given phy by executing the required
+ * configuration page changes or expander phy control command.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u16 sz, ioc_status;
+ int rc = 0;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ sas_io_unit_pg1->phy_data[phy->number].max_min_link_rate =
+ (rates->minimum_linkrate + (rates->maximum_linkrate << 4));
+
+ if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS)) {
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate &
+ MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK)
+ >> MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ }
+
+out:
+ kfree(sas_io_unit_pg1);
+ return rc;
+}
+
+/**
+ * mpi3mr_map_smp_buffer - map BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address holder
+ * @dma_len: Mapped DMA buffer length.
+ * @p: Virtual address holder
+ *
+ * This function maps the DMAable buffer
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+mpi3mr_map_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t *dma_addr, size_t *dma_len, void **p)
+{
+ /* Check if the request is split across multiple segments */
+ if (buf->sg_cnt > 1) {
+ *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+ GFP_KERNEL);
+ if (!*p)
+ return -ENOMEM;
+ *dma_len = buf->payload_len;
+ } else {
+ if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+ return -ENOMEM;
+ *dma_addr = sg_dma_address(buf->sg_list);
+ *dma_len = sg_dma_len(buf->sg_list);
+ *p = NULL;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_unmap_smp_buffer - unmap BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address to be unmapped
+ * @p: Virtual address
+ *
+ * This function unmaps the DMAable buffer
+ */
+static void
+mpi3mr_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t dma_addr, void *p)
+{
+ if (p)
+ dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+ else
+ dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
+/**
+ * mpi3mr_transport_smp_handler - handler for smp passthru
+ * @job: BSG job reference
+ * @shost: SCSI host object reference
+ * @rphy: SAS transport rphy object pointing to the expander
+ *
+ * This is used primarily by smp_utils for sending SMP
+ * commands to the expanders attached to the controller.
+ */
+static void
+mpi3mr_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ int rc;
+ void *psge;
+ dma_addr_t dma_addr_in;
+ dma_addr_t dma_addr_out;
+ void *addr_in = NULL;
+ void *addr_out = NULL;
+ size_t dma_len_in;
+ size_t dma_len_out;
+ unsigned int reslen = 0;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ &dma_addr_out, &dma_len_out, &addr_out);
+ if (rc)
+ goto out;
+
+ if (addr_out)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, addr_out,
+ job->request_payload.payload_len);
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ &dma_addr_in, &dma_len_in, &addr_in);
+ if (rc)
+ goto unmap_out;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_rphy(mrioc, rphy);
+ mpi_request.sas_address = ((rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(mrioc->sas_hba.sas_address));
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_out - 4, dma_addr_out);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_in - 4, dma_addr_in);
+
+ dprint_transport_info(mrioc, "sending SMP request\n");
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto unmap_in;
+
+ dprint_transport_info(mrioc,
+ "SMP request completed with ioc_status(0x%04x)\n", ioc_status);
+
+ dprint_transport_info(mrioc,
+ "SMP request - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ memcpy(job->reply, &mpi_reply, reply_sz);
+ job->reply_len = reply_sz;
+ reslen = le16_to_cpu(mpi_reply.response_data_length);
+
+ if (addr_in)
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, addr_in,
+ job->reply_payload.payload_len);
+
+ rc = 0;
+unmap_in:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ dma_addr_in, addr_in);
+unmap_out:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ dma_addr_out, addr_out);
+out:
+ bsg_job_done(job, rc, reslen);
+}
+
+struct sas_function_template mpi3mr_transport_functions = {
+ .get_linkerrors = mpi3mr_transport_get_linkerrors,
+ .get_enclosure_identifier = mpi3mr_transport_get_enclosure_identifier,
+ .get_bay_identifier = mpi3mr_transport_get_bay_identifier,
+ .phy_reset = mpi3mr_transport_phy_reset,
+ .phy_enable = mpi3mr_transport_phy_enable,
+ .set_phy_speed = mpi3mr_transport_phy_speed,
+ .smp_handler = mpi3mr_transport_smp_handler,
+};
+
+struct scsi_transport_template *mpi3mr_transport_template;
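Illustrative sketch (an assumption, not part of the patch above): the function template and template pointer defined above are normally registered with the SCSI midlayer through the SAS transport class. The hypothetical helpers below show the usual pattern, assuming the standard sas_attach_transport()/sas_release_transport() API; mpi3mr does the equivalent in its module init/exit paths and assigns the template to shost->transportt.

#include <scsi/scsi_transport_sas.h>

/* Hypothetical helpers, for illustration only. */
static int example_attach_sas_transport(void)
{
	/* Allocate a transport template backed by the function table above. */
	mpi3mr_transport_template =
	    sas_attach_transport(&mpi3mr_transport_functions);
	return mpi3mr_transport_template ? 0 : -ENODEV;
}

static void example_release_sas_transport(void)
{
	sas_release_transport(mpi3mr_transport_template);
	mpi3mr_transport_template = NULL;
}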
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index d00431f553e1..4d0be5ab98c1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -534,6 +534,7 @@ typedef struct _MPI2_CONFIG_REPLY {
****************************************************************************/
#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+#define MPI2_MFGPAGE_VENDORID_ATTO (0x117C)
/*MPI v2.0 SAS products */
#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 331e896d8225..8b22df8c1792 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2990,19 +2990,26 @@ static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
+ u64 coherent_dma_mask, dma_mask;
- if (ioc->is_mcpu_endpoint ||
- sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
- dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
+ if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
+ dma_get_required_mask(&pdev->dev) <= 32) {
ioc->dma_mask = 32;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
- else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
+ } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
ioc->dma_mask = 63;
- else
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(63);
+ } else {
ioc->dma_mask = 64;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ }
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
+ if (ioc->use_32bit_dma)
+ coherent_dma_mask = DMA_BIT_MASK(32);
+
+ if (dma_set_mask(&pdev->dev, dma_mask) ||
+ dma_set_coherent_mask(&pdev->dev, coherent_dma_mask))
return -ENODEV;
if (ioc->dma_mask > 32) {
@@ -4313,7 +4320,7 @@ _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SMID = cpu_to_le16(smid);
- writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
@@ -4335,7 +4342,7 @@ _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SMID = cpu_to_le16(smid);
- writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
@@ -4358,7 +4365,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
descriptor.MSIxIndex = msix_task;
descriptor.SMID = cpu_to_le16(smid);
- writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
@@ -4379,7 +4386,7 @@ _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SMID = cpu_to_le16(smid);
- writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
@@ -5425,6 +5432,151 @@ out:
}
/**
+ * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
+ *
+ * @ioc : per adapter object
+ * @n : ptr to the ATTO nvram structure
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
+ struct ATTO_SAS_NVRAM *n)
+{
+ int r = -EINVAL;
+ union ATTO_SAS_ADDRESS *s1;
+ u32 len;
+ u8 *pb;
+ u8 ckSum;
+
+ /* validate nvram checksum */
+ pb = (u8 *) n;
+ ckSum = ATTO_SASNVR_CKSUM_SEED;
+ len = sizeof(struct ATTO_SAS_NVRAM);
+
+ while (len--)
+ ckSum = ckSum + pb[len];
+
+ if (ckSum) {
+ ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
+ return r;
+ }
+
+ s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;
+
+ if (n->Signature[0] != 'E'
+ || n->Signature[1] != 'S'
+ || n->Signature[2] != 'A'
+ || n->Signature[3] != 'S')
+ ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
+ else if (n->Version > ATTO_SASNVR_VERSION)
+ ioc_info(ioc, "Invalid ATTO NVRAM version");
+ else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
+ || s1->b[0] != 0x50
+ || s1->b[1] != 0x01
+ || s1->b[2] != 0x08
+ || (s1->b[3] & 0xF0) != 0x60
+ || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
+ ioc_err(ioc, "Invalid ATTO SAS address\n");
+ } else
+ r = 0;
+ return r;
+}
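/*
 * Illustrative sketch, not part of the patch: the checksum convention
 * validated above is that ATTO_SASNVR_CKSUM_SEED plus the byte-wise sum
 * of the whole structure (including the Checksum field) wraps to zero,
 * so a matching Checksum byte could be produced as follows.
 */
static void example_atto_nvram_set_checksum(struct ATTO_SAS_NVRAM *n)
{
	u8 *pb = (u8 *)n;
	u8 sum = ATTO_SASNVR_CKSUM_SEED;
	u32 len = sizeof(struct ATTO_SAS_NVRAM);

	n->Checksum = 0;
	while (len--)
		sum += pb[len];
	n->Checksum = (u8)(0 - sum);	/* seed + sum of all bytes == 0 mod 256 */
}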
+
+/**
+ * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
+ *
+ * @ioc : per adapter object
+ * @sas_addr: return sas address
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
+{
+ Mpi2ManufacturingPage1_t mfg_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ struct ATTO_SAS_NVRAM *nvram;
+ int r;
+ __be64 addr;
+
+ r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
+ if (r) {
+ ioc_err(ioc, "Failed to read manufacturing page 1\n");
+ return r;
+ }
+
+ /* validate nvram */
+ nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
+ r = mpt3sas_atto_validate_nvram(ioc, nvram);
+ if (r)
+ return r;
+
+ addr = *((__be64 *) nvram->SasAddr);
+ sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
+ return r;
+}
+
+/**
+ * mpt3sas_atto_init - perform initialization for ATTO branded
+ * adapter.
+ * @ioc : per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
+{
+ int sz = 0;
+ Mpi2BiosPage4_t *bios_pg4 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ int ix;
+ union ATTO_SAS_ADDRESS sas_addr;
+ union ATTO_SAS_ADDRESS temp;
+ union ATTO_SAS_ADDRESS bias;
+
+ r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
+ if (r)
+ return r;
+
+ /* get header first to get size */
+ r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
+ if (r) {
+ ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
+ return r;
+ }
+
+ sz = mpi_reply.Header.PageLength * sizeof(u32);
+ bios_pg4 = kzalloc(sz, GFP_KERNEL);
+ if (!bios_pg4) {
+ ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
+ return -ENOMEM;
+ }
+
+ /* read bios page 4 */
+ r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
+ if (r) {
+ ioc_err(ioc, "Failed to read ATTO bios page 4\n");
+ goto out;
+ }
+
+ /* Update bios page 4 with the ATTO WWID */
+ bias.q = sas_addr.q;
+ bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS;
+
+ for (ix = 0; ix < bios_pg4->NumPhys; ix++) {
+ temp.q = sas_addr.q;
+ temp.b[7] += ix;
+ bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
+ bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
+ }
+ r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
+
+out:
+ kfree(bios_pg4);
+ return r;
+}
+
+/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
@@ -5447,6 +5599,13 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
if (rc)
return rc;
}
+
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ rc = mpt3sas_atto_init(ioc);
+ if (rc)
+ return rc;
+ }
+
/*
* Ensure correct T10 PI operation if vendor left EEDPTagMode
* flag unset in NVDATA.
@@ -5496,12 +5655,21 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
rc = _base_assign_fw_reported_qd(ioc);
if (rc)
return rc;
- rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
- if (rc)
- return rc;
- rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
- if (rc)
- return rc;
+
+ /*
+ * ATTO doesn't use bios pages 2 and 3 for bios settings.
+ */
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO)
+ ioc->bios_pg3.BiosVersion = 0;
+ else {
+ rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ if (rc)
+ return rc;
+ }
+
rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
if (rc)
return rc;
@@ -6895,7 +7063,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* send message 32-bits at a time */
for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
- writel(request[i], &ioc->chip->Doorbell);
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
if ((_base_wait_for_doorbell_ack(ioc, 5)))
failed = 1;
}
@@ -6914,16 +7082,16 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = ioc->base_readl(&ioc->chip->Doorbell)
- & MPI2_DOORBELL_DATA_MASK;
+ reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
__LINE__);
return -EFAULT;
}
- reply[1] = ioc->base_readl(&ioc->chip->Doorbell)
- & MPI2_DOORBELL_DATA_MASK;
+ reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
for (i = 2; i < default_reply->MsgLength * 2; i++) {
@@ -6935,8 +7103,9 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
if (i >= reply_bytes/2) /* overflow case */
ioc->base_readl(&ioc->chip->Doorbell);
else
- reply[i] = ioc->base_readl(&ioc->chip->Doorbell)
- & MPI2_DOORBELL_DATA_MASK;
+ reply[i] = le16_to_cpu(
+ ioc->base_readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index e584cf0ffc23..05364aa15ecd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "42.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 42
+#define MPT3SAS_DRIVER_VERSION "43.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 43
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -1652,6 +1652,32 @@ struct mpt3sas_debugfs_buffer {
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
+/*
+ * struct ATTO_SAS_NVRAM - ATTO NVRAM settings stored in
+ * Manufacturing page 1, used to get the ATTO SasAddr.
+ */
+struct ATTO_SAS_NVRAM {
+ u8 Signature[4];
+ u8 Version;
+#define ATTO_SASNVR_VERSION 0
+
+ u8 Checksum;
+#define ATTO_SASNVR_CKSUM_SEED 0x5A
+ u8 Pad[10];
+ u8 SasAddr[8];
+#define ATTO_SAS_ADDR_ALIGN 64
+ u8 Reserved[232];
+};
+
+#define ATTO_SAS_ADDR_DEVNAME_BIAS 63
+
+union ATTO_SAS_ADDRESS {
+ U8 b[8];
+ U16 w[4];
+ U32 d[2];
+ U64 q;
+};
/* base shared API */
extern struct list_head mpt3sas_ioc_list;
@@ -1828,6 +1854,9 @@ int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc,
u8 *num_phys);
int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page);
+
int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
u16 sz);
@@ -1846,6 +1875,12 @@ int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage2_t *config_page);
int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_page);
+int mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_page);
int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage0_t *config_page);
int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index a8dd14c91efd..d114ef381c44 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -541,6 +541,42 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * mpt3sas_config_get_manufacturing_pg1 - obtain manufacturing page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -757,11 +793,99 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
sizeof(*config_page));
+
out:
return r;
}
/**
+ * mpt3sas_config_set_bios_pg4 - write out bios page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz_config_pg: size of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_pg)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION;
+
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz_config_pg);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg4 - read bios page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz_config_pg: size of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_pg)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ /*
+ * The size of the page is variable. Allow for just the
+ * size to be returned.
+ */
+ if (config_page && sz_config_pg) {
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz_config_pg);
+ }
+
+out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_iounit_pg0 - obtain iounit page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 84c87c2c3e7e..0d8b1e942ded 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -948,6 +948,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
break;
}
case MPI2_FUNCTION_FW_DOWNLOAD:
+ {
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n");
+ ret = -EPERM;
+ break;
+ }
+ fallthrough;
+ }
case MPI2_FUNCTION_FW_UPLOAD:
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
@@ -1686,6 +1694,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
request_data = ioc->diag_buffer[buffer_type];
@@ -1787,6 +1796,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (rc && request_data) {
dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
ioc->diag_buffer_status[buffer_type] &=
~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
}
@@ -2163,6 +2173,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
@@ -2417,6 +2428,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 791a406b4f8e..8e24ebcebfe5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5156,6 +5156,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* invalid device handle */
handle = sas_target_priv_data->handle;
+
+ /*
+ * Avoid error handling escalation when the device is disconnected.
+ */
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
+ if (scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY) {
+ scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
+ scsi_done(scmd);
+ return 0;
+ }
+ }
+
if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
scsi_done(scmd);
@@ -11974,7 +11987,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.sg_tablesize = MPT3SAS_SG_DEPTH,
.max_sectors = 32767,
.max_segment_size = 0xffffffff,
- .cmd_per_lun = 7,
+ .cmd_per_lun = 128,
.shost_groups = mpt3sas_host_groups,
.sdev_groups = mpt3sas_dev_groups,
.track_queue_depth = 1,
@@ -12732,6 +12745,12 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
/*
+ * ATTO Branded ExpressSAS H12xx GT
+ */
+ { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /*
* Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
*/
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 91d78d0a38fe..628b08ba6770 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3612,6 +3612,10 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n");
return -1;
}
+
+ if (t->task_proto == SAS_PROTOCOL_INTERNAL_ABORT)
+ atomic_dec(&pm8001_dev->running_req);
+
ts = &t->task_status;
if (status != 0)
pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x ,tag = 0x%x, scp= 0x%x\n",
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index c5e3f380a01c..b08f52673889 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -612,7 +612,7 @@ struct fw_control_info {
operations.*/
u32 reserved;/* padding required for 64 bit
alignment */
- u8 buffer[1];/* Start of buffer */
+ u8 buffer[];/* Start of buffer */
};
struct fw_control_ex {
struct fw_control_info *fw_control;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index bbc4d5890ae6..e045c6e25090 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1921,6 +1921,27 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fc_vport_setlink(vn_port);
}
+ /* Set symbolic node name */
+ if (base_qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (base_qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
+
+ /* Set supported speed */
+ fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
+
+ /* Set speed */
+ vn_port->link_speed = n_port->link_speed;
+
+ /* Set port type */
+ fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
+
+ /* Set maxframe size */
+ fc_host_maxframe_size(vn_port->host) = n_port->mfs;
+
QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
vn_port);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cecfb2cb4c7b..df2fe7bd26d1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -618,7 +618,7 @@ static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
sizeof(struct qedi_endpoint *)), GFP_KERNEL);
if (!qedi->ep_tbl)
return -ENOMEM;
- port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+ port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE);
if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
QEDI_LOCAL_PORT_MIN, port_id)) {
qedi_cm_free_mem(qedi);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 5db9bf69dcff..cd75b179410d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2519,19 +2519,23 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
qla27xx_get_active_image(vha, &active_regions);
regions.global_image = active_regions.global;
+ if (IS_QLA27XX(ha))
+ regions.nvme_params = QLA27XX_PRIMARY_IMAGE;
+
if (IS_QLA28XX(ha)) {
qla28xx_get_aux_images(vha, &active_regions);
regions.board_config = active_regions.aux.board_config;
regions.vpd_nvram = active_regions.aux.vpd_nvram;
regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
+ regions.nvme_params = active_regions.aux.nvme_params;
}
ql_dbg(ql_dbg_user, vha, 0x70e1,
- "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
+ "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
__func__, vha->host_no, regions.global_image,
regions.board_config, regions.vpd_nvram,
- regions.npiv_config_0_1, regions.npiv_config_2_3);
+ regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index bb64b9c5a74b..d38dab0a07e8 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -314,7 +314,8 @@ struct qla_active_regions {
uint8_t vpd_nvram;
uint8_t npiv_config_0_1;
uint8_t npiv_config_2_3;
- uint8_t reserved[32];
+ uint8_t nvme_params;
+ uint8_t reserved[31];
} __packed;
#include "qla_edif_bsg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 7cf1f78cbaee..d7e8454304ce 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2455,7 +2455,7 @@ qla83xx_fw_dump_failed_0:
/****************************************************************************/
/* Write the debug message prefix into @pbuf. */
-static void ql_dbg_prefix(char *pbuf, int pbuf_size,
+static void ql_dbg_prefix(char *pbuf, int pbuf_size, struct pci_dev *pdev,
const scsi_qla_host_t *vha, uint msg_id)
{
if (vha) {
@@ -2464,6 +2464,9 @@ static void ql_dbg_prefix(char *pbuf, int pbuf_size,
/* <module-name> [<dev-name>]-<msg-id>:<host>: */
snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
dev_name(&(pdev->dev)), msg_id, vha->host_no);
+ } else if (pdev) {
+ snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
+ dev_name(&pdev->dev), msg_id);
} else {
/* <module-name> [<dev-name>]-<msg-id>: : */
snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
@@ -2491,20 +2494,20 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
struct va_format vaf;
char pbuf[64];
- if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+ ql_ktrace(1, level, pbuf, NULL, vha, id, fmt);
+
+ if (!ql_mask_match(level))
return;
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
+
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
-
- if (!ql_mask_match(level))
- trace_ql_dbg_log(pbuf, &vaf);
- else
- pr_warn("%s%pV", pbuf, &vaf);
+ pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
@@ -2533,6 +2536,9 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
if (pdev == NULL)
return;
+
+ ql_ktrace(1, level, pbuf, pdev, NULL, id, fmt);
+
if (!ql_mask_match(level))
return;
@@ -2541,7 +2547,9 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset);
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL,
+ id + ql_dbg_offset);
pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
@@ -2570,7 +2578,10 @@ ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
+ ql_ktrace(0, level, pbuf, NULL, vha, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
va_start(va, fmt);
@@ -2621,7 +2632,10 @@ ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id);
+ ql_ktrace(0, level, pbuf, pdev, NULL, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id);
va_start(va, fmt);
@@ -2716,7 +2730,11 @@ ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, id);
+ ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
+ qpair ? qpair->vha : NULL, id);
va_start(va, fmt);
@@ -2762,6 +2780,8 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
struct va_format vaf;
char pbuf[128];
+ ql_ktrace(1, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
+
if (!ql_mask_match(level))
return;
@@ -2770,8 +2790,10 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL,
- id + ql_dbg_offset);
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
+ qpair ? qpair->vha : NULL, id + ql_dbg_offset);
+
pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index feeb1666227f..70482b55d240 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -385,3 +385,46 @@ ql_mask_match(uint level)
return level && ((level & ql2xextended_error_logging) == level);
}
+
+static inline int
+ql_mask_match_ext(uint level, int *log_tunable)
+{
+ if (*log_tunable == 1)
+ *log_tunable = QL_DBG_DEFAULT1_MASK;
+
+ return (level & *log_tunable) == level;
+}
+
+/* Assumes local variables pbuf and pbuf_ready are present. */
+#define ql_ktrace(dbg_msg, level, pbuf, pdev, vha, id, fmt) do { \
+ struct va_format _vaf; \
+ va_list _va; \
+ u32 dbg_off = dbg_msg ? ql_dbg_offset : 0; \
+ \
+ pbuf[0] = 0; \
+ if (!trace_ql_dbg_log_enabled()) \
+ break; \
+ \
+ if (dbg_msg && !ql_mask_match_ext(level, \
+ &ql2xextended_error_logging_ktrace)) \
+ break; \
+ \
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, vha, id + dbg_off); \
+ \
+ va_start(_va, fmt); \
+ _vaf.fmt = fmt; \
+ _vaf.va = &_va; \
+ \
+ trace_ql_dbg_log(pbuf, &_vaf); \
+ \
+ va_end(_va); \
+} while (0)
+
+#define QLA_ENABLE_KERNEL_TRACING
+
+#ifdef QLA_ENABLE_KERNEL_TRACING
+#define QLA_TRACE_ENABLE(_tr) \
+ trace_array_set_clr_event(_tr, "qla", NULL, true)
+#else /* QLA_ENABLE_KERNEL_TRACING */
+#define QLA_TRACE_ENABLE(_tr)
+#endif /* QLA_ENABLE_KERNEL_TRACING */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3ec6a200942e..802eec6407d9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,11 @@
#include <uapi/scsi/fc/fc_els.h>
+#define QLA_DFS_DEFINE_DENTRY(_debugfs_file_name) \
+ struct dentry *dfs_##_debugfs_file_name
+#define QLA_DFS_ROOT_DEFINE_DENTRY(_debugfs_file_name) \
+ struct dentry *qla_dfs_##_debugfs_file_name
+
/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
typedef struct {
uint8_t domain;
@@ -4768,6 +4773,7 @@ struct active_regions {
uint8_t vpd_nvram;
uint8_t npiv_config_0_1;
uint8_t npiv_config_2_3;
+ uint8_t nvme_params;
} aux;
};
@@ -5052,6 +5058,7 @@ struct qla27xx_image_status {
#define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1
#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2
#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3
+#define QLA28XX_AUX_IMG_NVME_PARAMS BIT_4
#define SET_VP_IDX 1
#define SET_AL_PA 2
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 85bd0e468d43..777808af5634 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -489,6 +489,99 @@ qla_dfs_naqp_show(struct seq_file *s, void *unused)
return 0;
}
+/*
+ * Helper macros for setting up debugfs entries.
+ * _name: The name of the debugfs entry
+ * _ctx_struct: The context that was passed when creating the debugfs file
+ *
+ * QLA_DFS_SETUP_RD could be used when there is only a show function.
+ * - the show function takes the name qla_dfs_<sysfs-name>_show
+ *
+ * QLA_DFS_SETUP_RW could be used when there are both show and write functions.
+ * - the show function takes the name qla_dfs_<sysfs-name>_show
+ * - the write function takes the name qla_dfs_<sysfs-name>_write
+ *
+ * To have a new debugfs entry, do:
+ * 1. Create a "struct dentry *" in the appropriate structure in the format
+ * dfs_<sysfs-name>
+ * 2. Setup debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW
+ * 3. Create debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE
+ * or QLA_DFS_ROOT_CREATE_FILE
+ * 4. Remove debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE
+ * or QLA_DFS_ROOT_REMOVE_FILE
+ *
+ * Example for creating "TEST" sysfs file:
+ * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
+ * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);
+ * 3. In qla2x00_dfs_setup():
+ * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
+ * 4. In qla2x00_dfs_remove():
+ * QLA_DFS_REMOVE_FILE(ha, TEST);
+ */
+#define QLA_DFS_SETUP_RD(_name, _ctx_struct) \
+static int \
+qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
+{ \
+ _ctx_struct *__ctx = inode->i_private; \
+ \
+ return single_open(file, qla_dfs_##_name##_show, __ctx); \
+} \
+ \
+static const struct file_operations qla_dfs_##_name##_ops = { \
+ .open = qla_dfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
+#define QLA_DFS_SETUP_RW(_name, _ctx_struct) \
+static int \
+qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
+{ \
+ _ctx_struct *__ctx = inode->i_private; \
+ \
+ return single_open(file, qla_dfs_##_name##_show, __ctx); \
+} \
+ \
+static const struct file_operations qla_dfs_##_name##_ops = { \
+ .open = qla_dfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .write = qla_dfs_##_name##_write, \
+};
+
+#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \
+ do { \
+ if (!qla_dfs_##_name) \
+ qla_dfs_##_name = debugfs_create_file(#_name, \
+ _perm, qla2x00_dfs_root, _ctx, \
+ &qla_dfs_##_name##_ops); \
+ } while (0)
+
+#define QLA_DFS_ROOT_REMOVE_FILE(_name) \
+ do { \
+ if (qla_dfs_##_name) { \
+ debugfs_remove(qla_dfs_##_name); \
+ qla_dfs_##_name = NULL; \
+ } \
+ } while (0)
+
+#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \
+ do { \
+ (_struct)->dfs_##_name = debugfs_create_file(#_name, \
+ _perm, _parent, _ctx, \
+ &qla_dfs_##_name##_ops) \
+ } while (0)
+
+#define QLA_DFS_REMOVE_FILE(_struct, _name) \
+ do { \
+ if ((_struct)->dfs_##_name) { \
+ debugfs_remove((_struct)->dfs_##_name); \
+ (_struct)->dfs_##_name = NULL; \
+ } \
+ } while (0)
+
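/*
 * Illustrative sketch, not part of the patch: following the recipe in the
 * comment above, a hypothetical "TEST" entry only needs a show function;
 * QLA_DFS_SETUP_RD() then generates the open handler and file_operations
 * consumed by QLA_DFS_CREATE_FILE()/QLA_DFS_REMOVE_FILE().
 */
static int qla_dfs_TEST_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;

	seq_printf(s, "host %lu\n", vha->host_no);
	return 0;
}

QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);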
static int
qla_dfs_naqp_open(struct inode *inode, struct file *file)
{
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 400a8b6f3982..00ccc41cef14 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1551,7 +1551,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
sa_frame.port_id.b24);
rval = -EINVAL;
- SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
+ SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
goto done;
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 361015b5763e..f307beed9d29 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1675,6 +1675,7 @@ struct qla_flt_location {
#define FLT_REG_VPD_SEC_27XX_1 0x52
#define FLT_REG_VPD_SEC_27XX_2 0xD8
#define FLT_REG_VPD_SEC_27XX_3 0xDA
+#define FLT_REG_NVME_PARAMS_27XX 0x21
/* 28xx */
#define FLT_REG_AUX_IMG_PRI_28XX 0x125
@@ -1691,6 +1692,8 @@ struct qla_flt_location {
#define FLT_REG_MPI_SEC_28XX 0xF0
#define FLT_REG_PEP_PRI_28XX 0xD1
#define FLT_REG_PEP_SEC_28XX 0xF1
+#define FLT_REG_NVME_PARAMS_PRI_28XX 0x14E
+#define FLT_REG_NVME_PARAMS_SEC_28XX 0x179
struct qla_flt_region {
__le16 code;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5dd2932382ee..e3256e721be1 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -70,8 +70,6 @@ extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
-extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -163,6 +161,7 @@ extern int ql2xrdpenable;
extern int ql2xsmartsan;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
+extern int ql2xextended_error_logging_ktrace;
extern int ql2xiidmaenable;
extern int ql2xmqsupport;
extern int ql2xfwloadbin;
@@ -193,8 +192,6 @@ extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
extern u32 ql2xnvme_queues;
-extern int ql2xrspq_follow_inptr;
-extern int ql2xrspq_follow_inptr_legacy;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -279,7 +276,6 @@ extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_free_dma(srb_t *sp);
-extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
@@ -616,7 +612,6 @@ void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **
/*
* Global Function Prototypes in qla_sup.c source file.
*/
-extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
uint32_t, uint32_t);
extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
@@ -788,12 +783,6 @@ extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_delete_queues(struct scsi_qla_host *);
-extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
-extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
-extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
/* qlafx00 related functions */
extern int qlafx00_pci_config(struct scsi_qla_host *);
@@ -878,8 +867,6 @@ extern void qla82xx_init_flags(struct qla_hw_data *);
extern void qla82xx_set_drv_active(scsi_qla_host_t *);
extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
-extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
-extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
/* ISP 8021 IDC */
extern void qla82xx_clear_drv_active(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e7fe0e52c11d..e12db95de688 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -7933,6 +7933,9 @@ qla28xx_component_status(
active_regions->aux.npiv_config_2_3 =
qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
+
+ active_regions->aux.nvme_params =
+ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
}
static int
@@ -8041,11 +8044,12 @@ check_valid_image:
}
ql_dbg(ql_dbg_init, vha, 0x018f,
- "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
+ "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n",
active_regions->aux.board_config,
active_regions->aux.vpd_nvram,
active_regions->aux.npiv_config_0_1,
- active_regions->aux.npiv_config_2_3);
+ active_regions->aux.npiv_config_2_3,
+ active_regions->aux.nvme_params);
}
void
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 76e79f350a22..e19fde304e5c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3764,7 +3764,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
u16 rsp_in = 0, cur_ring_index;
- int follow_inptr, is_shadow_hba;
+ int is_shadow_hba;
if (!ha->flags.fw_started)
return;
@@ -3774,25 +3774,18 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla_cpu_update(rsp->qpair, smp_processor_id());
}
-#define __update_rsp_in(_update, _is_shadow_hba, _rsp, _rsp_in) \
+#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
do { \
- if (_update) { \
- _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
+ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
- } \
} while (0)
is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
- follow_inptr = is_shadow_hba ? ql2xrspq_follow_inptr :
- ql2xrspq_follow_inptr_legacy;
- __update_rsp_in(follow_inptr, is_shadow_hba, rsp, rsp_in);
+ __update_rsp_in(is_shadow_hba, rsp, rsp_in);
- while ((likely(follow_inptr &&
- rsp->ring_index != rsp_in &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)) ||
- (!follow_inptr &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)) {
+ while (rsp->ring_index != rsp_in &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
cur_ring_index = rsp->ring_index;
@@ -3906,8 +3899,7 @@ process_err:
}
pure_item = qla27xx_copy_fpin_pkt(vha,
(void **)&pkt, &rsp);
- __update_rsp_in(follow_inptr, is_shadow_hba,
- rsp, rsp_in);
+ __update_rsp_in(is_shadow_hba, rsp, rsp_in);
if (!pure_item)
break;
qla24xx_queue_purex_item(vha, pure_item,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 87a93892deac..2c85f3cce726 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -15,6 +15,8 @@
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
+#include <linux/trace_events.h>
+#include <linux/trace.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -35,6 +37,8 @@ static int apidev_major;
*/
struct kmem_cache *srb_cachep;
+static struct trace_array *qla_trc_array;
+
int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
@@ -117,6 +121,11 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"ql2xextended_error_logging=1).\n"
"\t\tDo LOGICAL OR of the value to enable more than one level");
+int ql2xextended_error_logging_ktrace = 1;
+module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
+ "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");
+
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
@@ -333,21 +342,11 @@ MODULE_PARM_DESC(ql2xabts_wait_nvme,
"To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
-u32 ql2xdelay_before_pci_error_handling = 5;
+static u32 ql2xdelay_before_pci_error_handling = 5;
module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
"Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
-int ql2xrspq_follow_inptr = 1;
-module_param(ql2xrspq_follow_inptr, int, 0644);
-MODULE_PARM_DESC(ql2xrspq_follow_inptr,
- "Follow RSP IN pointer for RSP updates for HBAs 27xx and newer (default: 1).");
-
-int ql2xrspq_follow_inptr_legacy = 1;
-module_param(ql2xrspq_follow_inptr_legacy, int, 0644);
-MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
- "Follow RSP IN pointer for RSP updates for HBAs older than 27XX. (default: 1).");
-
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla2xxx_map_queues(struct Scsi_Host *shost);
@@ -2849,6 +2848,27 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
spin_unlock_irqrestore(&vha->work_lock, flags);
}
+static void
+qla_trace_init(void)
+{
+ qla_trc_array = trace_array_get_by_name("qla2xxx");
+ if (!qla_trc_array) {
+ ql_log(ql_log_fatal, NULL, 0x0001,
+ "Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
+ return;
+ }
+
+ QLA_TRACE_ENABLE(qla_trc_array);
+}
+
+static void
+qla_trace_uninit(void)
+{
+ if (!qla_trc_array)
+ return;
+ trace_array_put(qla_trc_array);
+}
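A hedged sketch of how a message could be mirrored into the trace instance created above, gated by the new ql2xextended_error_logging_ktrace mask. The real hook sits in ql_dbg()/qla_dbg.[ch], which are not part of this hunk, so apart from trace_array_init_printk()/trace_array_printk() every identifier below is hypothetical:

	static void qla_trace_message(u32 level, const char *msg)
	{
		if (!qla_trc_array || !(level & ql2xextended_error_logging_ktrace))
			return;

		/* assumes trace_array_init_printk(qla_trc_array) ran once, e.g. in qla_trace_init() */
		trace_array_printk(qla_trc_array, _THIS_IP_, "%s", msg);
	}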
+
/*
* PCI driver interface
*/
@@ -3530,7 +3550,7 @@ skip_dpc:
qla_dual_mode_enabled(base_vha))
scsi_scan_host(host);
else
- ql_dbg(ql_dbg_init, base_vha, 0x0122,
+ ql_log(ql_log_info, base_vha, 0x0122,
"skipping scsi_scan_host() for non-initiator port\n");
qla2x00_alloc_sysfs_attr(base_vha);
@@ -8189,6 +8209,8 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(sw_info_t) != 32);
BUILD_BUG_ON(sizeof(target_id_t) != 2);
+ qla_trace_init();
+
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
SLAB_HWCACHE_ALIGN, NULL);
@@ -8267,6 +8289,8 @@ qlt_exit:
destroy_cache:
kmem_cache_destroy(srb_cachep);
+
+ qla_trace_uninit();
return ret;
}
@@ -8285,6 +8309,7 @@ qla2x00_module_exit(void)
fc_release_transport(qla2xxx_transport_template);
qlt_exit();
kmem_cache_destroy(srb_cachep);
+ qla_trace_uninit();
}
module_init(qla2x00_module_init);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 4acff4e84b90..bb754a950802 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1557,11 +1557,11 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
"Waiting for sess works (tgt %p)", tgt);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
- while (!list_empty(&tgt->sess_works_list)) {
+ do {
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
- flush_scheduled_work();
+ flush_work(&tgt->sess_work);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
- }
+ } while (!list_empty(&tgt->sess_works_list));
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
@@ -6336,69 +6336,6 @@ out_term:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-static void qlt_tmr_work(struct qla_tgt *tgt,
- struct qla_tgt_sess_work_param *prm)
-{
- struct atio_from_isp *a = &prm->tm_iocb2;
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- unsigned long flags;
- be_id_t s_id;
- int rc;
- u64 unpacked_lun;
- int fn;
- void *iocb;
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-
- if (tgt->tgt_stop)
- goto out_term2;
-
- s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
- if (!sess) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has got an extra creation ref */
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- if (!sess)
- goto out_term2;
- } else {
- if (sess->deleted) {
- goto out_term2;
- }
-
- if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
- "%s: kref_get fail %8phC\n",
- __func__, sess->port_name);
- goto out_term2;
- }
- }
-
- iocb = a;
- fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
- unpacked_lun =
- scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
-
- rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- ha->tgt.tgt_ops->put_sess(sess);
-
- if (rc != 0)
- goto out_term;
- return;
-
-out_term2:
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-out_term:
- qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
-}
-
static void qlt_sess_work_fn(struct work_struct *work)
{
struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
@@ -6425,9 +6362,6 @@ static void qlt_sess_work_fn(struct work_struct *work)
case QLA_TGT_SESS_WORK_ABORT:
qlt_abort_work(tgt, prm);
break;
- case QLA_TGT_SESS_WORK_TM:
- qlt_tmr_work(tgt, prm);
- break;
default:
BUG_ON(1);
break;
@@ -6514,7 +6448,6 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->ha = ha;
tgt->vha = base_vha;
init_waitqueue_head(&tgt->waitQ);
- INIT_LIST_HEAD(&tgt->del_sess_list);
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index de3942b8efc4..7df86578214f 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -813,9 +813,6 @@ struct qla_tgt {
/* Count of sessions referring qla_tgt. Protected by hardware_lock. */
int sess_count;
- /* Protected by hardware_lock */
- struct list_head del_sess_list;
-
spinlock_t sess_work_lock;
struct list_head sess_works_list;
struct work_struct sess_work;
@@ -945,7 +942,6 @@ struct qla_tgt_sess_work_param {
struct list_head sess_works_list_entry;
#define QLA_TGT_SESS_WORK_ABORT 1
-#define QLA_TGT_SESS_WORK_TM 2
int type;
union {
@@ -1079,8 +1075,6 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_81xx *);
extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_81xx *);
-extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
- struct sts_entry_24xx *);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
struct vp_config_entry_24xx *);
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f3257d46b6d2..03f3e2cd62b5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.800-k"
+#define QLA2XXX_VERSION "10.02.07.900-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 800
+#define QLA_DRIVER_BETA_VER 900
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 57f2f4135a06..8c961ff03fcd 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -909,7 +909,8 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
sg_count = dma_map_sg(&qpti->op->dev, sg,
scsi_sg_count(Cmnd),
Cmnd->sc_data_direction);
-
+ if (!sg_count)
+ return -1;
ds = cmd->dataseg;
cmd->segment_cnt = sg_count;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 786fb963cf3f..6995c8979230 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -334,6 +334,7 @@ enum blk_eh_timer_return scsi_timeout(struct request *req)
trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
+ atomic_inc(&scmd->device->iotmo_cnt);
if (host->eh_deadline != -1 && !host->last_reset)
host->last_reset = jiffies;
@@ -514,6 +515,11 @@ static void scsi_report_sense(struct scsi_device *sdev,
}
}
+static inline void set_scsi_ml_byte(struct scsi_cmnd *cmd, u8 status)
+{
+ cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
+}
+
/**
* scsi_check_sense - Examine scsi cmd sense
* @scmd: Cmd to have sense checked.
@@ -644,7 +650,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
case DATA_PROTECT:
if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
/* Thin provisioning hard threshold reached */
- set_host_byte(scmd, DID_ALLOC_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_NOSPC);
return SUCCESS;
}
fallthrough;
@@ -652,14 +658,14 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
case VOLUME_OVERFLOW:
case MISCOMPARE:
case BLANK_CHECK:
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
return SUCCESS;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- set_host_byte(scmd, DID_MEDIUM_ERROR);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_MED_ERROR);
return SUCCESS;
}
return NEEDS_RETRY;
@@ -668,7 +674,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
fallthrough;
case ILLEGAL_REQUEST:
@@ -678,7 +684,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
sshdr.asc == 0x24 || /* Invalid field in cdb */
sshdr.asc == 0x26 || /* Parameter value invalid */
sshdr.asc == 0x27) { /* Write protected */
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
}
return SUCCESS;
@@ -1983,7 +1989,7 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
case SAM_STAT_RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
- set_host_byte(scmd, DID_NEXUS_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_RESV_CONFLICT);
return SUCCESS; /* causes immediate i/o error */
}
return FAILED;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d7ec4ab2b111..8b89fab7c420 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -581,16 +581,36 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
return false;
}
+static inline u8 get_scsi_ml_byte(int result)
+{
+ return (result >> 8) & 0xff;
+}
+
/**
* scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
- * @cmd: SCSI command
* @result: scsi error code
*
- * Translate a SCSI result code into a blk_status_t value. May reset the host
- * byte of @cmd->result.
+ * Translate a SCSI result code into a blk_status_t value.
*/
-static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
+static blk_status_t scsi_result_to_blk_status(int result)
{
+ /*
+ * Check the scsi-ml byte first in case we converted a host or status
+ * byte.
+ */
+ switch (get_scsi_ml_byte(result)) {
+ case SCSIML_STAT_OK:
+ break;
+ case SCSIML_STAT_RESV_CONFLICT:
+ return BLK_STS_NEXUS;
+ case SCSIML_STAT_NOSPC:
+ return BLK_STS_NOSPC;
+ case SCSIML_STAT_MED_ERROR:
+ return BLK_STS_MEDIUM;
+ case SCSIML_STAT_TGT_FAILURE:
+ return BLK_STS_TARGET;
+ }
+
switch (host_byte(result)) {
case DID_OK:
if (scsi_status_is_good(result))
@@ -599,18 +619,6 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
case DID_TRANSPORT_FAILFAST:
case DID_TRANSPORT_MARGINAL:
return BLK_STS_TRANSPORT;
- case DID_TARGET_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_TARGET;
- case DID_NEXUS_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_NEXUS;
- case DID_ALLOC_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_NOSPC;
- case DID_MEDIUM_ERROR:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_MEDIUM;
default:
return BLK_STS_IOERR;
}
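The SCSIML_STAT_* codes introduced by this series travel in byte 1 of cmd->result (the byte formerly used as the message byte), so set_scsi_ml_byte() in scsi_error.c and get_scsi_ml_byte() above round-trip cleanly. A self-contained sketch of that packing and the resulting blk_status_t mapping; the constants are copied here purely for illustration:

	#include <stdio.h>

	enum { SCSIML_STAT_OK, SCSIML_STAT_RESV_CONFLICT, SCSIML_STAT_NOSPC,
	       SCSIML_STAT_MED_ERROR, SCSIML_STAT_TGT_FAILURE };

	static int set_scsi_ml_byte(int result, unsigned char status)
	{
		return (result & 0xffff00ff) | (status << 8);	/* byte 1 of cmd->result */
	}

	static unsigned char get_scsi_ml_byte(int result)
	{
		return (result >> 8) & 0xff;
	}

	int main(void)
	{
		int result = set_scsi_ml_byte(0, SCSIML_STAT_RESV_CONFLICT);

		/* prints 0x01, which scsi_result_to_blk_status() maps to BLK_STS_NEXUS */
		printf("scsi-ml byte = 0x%02x\n", get_scsi_ml_byte(result));
		return 0;
	}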
@@ -697,7 +705,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
if (sense_valid)
sense_current = !scsi_sense_is_deferred(&sshdr);
- blk_stat = scsi_result_to_blk_status(cmd, result);
+ blk_stat = scsi_result_to_blk_status(result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -878,14 +886,14 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
SCSI_SENSE_BUFFERSIZE);
}
if (sense_current)
- *blk_statp = scsi_result_to_blk_status(cmd, result);
+ *blk_statp = scsi_result_to_blk_status(result);
} else if (blk_rq_bytes(req) == 0 && sense_current) {
/*
* Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets *blk_statp explicitly for the problem case.
*/
- *blk_statp = scsi_result_to_blk_status(cmd, result);
+ *blk_statp = scsi_result_to_blk_status(result);
}
/*
* Recovered errors need reporting, but they're always treated as
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f385b3f04d6e..c52de9a973e4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -19,6 +19,17 @@ struct scsi_nl_hdr;
#define SCSI_CMD_RETRIES_NO_LIMIT -1
/*
+ * Error codes used by scsi-ml internally. These must not be used by drivers.
+ */
+enum scsi_ml_status {
+ SCSIML_STAT_OK = 0x00,
+ SCSIML_STAT_RESV_CONFLICT = 0x01, /* Reservation conflict */
+ SCSIML_STAT_NOSPC = 0x02, /* Space allocation on the dev failed */
+ SCSIML_STAT_MED_ERROR = 0x03, /* Medium error */
+ SCSIML_STAT_TGT_FAILURE = 0x04, /* Permanent target failure */
+};
+
+/*
* Scsi Error Handler Flags
*/
#define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 5d61f58399dc..cac7c902cf70 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -828,6 +828,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
}
mutex_lock(&sdev->state_mutex);
+ switch (sdev->sdev_state) {
+ case SDEV_RUNNING:
+ case SDEV_OFFLINE:
+ break;
+ default:
+ mutex_unlock(&sdev->state_mutex);
+ return -EINVAL;
+ }
if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
ret = 0;
} else {
@@ -976,6 +984,7 @@ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat(ioerr_cnt);
+show_sdev_iostat(iotmo_cnt);
static ssize_t
sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1295,6 +1304,7 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_iorequest_cnt.attr,
&dev_attr_iodone_cnt.attr,
&dev_attr_ioerr_cnt.attr,
+ &dev_attr_iotmo_cnt.attr,
&dev_attr_modalias.attr,
&dev_attr_queue_depth.attr,
&dev_attr_queue_type.attr,
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a2524106206d..8934160c4a33 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -543,7 +543,7 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
struct nlmsghdr *nlh;
struct fc_nl_event *event;
const char *name;
- u32 len;
+ size_t len, padding;
int err;
if (!data_buf || data_len < 4)
@@ -554,7 +554,7 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
goto send_fail;
}
- len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
+ len = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len);
skb = nlmsg_new(len, GFP_KERNEL);
if (!skb) {
@@ -578,7 +578,9 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
event->event_num = event_number;
event->event_code = event_code;
if (data_len)
- memcpy(&event->event_data, data_buf, data_len);
+ memcpy(event->event_data_flex, data_buf, data_len);
+ padding = len - offsetof(typeof(*event), event_data_flex) - data_len;
+ memset(event->event_data_flex + data_len, 0, padding);
nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
GFP_KERNEL);
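The new length and padding math assumes the uapi fc_nl_event structure has been converted to carry its payload as a flexible array member; that header change lives in include/uapi/scsi/scsi_netlink_fc.h and is not shown in this hunk, so treat the layout below as an assumption:

	/*
	 * Assumed tail of struct fc_nl_event after the conversion:
	 *
	 *	union {
	 *		__u32 event_data;		// legacy fixed word
	 *		__u8  event_data_flex[];	// variable-length payload
	 *	};
	 *
	 * With that layout the hunk computes:
	 *	len     = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len);
	 *	padding = len - offsetof(typeof(*event), event_data_flex) - data_len;
	 * and the memset() of the padding keeps uninitialised skb bytes from
	 * leaking to the netlink listeners.
	 */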
@@ -1170,7 +1172,7 @@ static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
return 0;
}
-fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
+fc_rport_show_function(dev_loss_tmo, "%u\n", 20, )
static ssize_t
store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index bd72c38d7bfc..f569cf0095c2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -998,8 +998,9 @@ void
spi_dv_device(struct scsi_device *sdev)
{
struct scsi_target *starget = sdev->sdev_target;
- u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ unsigned int sleep_flags;
+ u8 *buffer;
/*
* Because this function and the power management code both call
@@ -1007,7 +1008,7 @@ spi_dv_device(struct scsi_device *sdev)
* while suspend or resume is in progress. Hence the
* lock/unlock_system_sleep() calls.
*/
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
if (scsi_autopm_get_device(sdev))
goto unlock_system_sleep;
@@ -1058,7 +1059,7 @@ put_autopm:
scsi_autopm_put_device(sdev);
unlock_system_sleep:
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 55e7c07ebe4c..b90a440e135d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4248,11 +4248,10 @@ static int st_probe(struct device *dev)
struct st_partstat *STps;
struct st_buffer *buffer;
int i, error;
- char *stp;
if (SDp->type != TYPE_TAPE)
return -ENODEV;
- if ((stp = st_incompatible(SDp))) {
+ if (st_incompatible(SDp)) {
sdev_printk(KERN_INFO, SDp,
"OnStream tapes are no longer supported;\n");
sdev_printk(KERN_INFO, SDp,
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index e6420f2127ce..8def242675ef 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -665,16 +665,17 @@ static int stex_queuecommand_lck(struct scsi_cmnd *cmd)
return 0;
case PASSTHRU_CMD:
if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
- struct st_drvver ver;
+ const struct st_drvver ver = {
+ .major = ST_VER_MAJOR,
+ .minor = ST_VER_MINOR,
+ .oem = ST_OEM,
+ .build = ST_BUILD_VER,
+ .signature[0] = PASSTHRU_SIGNATURE,
+ .console_id = host->max_id - 1,
+ .host_no = hba->host->host_no,
+ };
size_t cp_len = sizeof(ver);
- ver.major = ST_VER_MAJOR;
- ver.minor = ST_VER_MINOR;
- ver.oem = ST_OEM;
- ver.build = ST_BUILD_VER;
- ver.signature[0] = PASSTHRU_SIGNATURE;
- ver.console_id = host->max_id - 1;
- ver.host_no = hba->host->host_no;
cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
if (sizeof(ver) == cp_len)
cmd->result = DID_OK << 16;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8ced292c4b96..bc46721aa01c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -60,6 +60,9 @@
#define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
#define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
+/* channel callback timeout in ms */
+#define CALLBACK_TIMEOUT 2
+
/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
VSTOR_OPERATION_COMPLETE_IO = 1,
@@ -1029,7 +1032,7 @@ do_work:
*/
wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
if (!wrk) {
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_BAD_TARGET);
return;
}
@@ -1204,6 +1207,7 @@ static void storvsc_on_channel_callback(void *context)
struct hv_device *device;
struct storvsc_device *stor_device;
struct Scsi_Host *shost;
+ unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT);
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1224,6 +1228,11 @@ static void storvsc_on_channel_callback(void *context)
u32 minlen = rqst_id ? sizeof(struct vstor_packet) :
sizeof(enum vstor_packet_operation);
+ if (unlikely(time_after(jiffies, time_limit))) {
+ hv_pkt_iter_close(channel);
+ return;
+ }
+
if (pktlen < minlen) {
dev_err(&device->device,
"Invalid pkt: id=%llu, len=%u, minlen=%u\n",
@@ -2059,7 +2068,7 @@ err_out3:
err_out2:
/*
* Once we have connected with the host, we would need to
- * to invoke storvsc_dev_remove() to rollback this state and
+ * invoke storvsc_dev_remove() to rollback this state and
* this call also frees up the stor_device; hence the jump around
* err_out1 label.
*/
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 077a8e24bd28..2a79ab16134b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -141,10 +141,10 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
break;
case VIRTIO_SCSI_S_TARGET_FAILURE:
- set_host_byte(sc, DID_TARGET_FAILURE);
+ set_host_byte(sc, DID_BAD_TARGET);
break;
case VIRTIO_SCSI_S_NEXUS_FAILURE:
- set_host_byte(sc, DID_NEXUS_FAILURE);
+ set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
break;
default:
scmd_printk(KERN_WARNING, sc, "Unknown response %d",
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 3fe562047d85..e4fafc77bd20 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -162,65 +162,6 @@ module_param(setup_strings, charp, 0);
static void wd33c93_execute(struct Scsi_Host *instance);
-#ifdef CONFIG_WD33C93_PIO
-static inline uchar
-read_wd33c93(const wd33c93_regs regs, uchar reg_num)
-{
- uchar data;
-
- outb(reg_num, regs.SASR);
- data = inb(regs.SCMD);
- return data;
-}
-
-static inline unsigned long
-read_wd33c93_count(const wd33c93_regs regs)
-{
- unsigned long value;
-
- outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
- value = inb(regs.SCMD) << 16;
- value |= inb(regs.SCMD) << 8;
- value |= inb(regs.SCMD);
- return value;
-}
-
-static inline uchar
-read_aux_stat(const wd33c93_regs regs)
-{
- return inb(regs.SASR);
-}
-
-static inline void
-write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value)
-{
- outb(reg_num, regs.SASR);
- outb(value, regs.SCMD);
-}
-
-static inline void
-write_wd33c93_count(const wd33c93_regs regs, unsigned long value)
-{
- outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
- outb((value >> 16) & 0xff, regs.SCMD);
- outb((value >> 8) & 0xff, regs.SCMD);
- outb( value & 0xff, regs.SCMD);
-}
-
-#define write_wd33c93_cmd(regs, cmd) \
- write_wd33c93((regs), WD_COMMAND, (cmd))
-
-static inline void
-write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
-{
- int i;
-
- outb(WD_CDB_1, regs.SASR);
- for (i=0; i<len; i++)
- outb(cmnd[i], regs.SCMD);
-}
-
-#else /* CONFIG_WD33C93_PIO */
static inline uchar
read_wd33c93(const wd33c93_regs regs, uchar reg_num)
{
@@ -287,7 +228,6 @@ write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
for (i = 0; i < len; i++)
*regs.SCMD = cmnd[i];
}
-#endif /* CONFIG_WD33C93_PIO */
static inline uchar
read_1_byte(const wd33c93_regs regs)
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index b3800baccd2c..e5e4254b1477 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -180,13 +180,8 @@
/* This is what the 3393 chip looks like to us */
typedef struct {
-#ifdef CONFIG_WD33C93_PIO
- unsigned int SASR;
- unsigned int SCMD;
-#else
volatile unsigned char *SASR;
volatile unsigned char *SCMD;
-#endif
} wd33c93_regs;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 51afc66e839d..66b316d173b0 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -289,14 +289,6 @@ static unsigned int scsifront_host_byte(int32_t rslt)
return DID_TRANSPORT_DISRUPTED;
case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST:
return DID_TRANSPORT_FAILFAST;
- case XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE:
- return DID_TARGET_FAILURE;
- case XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE:
- return DID_NEXUS_FAILURE;
- case XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE:
- return DID_ALLOC_FAILURE;
- case XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR:
- return DID_MEDIUM_ERROR;
case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL:
return DID_TRANSPORT_MARGINAL;
default:
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index 1235b7dc8496..2ed821f75816 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -22,7 +22,8 @@ config SLIM_QCOM_CTRL
config SLIM_QCOM_NGD_CTRL
tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
- depends on HAS_IOMEM && DMA_ENGINE && NET && QCOM_RPROC_COMMON
+ depends on HAS_IOMEM && DMA_ENGINE && NET
+ depends on QCOM_RPROC_COMMON || COMPILE_TEST
depends on ARCH_QCOM || COMPILE_TEST
select QCOM_QMI_HELPERS
select QCOM_PDR_HELPERS
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 0aa8408464ad..76c5e446d243 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1470,7 +1470,13 @@ static int of_qcom_slim_ngd_register(struct device *parent,
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
- platform_device_add(ngd->pdev);
+ ret = platform_device_add(ngd->pdev);
+ if (ret) {
+ platform_device_put(ngd->pdev);
+ kfree(ngd);
+ of_node_put(node);
+ return ret;
+ }
ngd->base = ctrl->base + ngd->id * data->offset +
(ngd->id - 1) * data->size;
@@ -1543,10 +1549,8 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, ret, qcom_slim_ngd_interrupt,
IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
- if (ret) {
- dev_err(&pdev->dev, "request IRQ failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "request IRQ failed\n");
ctrl->nb.notifier_call = qcom_slim_ngd_ssr_notify;
ctrl->notifier = qcom_register_ssr_notifier("lpass", &ctrl->nb);
@@ -1575,18 +1579,27 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
ctrl->pdr = pdr_handle_alloc(slim_pd_status, ctrl);
if (IS_ERR(ctrl->pdr)) {
- dev_err(dev, "Failed to init PDR handle\n");
- return PTR_ERR(ctrl->pdr);
+ ret = dev_err_probe(dev, PTR_ERR(ctrl->pdr),
+ "Failed to init PDR handle\n");
+ goto err_pdr_alloc;
}
pds = pdr_add_lookup(ctrl->pdr, "avs/audio", "msm/adsp/audio_pd");
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
- dev_err(dev, "pdr add lookup failed: %d\n", ret);
- return PTR_ERR(pds);
+ ret = dev_err_probe(dev, PTR_ERR(pds), "pdr add lookup failed\n");
+ goto err_pdr_lookup;
}
platform_driver_register(&qcom_slim_ngd_driver);
return of_qcom_slim_ngd_register(dev, ctrl);
+
+err_pdr_alloc:
+ qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb);
+
+err_pdr_lookup:
+ pdr_handle_release(ctrl->pdr);
+
+ return ret;
}
static int qcom_slim_ngd_ctrl_remove(struct platform_device *pdev)
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 73e63920b1b9..40d0cc600cae 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -77,7 +77,7 @@ config MTK_MMSYS
config MTK_SVS
tristate "MediaTek Smart Voltage Scaling(SVS)"
- depends on MTK_EFUSE && NVMEM
+ depends on NVMEM_MTK_EFUSE && NVMEM
help
The Smart Voltage Scaling(SVS) engine is a piece of hardware
which has several controllers(banks) for calculating suitable
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
index 58cf8c40d08d..ed4c571f8771 100644
--- a/drivers/soc/sifive/Kconfig
+++ b/drivers/soc/sifive/Kconfig
@@ -2,9 +2,9 @@
if SOC_SIFIVE
-config SIFIVE_L2
- bool "Sifive L2 Cache controller"
+config SIFIVE_CCACHE
+ bool "Sifive Composable Cache controller"
help
- Support for the L2 cache controller on SiFive platforms.
+ Support for the composable cache controller on SiFive platforms.
endif
diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile
index b5caff77938f..1f5dc339bf82 100644
--- a/drivers/soc/sifive/Makefile
+++ b/drivers/soc/sifive/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_SIFIVE_L2) += sifive_l2_cache.o
+obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c
new file mode 100644
index 000000000000..1c171150e878
--- /dev/null
+++ b/drivers/soc/sifive/sifive_ccache.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive composable cache controller Driver
+ *
+ * Copyright (C) 2018-2022 SiFive, Inc.
+ *
+ */
+
+#define pr_fmt(fmt) "CCACHE: " fmt
+
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/device.h>
+#include <linux/bitfield.h>
+#include <asm/cacheinfo.h>
+#include <soc/sifive/sifive_ccache.h>
+
+#define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100
+#define SIFIVE_CCACHE_DIRECCFIX_HIGH 0x104
+#define SIFIVE_CCACHE_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_CCACHE_DIRECCFAIL_LOW 0x120
+#define SIFIVE_CCACHE_DIRECCFAIL_HIGH 0x124
+#define SIFIVE_CCACHE_DIRECCFAIL_COUNT 0x128
+
+#define SIFIVE_CCACHE_DATECCFIX_LOW 0x140
+#define SIFIVE_CCACHE_DATECCFIX_HIGH 0x144
+#define SIFIVE_CCACHE_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_CCACHE_DATECCFAIL_LOW 0x160
+#define SIFIVE_CCACHE_DATECCFAIL_HIGH 0x164
+#define SIFIVE_CCACHE_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_CCACHE_CONFIG 0x00
+#define SIFIVE_CCACHE_CONFIG_BANK_MASK GENMASK_ULL(7, 0)
+#define SIFIVE_CCACHE_CONFIG_WAYS_MASK GENMASK_ULL(15, 8)
+#define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16)
+#define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24)
+
+#define SIFIVE_CCACHE_WAYENABLE 0x08
+#define SIFIVE_CCACHE_ECCINJECTERR 0x40
+
+#define SIFIVE_CCACHE_MAX_ECCINTR 4
+
+static void __iomem *ccache_base;
+static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR];
+static struct riscv_cacheinfo_ops ccache_cache_ops;
+static int level;
+
+enum {
+ DIR_CORR = 0,
+ DATA_CORR,
+ DATA_UNCORR,
+ DIR_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t ccache_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ unsigned int val;
+
+ if (kstrtouint_from_user(data, count, 0, &val))
+ return -EINVAL;
+ if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+ writel(val, ccache_base + SIFIVE_CCACHE_ECCINJECTERR);
+ else
+ return -EINVAL;
+ return count;
+}
+
+static const struct file_operations ccache_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = ccache_write
+};
+
+static void setup_sifive_debug(void)
+{
+ sifive_test = debugfs_create_dir("sifive_ccache_cache", NULL);
+
+ debugfs_create_file("sifive_debug_inject_error", 0200,
+ sifive_test, NULL, &ccache_fops);
+}
+#endif
+
+static void ccache_config_read(void)
+{
+ u32 cfg;
+
+ cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG);
+ pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n",
+ FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg),
+ FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg),
+ BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)),
+ BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg)));
+
+ cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE);
+ pr_info("Index of the largest way enabled: %u\n", cfg);
+}
+
+static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "sifive,fu540-c000-ccache" },
+ { .compatible = "sifive,fu740-c000-ccache" },
+ { .compatible = "sifive,ccache0" },
+ { /* end of table */ }
+};
+
+static ATOMIC_NOTIFIER_HEAD(ccache_err_chain);
+
+int register_sifive_ccache_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ccache_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_ccache_error_notifier);
+
+int unregister_sifive_ccache_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ccache_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier);
+
+static int ccache_largest_wayenabled(void)
+{
+ return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF;
+}
+
+static ssize_t number_of_ways_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", ccache_largest_wayenabled());
+}
+
+static DEVICE_ATTR_RO(number_of_ways_enabled);
+
+static struct attribute *priv_attrs[] = {
+ &dev_attr_number_of_ways_enabled.attr,
+ NULL,
+};
+
+static const struct attribute_group priv_attr_group = {
+ .attrs = priv_attrs,
+};
+
+static const struct attribute_group *ccache_get_priv_group(struct cacheinfo
+ *this_leaf)
+{
+ /* We want to use private group for composable cache only */
+ if (this_leaf->level == level)
+ return &priv_attr_group;
+ else
+ return NULL;
+}
+
+static irqreturn_t ccache_int_handler(int irq, void *device)
+{
+ unsigned int add_h, add_l;
+
+ if (irq == g_irq[DIR_CORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_LOW);
+ pr_err("DirError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DirError interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_CE,
+ "DirECCFix");
+ }
+ if (irq == g_irq[DIR_UNCORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_LOW);
+ /* Reading this register clears the DirFail interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_UE,
+ "DirECCFail");
+ panic("CCACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l);
+ }
+ if (irq == g_irq[DATA_CORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_LOW);
+ pr_err("DataError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataError interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_CE,
+ "DatECCFix");
+ }
+ if (irq == g_irq[DATA_UNCORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_LOW);
+ pr_err("DataFail @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataFail interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_UE,
+ "DatECCFail");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init sifive_ccache_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int i, rc, intr_num;
+
+ np = of_find_matching_node(NULL, sifive_ccache_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ ccache_base = ioremap(res.start, resource_size(&res));
+ if (!ccache_base)
+ return -ENOMEM;
+
+ if (of_property_read_u32(np, "cache-level", &level))
+ return -ENOENT;
+
+ intr_num = of_property_count_u32_elems(np, "interrupts");
+ if (!intr_num) {
+ pr_err("No interrupts property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < intr_num; i++) {
+ g_irq[i] = irq_of_parse_and_map(np, i);
+ rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
+ NULL);
+ if (rc) {
+ pr_err("Could not request IRQ %d\n", g_irq[i]);
+ return rc;
+ }
+ }
+
+ ccache_config_read();
+
+ ccache_cache_ops.get_priv_group = ccache_get_priv_group;
+ riscv_set_cacheinfo_ops(&ccache_cache_ops);
+
+#ifdef CONFIG_DEBUG_FS
+ setup_sifive_debug();
+#endif
+ return 0;
+}
+
+device_initcall(sifive_ccache_init);
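ccache_config_read() above replaces the old driver's open-coded shifts with GENMASK_ULL()/FIELD_GET(). A short hedged example of the decode, using an illustrative register value (roughly a 2 MiB, 16-way configuration; the value itself is made up):

	u32 cfg = 0x06091004;	/* illustrative value only */

	pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n",
		FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg),			/* 4   */
		FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg),			/* 16  */
		BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)),	/* 512 */
		BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg)));	/* 64  */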
diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c
deleted file mode 100644
index 59640a1d0b28..000000000000
--- a/drivers/soc/sifive/sifive_l2_cache.c
+++ /dev/null
@@ -1,237 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SiFive L2 cache controller Driver
- *
- * Copyright (C) 2018-2019 SiFive, Inc.
- *
- */
-#include <linux/debugfs.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/device.h>
-#include <asm/cacheinfo.h>
-#include <soc/sifive/sifive_l2_cache.h>
-
-#define SIFIVE_L2_DIRECCFIX_LOW 0x100
-#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
-#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
-
-#define SIFIVE_L2_DIRECCFAIL_LOW 0x120
-#define SIFIVE_L2_DIRECCFAIL_HIGH 0x124
-#define SIFIVE_L2_DIRECCFAIL_COUNT 0x128
-
-#define SIFIVE_L2_DATECCFIX_LOW 0x140
-#define SIFIVE_L2_DATECCFIX_HIGH 0x144
-#define SIFIVE_L2_DATECCFIX_COUNT 0x148
-
-#define SIFIVE_L2_DATECCFAIL_LOW 0x160
-#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
-#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
-
-#define SIFIVE_L2_CONFIG 0x00
-#define SIFIVE_L2_WAYENABLE 0x08
-#define SIFIVE_L2_ECCINJECTERR 0x40
-
-#define SIFIVE_L2_MAX_ECCINTR 4
-
-static void __iomem *l2_base;
-static int g_irq[SIFIVE_L2_MAX_ECCINTR];
-static struct riscv_cacheinfo_ops l2_cache_ops;
-
-enum {
- DIR_CORR = 0,
- DATA_CORR,
- DATA_UNCORR,
- DIR_UNCORR,
-};
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *sifive_test;
-
-static ssize_t l2_write(struct file *file, const char __user *data,
- size_t count, loff_t *ppos)
-{
- unsigned int val;
-
- if (kstrtouint_from_user(data, count, 0, &val))
- return -EINVAL;
- if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
- writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
- else
- return -EINVAL;
- return count;
-}
-
-static const struct file_operations l2_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = l2_write
-};
-
-static void setup_sifive_debug(void)
-{
- sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
-
- debugfs_create_file("sifive_debug_inject_error", 0200,
- sifive_test, NULL, &l2_fops);
-}
-#endif
-
-static void l2_config_read(void)
-{
- u32 regval, val;
-
- regval = readl(l2_base + SIFIVE_L2_CONFIG);
- val = regval & 0xFF;
- pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
- val = (regval & 0xFF00) >> 8;
- pr_info("L2CACHE: No. of ways per bank: %d\n", val);
- val = (regval & 0xFF0000) >> 16;
- pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
- val = (regval & 0xFF000000) >> 24;
- pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
-
- regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
- pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
-}
-
-static const struct of_device_id sifive_l2_ids[] = {
- { .compatible = "sifive,fu540-c000-ccache" },
- { .compatible = "sifive,fu740-c000-ccache" },
- { /* end of table */ },
-};
-
-static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
-
-int register_sifive_l2_error_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&l2_err_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
-
-int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&l2_err_chain, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
-
-static int l2_largest_wayenabled(void)
-{
- return readl(l2_base + SIFIVE_L2_WAYENABLE) & 0xFF;
-}
-
-static ssize_t number_of_ways_enabled_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", l2_largest_wayenabled());
-}
-
-static DEVICE_ATTR_RO(number_of_ways_enabled);
-
-static struct attribute *priv_attrs[] = {
- &dev_attr_number_of_ways_enabled.attr,
- NULL,
-};
-
-static const struct attribute_group priv_attr_group = {
- .attrs = priv_attrs,
-};
-
-static const struct attribute_group *l2_get_priv_group(struct cacheinfo *this_leaf)
-{
- /* We want to use private group for L2 cache only */
- if (this_leaf->level == 2)
- return &priv_attr_group;
- else
- return NULL;
-}
-
-static irqreturn_t l2_int_handler(int irq, void *device)
-{
- unsigned int add_h, add_l;
-
- if (irq == g_irq[DIR_CORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
- pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DirError interrupt sig */
- readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
- "DirECCFix");
- }
- if (irq == g_irq[DIR_UNCORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DIRECCFAIL_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DIRECCFAIL_LOW);
- /* Reading this register clears the DirFail interrupt sig */
- readl(l2_base + SIFIVE_L2_DIRECCFAIL_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
- "DirECCFail");
- panic("L2CACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l);
- }
- if (irq == g_irq[DATA_CORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
- pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DataError interrupt sig */
- readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
- "DatECCFix");
- }
- if (irq == g_irq[DATA_UNCORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
- pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DataFail interrupt sig */
- readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
- "DatECCFail");
- }
-
- return IRQ_HANDLED;
-}
-
-static int __init sifive_l2_init(void)
-{
- struct device_node *np;
- struct resource res;
- int i, rc, intr_num;
-
- np = of_find_matching_node(NULL, sifive_l2_ids);
- if (!np)
- return -ENODEV;
-
- if (of_address_to_resource(np, 0, &res))
- return -ENODEV;
-
- l2_base = ioremap(res.start, resource_size(&res));
- if (!l2_base)
- return -ENOMEM;
-
- intr_num = of_property_count_u32_elems(np, "interrupts");
- if (!intr_num) {
- pr_err("L2CACHE: no interrupts property\n");
- return -ENODEV;
- }
-
- for (i = 0; i < intr_num; i++) {
- g_irq[i] = irq_of_parse_and_map(np, i);
- rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
- if (rc) {
- pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
- return rc;
- }
- }
-
- l2_config_read();
-
- l2_cache_ops.get_priv_group = l2_get_priv_group;
- riscv_set_cacheinfo_ops(&l2_cache_ops);
-
-#ifdef CONFIG_DEBUG_FS
- setup_sifive_debug();
-#endif
- return 0;
-}
-device_initcall(sifive_l2_init);
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index d95b07896a3e..76515c33e639 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -11,11 +11,12 @@
#include "bus.h"
#include "sysfs_local.h"
-static DEFINE_IDA(sdw_ida);
+static DEFINE_IDA(sdw_bus_ida);
+static DEFINE_IDA(sdw_peripheral_ida);
static int sdw_get_id(struct sdw_bus *bus)
{
- int rc = ida_alloc(&sdw_ida, GFP_KERNEL);
+ int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
if (rc < 0)
return rc;
@@ -75,7 +76,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
/*
* Initialize multi_link flag
- * TODO: populate this flag by reading property from FW node
*/
bus->multi_link = false;
if (bus->ops->read_prop) {
@@ -157,9 +157,11 @@ static int sdw_delete_slave(struct device *dev, void *data)
mutex_lock(&bus->bus_lock);
- if (slave->dev_num) /* clear dev_num if assigned */
+ if (slave->dev_num) { /* clear dev_num if assigned */
clear_bit(slave->dev_num, bus->assigned);
-
+ if (bus->dev_num_ida_min)
+ ida_free(&sdw_peripheral_ida, slave->dev_num);
+ }
list_del_init(&slave->node);
mutex_unlock(&bus->bus_lock);
@@ -179,7 +181,7 @@ void sdw_bus_master_delete(struct sdw_bus *bus)
sdw_master_device_del(bus);
sdw_bus_debugfs_exit(bus);
- ida_free(&sdw_ida, bus->id);
+ ida_free(&sdw_bus_ida, bus->id);
}
EXPORT_SYMBOL(sdw_bus_master_delete);
@@ -671,10 +673,18 @@ static int sdw_get_device_num(struct sdw_slave *slave)
{
int bit;
- bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
- if (bit == SDW_MAX_DEVICES) {
- bit = -ENODEV;
- goto err;
+ if (slave->bus->dev_num_ida_min) {
+ bit = ida_alloc_range(&sdw_peripheral_ida,
+ slave->bus->dev_num_ida_min, SDW_MAX_DEVICES,
+ GFP_KERNEL);
+ if (bit < 0)
+ goto err;
+ } else {
+ bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
+ if (bit == SDW_MAX_DEVICES) {
+ bit = -ENODEV;
+ goto err;
+ }
}
/*
@@ -751,7 +761,7 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
}
EXPORT_SYMBOL(sdw_extract_slave_id);
-static int sdw_program_device_num(struct sdw_bus *bus)
+static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
struct sdw_slave *slave, *_s;
@@ -761,6 +771,8 @@ static int sdw_program_device_num(struct sdw_bus *bus)
int count = 0, ret;
u64 addr;
+ *programmed = false;
+
/* No Slave, so use raw xfer api */
ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
@@ -796,6 +808,16 @@ static int sdw_program_device_num(struct sdw_bus *bus)
found = true;
/*
+ * To prevent skipping state-machine stages don't
+ * program a device until we've seen it UNATTACH.
+ * Must return here because no other device on #0
+ * can be detected until this one has been
+ * assigned a device ID.
+ */
+ if (slave->status != SDW_SLAVE_UNATTACHED)
+ return 0;
+
+ /*
* Assign a new dev_num to this Slave and
* not mark it present. It will be marked
* present after it reports ATTACHED on new
@@ -809,6 +831,8 @@ static int sdw_program_device_num(struct sdw_bus *bus)
return ret;
}
+ *programmed = true;
+
break;
}
}
@@ -848,13 +872,13 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
mutex_lock(&bus->bus_lock);
dev_vdbg(bus->dev,
- "%s: changing status slave %d status %d new status %d\n",
- __func__, slave->dev_num, slave->status, status);
+ "changing status slave %d status %d new status %d\n",
+ slave->dev_num, slave->status, status);
if (status == SDW_SLAVE_UNATTACHED) {
dev_dbg(&slave->dev,
- "%s: initializing enumeration and init completion for Slave %d\n",
- __func__, slave->dev_num);
+ "initializing enumeration and init completion for Slave %d\n",
+ slave->dev_num);
init_completion(&slave->enumeration_complete);
init_completion(&slave->initialization_complete);
@@ -862,8 +886,8 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
} else if ((status == SDW_SLAVE_ATTACHED) &&
(slave->status == SDW_SLAVE_UNATTACHED)) {
dev_dbg(&slave->dev,
- "%s: signaling enumeration completion for Slave %d\n",
- __func__, slave->dev_num);
+ "signaling enumeration completion for Slave %d\n",
+ slave->dev_num);
complete(&slave->enumeration_complete);
}
@@ -1630,7 +1654,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
for_each_set_bit(bit, &port, 8) {
/* scp2 ports start from 4 */
- port_num = bit + 3;
+ port_num = bit + 4;
sdw_handle_port_interrupt(slave,
port_num,
&port_status[port_num]);
@@ -1642,7 +1666,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
for_each_set_bit(bit, &port, 8) {
/* scp3 ports start from 11 */
- port_num = bit + 10;
+ port_num = bit + 11;
sdw_handle_port_interrupt(slave,
port_num,
&port_status[port_num]);
@@ -1768,7 +1792,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
{
enum sdw_slave_status prev_status;
struct sdw_slave *slave;
- bool attached_initializing;
+ bool attached_initializing, id_programmed;
int i, ret = 0;
/* first check if any Slaves fell off the bus */
@@ -1789,19 +1813,33 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
i, slave->status);
sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+
+ /* Ensure the driver knows that the peripheral is unattached */
+ ret = sdw_update_slave_status(slave, status[i]);
+ if (ret < 0)
+ dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
}
}
if (status[0] == SDW_SLAVE_ATTACHED) {
dev_dbg(bus->dev, "Slave attached, programming device number\n");
- ret = sdw_program_device_num(bus);
- if (ret < 0)
- dev_err(bus->dev, "Slave attach failed: %d\n", ret);
+
/*
- * programming a device number will have side effects,
- * so we deal with other devices at a later time
+ * Programming a device number will have side effects,
+ * so we deal with other devices at a later time.
+ * This relies on those devices reporting ATTACHED, which will
+ * trigger another call to this function. This will only
+ * happen if at least one device ID was programmed.
+ * Error returns from sdw_program_device_num() are currently
+ * ignored because there's no useful recovery that can be done.
+ * Returning the error here could result in the current status
+ * of other devices not being handled, because if no device IDs
+ * were programmed there's nothing to guarantee a status change
+ * to trigger another call to this function.
*/
- return ret;
+ sdw_program_device_num(bus, &id_programmed);
+ if (id_programmed)
+ return 0;
}
/* Continue to check other slave statuses */
@@ -1870,8 +1908,8 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
"Update Slave status failed:%d\n", ret);
if (attached_initializing) {
dev_dbg(&slave->dev,
- "%s: signaling initialization completion for Slave %d\n",
- __func__, slave->dev_num);
+ "signaling initialization completion for Slave %d\n",
+ slave->dev_num);
complete(&slave->initialization_complete);
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 615b0b63a3e1..93929f19d083 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -544,9 +544,12 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
return SDW_CMD_IGNORED;
}
- /* fill response */
- for (i = 0; i < count; i++)
- msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, cdns->response_buf[i]);
+ if (msg->flags == SDW_MSG_FLAG_READ) {
+ /* fill response */
+ for (i = 0; i < count; i++)
+ msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA,
+ cdns->response_buf[i]);
+ }
return SDW_CMD_OK;
}
@@ -566,7 +569,7 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
}
base = CDNS_MCP_CMD_BASE;
- addr = msg->addr;
+ addr = msg->addr + offset;
for (i = 0; i < count; i++) {
data = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
@@ -705,18 +708,15 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
CDNS_MCP_CMD_LEN, false);
- if (ret < 0)
- goto exit;
+ if (ret != SDW_CMD_OK)
+ return ret;
}
if (!(msg->len % CDNS_MCP_CMD_LEN))
- goto exit;
-
- ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
- msg->len % CDNS_MCP_CMD_LEN, false);
+ return SDW_CMD_OK;
-exit:
- return ret;
+ return _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
+ msg->len % CDNS_MCP_CMD_LEN, false);
}
EXPORT_SYMBOL(cdns_xfer_msg);
@@ -790,6 +790,7 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
bool is_slave = false;
u32 mask;
+ u32 val;
int i, set_status;
memset(status, 0, sizeof(status));
@@ -797,41 +798,38 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
for (i = 0; i <= SDW_MAX_DEVICES; i++) {
mask = (slave_intstat >> (i * CDNS_MCP_SLAVE_STATUS_NUM)) &
CDNS_MCP_SLAVE_STATUS_BITS;
- if (!mask)
- continue;
- is_slave = true;
set_status = 0;
- if (mask & CDNS_MCP_SLAVE_INTSTAT_RESERVED) {
- status[i] = SDW_SLAVE_RESERVED;
- set_status++;
- }
-
- if (mask & CDNS_MCP_SLAVE_INTSTAT_ATTACHED) {
- status[i] = SDW_SLAVE_ATTACHED;
- set_status++;
- }
+ if (mask) {
+ is_slave = true;
- if (mask & CDNS_MCP_SLAVE_INTSTAT_ALERT) {
- status[i] = SDW_SLAVE_ALERT;
- set_status++;
- }
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_RESERVED) {
+ status[i] = SDW_SLAVE_RESERVED;
+ set_status++;
+ }
- if (mask & CDNS_MCP_SLAVE_INTSTAT_NPRESENT) {
- status[i] = SDW_SLAVE_UNATTACHED;
- set_status++;
- }
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_ATTACHED) {
+ status[i] = SDW_SLAVE_ATTACHED;
+ set_status++;
+ }
- /* first check if Slave reported multiple status */
- if (set_status > 1) {
- u32 val;
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_ALERT) {
+ status[i] = SDW_SLAVE_ALERT;
+ set_status++;
+ }
- dev_warn_ratelimited(cdns->dev,
- "Slave %d reported multiple Status: %d\n",
- i, mask);
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_NPRESENT) {
+ status[i] = SDW_SLAVE_UNATTACHED;
+ set_status++;
+ }
+ }
- /* check latest status extracted from PING commands */
+ /*
+ * check that a single Slave status was reported; when it was not,
+ * use the latest status extracted from PING commands
+ */
+ if (set_status != 1) {
val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
val >>= (i * 2);
@@ -850,11 +848,6 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
status[i] = SDW_SLAVE_RESERVED;
break;
}
-
- dev_warn_ratelimited(cdns->dev,
- "Slave %d status updated to %d\n",
- i, status[i]);
-
}
}
@@ -969,9 +962,22 @@ static void cdns_update_slave_status_work(struct work_struct *work)
u32 device0_status;
int retry_count = 0;
+ /*
+ * Clear main interrupt first so we don't lose any assertions
+ * that happen during this function.
+ */
+ cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
+
slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
+ /*
+ * Clear the bits before handling so we don't lose any
+ * bits that re-assert.
+ */
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
+
/* combine the two status */
slave_intstat = ((u64)slave1 << 32) | slave0;
@@ -979,8 +985,6 @@ static void cdns_update_slave_status_work(struct work_struct *work)
update_status:
cdns_update_slave_status(cdns, slave_intstat);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
/*
* When there is more than one peripheral per link, it's
@@ -997,6 +1001,11 @@ update_status:
* attention with PING commands. There is no need to check for
* ALERTS since they are not allowed until a non-zero
* device_number is assigned.
+ *
+ * Do not clear INTSTAT0/1. While looping to enumerate devices on
+ * #0 there could be status changes on other devices - these must
+ * be kept in INTSTAT so they can be handled once all devices on
+ * #0 have been handled.
*/
device0_status = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
@@ -1016,8 +1025,7 @@ update_status:
}
}
- /* clear and unmask Slave interrupt now */
- cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
+ /* unmask Slave interrupt now */
cdns_updatel(cdns, CDNS_MCP_INTMASK,
CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
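The reworked cadence_master.c interrupt path follows a read-then-acknowledge-then-handle ordering: the INTSTAT/SLAVE_INTSTAT registers are snapshotted and cleared before the snapshot is processed, so any bit that re-asserts during processing stays latched for the next pass instead of being wiped by a late clear. A small self-contained sketch of that ordering, assuming a write-one-to-clear style status register (modelled here with a plain variable):

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for a write-one-to-clear interrupt status register */
	static uint32_t intstat = 0x5;

	static void handle_bits(uint32_t snapshot)
	{
		printf("handling status 0x%x\n", (unsigned int)snapshot);
	}

	static void irq_worker(void)
	{
		uint32_t snapshot = intstat;

		/* ack first: a bit that re-asserts after this point stays latched */
		intstat &= ~snapshot;

		handle_bits(snapshot);
	}

	int main(void)
	{
		irq_worker();
		return 0;
	}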
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 747983743a14..f81cdd83ec26 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -55,7 +55,26 @@ static const struct adr_remap dell_sku_0A3E[] = {
{}
};
+/*
+ * The HP Omen 16-k0005TX does not expose the correct version of RT711 on link0
+ * and does not expose an RT1316 on link3
+ */
+static const struct adr_remap hp_omen_16[] = {
+ /* rt711-sdca on link0 */
+ {
+ 0x000020025d071100ull,
+ 0x000030025d071101ull
+ },
+ /* rt1316-sdca on link3 */
+ {
+ 0x000120025d071100ull,
+ 0x000330025d131601ull
+ },
+ {}
+};
+
static const struct dmi_system_id adr_remap_quirk_table[] = {
+ /* TGL devices */
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
@@ -78,6 +97,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
},
.driver_data = (void *)dell_sku_0A3E,
},
+ /* ADL devices */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
+ },
+ .driver_data = (void *)hp_omen_16,
+ },
{}
};
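Each adr_remap entry above is a pair of 64-bit SoundWire _ADR values: the address reported by the BIOS and the address the driver should use instead. Assuming the standard SoundWire _ADR bit layout (link ID in bits 51:48, SDW version in 47:44, unique ID in 43:40, manufacturer ID in 39:24, part ID in 23:8, class ID in 7:0 - defined in the SoundWire headers, not in this file), a small decoder prints the fields of the first HP Omen 16 entry:

	#include <stdint.h>
	#include <stdio.h>

	/* assumed SoundWire _ADR layout: link 51:48, version 47:44, unique ID
	 * 43:40, manufacturer ID 39:24, part ID 23:8, class ID 7:0 */
	static void decode_adr(uint64_t adr)
	{
		printf("link %u ver %u uid %u mfg 0x%04x part 0x%04x class 0x%02x\n",
		       (unsigned int)((adr >> 48) & 0xf),
		       (unsigned int)((adr >> 44) & 0xf),
		       (unsigned int)((adr >> 40) & 0xf),
		       (unsigned int)((adr >> 24) & 0xffff),
		       (unsigned int)((adr >> 8) & 0xffff),
		       (unsigned int)(adr & 0xff));
	}

	int main(void)
	{
		decode_adr(0x000020025d071100ull);	/* BIOS-reported address */
		decode_adr(0x000030025d071101ull);	/* remapped rt711-sdca address */
		return 0;
	}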
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index a5965e8827b9..244209358784 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -22,6 +22,9 @@
#include "bus.h"
#include "intel.h"
+/* IDA min selected to avoid conflicts with HDaudio/iDISP SDI values */
+#define INTEL_DEV_NUM_IDA_MIN 4
+
#define INTEL_MASTER_SUSPEND_DELAY_MS 3000
#define INTEL_MASTER_RESET_ITERATIONS 10
@@ -135,7 +138,7 @@ static int intel_reg_show(struct seq_file *s_file, void *data)
if (!buf)
return -ENOMEM;
- links = intel_readl(s, SDW_SHIM_LCAP) & GENMASK(2, 0);
+ links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
ret = scnprintf(buf, RD_BUF, "Register Value\n");
ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
@@ -167,9 +170,8 @@ static int intel_reg_show(struct seq_file *s_file, void *data)
ret += intel_sprintf(s, false, buf, ret,
SDW_SHIM_PCMSYCHC(i, j));
}
- ret += scnprintf(buf + ret, RD_BUF - ret, "\n PDMSCAP, IOCTL, CTMCTL\n");
+ ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
- ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PDMSCAP(i));
ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
}
@@ -258,86 +260,6 @@ static void intel_debugfs_exit(struct sdw_intel *sdw) {}
/*
* shim ops
*/
-
-static int intel_link_power_up(struct sdw_intel *sdw)
-{
- unsigned int link_id = sdw->instance;
- void __iomem *shim = sdw->link_res->shim;
- u32 *shim_mask = sdw->link_res->shim_mask;
- struct sdw_bus *bus = &sdw->cdns.bus;
- struct sdw_master_prop *prop = &bus->prop;
- u32 spa_mask, cpa_mask;
- u32 link_control;
- int ret = 0;
- u32 syncprd;
- u32 sync_reg;
-
- mutex_lock(sdw->link_res->shim_lock);
-
- /*
- * The hardware relies on an internal counter, typically 4kHz,
- * to generate the SoundWire SSP - which defines a 'safe'
- * synchronization point between commands and audio transport
- * and allows for multi link synchronization. The SYNCPRD value
- * is only dependent on the oscillator clock provided to
- * the IP, so adjust based on _DSD properties reported in DSDT
- * tables. The values reported are based on either 24MHz
- * (CNL/CML) or 38.4 MHz (ICL/TGL+).
- */
- if (prop->mclk_freq % 6000000)
- syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
- else
- syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
-
- if (!*shim_mask) {
- dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__);
-
- /* we first need to program the SyncPRD/CPU registers */
- dev_dbg(sdw->cdns.dev,
- "%s: first link up, programming SYNCPRD\n", __func__);
-
- /* set SyncPRD period */
- sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
- u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
-
- /* Set SyncCPU bit */
- sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
- intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
-
- /* Link power up sequence */
- link_control = intel_readl(shim, SDW_SHIM_LCTL);
-
- /* only power-up enabled links */
- spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
- cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
-
- link_control |= spa_mask;
-
- ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
- if (ret < 0) {
- dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
- goto out;
- }
-
- /* SyncCPU will change once link is active */
- ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
- SDW_SHIM_SYNC_SYNCCPU, 0);
- if (ret < 0) {
- dev_err(sdw->cdns.dev,
- "Failed to set SHIM_SYNC: %d\n", ret);
- goto out;
- }
- }
-
- *shim_mask |= BIT(link_id);
-
- sdw->cdns.link_up = true;
-out:
- mutex_unlock(sdw->link_res->shim_lock);
-
- return ret;
-}
-
/* this needs to be called with shim_lock */
static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
{
@@ -389,15 +311,13 @@ static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
/* at this point Integration Glue has full control of the I/Os */
}
-static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
+/* this needs to be called with shim_lock */
+static void intel_shim_init(struct sdw_intel *sdw)
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
- int ret = 0;
u16 ioctl = 0, act = 0;
- mutex_lock(sdw->link_res->shim_lock);
-
/* Initialize Shim */
ioctl |= SDW_SHIM_IOCTL_BKE;
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
@@ -422,10 +342,17 @@ static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
act |= SDW_SHIM_CTMCTL_DODS;
intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
usleep_range(10, 15);
+}
- mutex_unlock(sdw->link_res->shim_lock);
+static int intel_shim_check_wake(struct sdw_intel *sdw)
+{
+ void __iomem *shim;
+ u16 wake_sts;
- return ret;
+ shim = sdw->link_res->shim;
+ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
+
+ return wake_sts & BIT(sdw->instance);
}
static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
@@ -454,6 +381,88 @@ static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
mutex_unlock(sdw->link_res->shim_lock);
}
+static int intel_link_power_up(struct sdw_intel *sdw)
+{
+ unsigned int link_id = sdw->instance;
+ void __iomem *shim = sdw->link_res->shim;
+ u32 *shim_mask = sdw->link_res->shim_mask;
+ struct sdw_bus *bus = &sdw->cdns.bus;
+ struct sdw_master_prop *prop = &bus->prop;
+ u32 spa_mask, cpa_mask;
+ u32 link_control;
+ int ret = 0;
+ u32 syncprd;
+ u32 sync_reg;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ /*
+ * The hardware relies on an internal counter, typically 4kHz,
+ * to generate the SoundWire SSP - which defines a 'safe'
+ * synchronization point between commands and audio transport
+ * and allows for multi link synchronization. The SYNCPRD value
+ * is only dependent on the oscillator clock provided to
+ * the IP, so adjust based on _DSD properties reported in DSDT
+ * tables. The values reported are based on either 24MHz
+ * (CNL/CML) or 38.4 MHz (ICL/TGL+).
+ */
+ if (prop->mclk_freq % 6000000)
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
+ else
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
+
+ if (!*shim_mask) {
+ dev_dbg(sdw->cdns.dev, "powering up all links\n");
+
+ /* we first need to program the SyncPRD/CPU registers */
+ dev_dbg(sdw->cdns.dev,
+ "first link up, programming SYNCPRD\n");
+
+ /* set SyncPRD period */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
+
+ /* Set SyncCPU bit */
+ sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
+ intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+
+ /* Link power up sequence */
+ link_control = intel_readl(shim, SDW_SHIM_LCTL);
+
+ /* only power-up enabled links */
+ spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
+ cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
+
+ link_control |= spa_mask;
+
+ ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
+ goto out;
+ }
+
+ /* SyncCPU will change once link is active */
+ ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
+ SDW_SHIM_SYNC_SYNCCPU, 0);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev,
+ "Failed to set SHIM_SYNC: %d\n", ret);
+ goto out;
+ }
+ }
+
+ *shim_mask |= BIT(link_id);
+
+ sdw->cdns.link_up = true;
+
+ intel_shim_init(sdw);
+
+out:
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
+}
+
static int intel_link_power_down(struct sdw_intel *sdw)
{
u32 link_control, spa_mask, cpa_mask;
@@ -476,7 +485,7 @@ static int intel_link_power_down(struct sdw_intel *sdw)
if (!*shim_mask) {
- dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__);
+ dev_dbg(sdw->cdns.dev, "powering down all links\n");
/* Link power down sequence */
link_control = intel_readl(shim, SDW_SHIM_LCTL);
@@ -1169,11 +1178,20 @@ static int intel_create_dai(struct sdw_cdns *cdns,
static int intel_register_dai(struct sdw_intel *sdw)
{
+ struct sdw_cdns_stream_config config;
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_cdns_streams *stream;
struct snd_soc_dai_driver *dais;
int num_dai, ret, off = 0;
+ /* Read the PDI config and initialize cadence PDI */
+ intel_pdi_init(sdw, &config);
+ ret = sdw_cdns_pdi_init(cdns, config);
+ if (ret)
+ return ret;
+
+ intel_pdi_ch_update(sdw);
+
/* DAIs are created based on total number of PDIs supported */
num_dai = cdns->pcm.num_pdi;
@@ -1201,8 +1219,208 @@ static int intel_register_dai(struct sdw_intel *sdw)
if (ret)
return ret;
- return snd_soc_register_component(cdns->dev, &dai_component,
- dais, num_dai);
+ return devm_snd_soc_register_component(cdns->dev, &dai_component,
+ dais, num_dai);
+}
+
+static int intel_start_bus(struct sdw_intel *sdw)
+{
+ struct device *dev = sdw->cdns.dev;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ struct sdw_bus *bus = &cdns->bus;
+ int ret;
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * follow recommended programming flows to avoid timeouts when
+ * gsync is enabled
+ */
+ if (bus->multi_link)
+ intel_shim_sync_arm(sdw);
+
+ ret = sdw_cdns_init(cdns);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
+ goto err_interrupt;
+ }
+
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
+ goto err_interrupt;
+ }
+
+ if (bus->multi_link) {
+ ret = intel_shim_sync_go(sdw);
+ if (ret < 0) {
+ dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
+ goto err_interrupt;
+ }
+ }
+ sdw_cdns_check_self_clearing_bits(cdns, __func__,
+ true, INTEL_MASTER_RESET_ITERATIONS);
+
+ return 0;
+
+err_interrupt:
+ sdw_cdns_enable_interrupt(cdns, false);
+ return ret;
+}
+
+static int intel_start_bus_after_reset(struct sdw_intel *sdw)
+{
+ struct device *dev = sdw->cdns.dev;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ struct sdw_bus *bus = &cdns->bus;
+ bool clock_stop0;
+ int status;
+ int ret;
+
+ /*
+ * An exception condition occurs for the CLK_STOP_BUS_RESET
+ * case if one or more Masters remain active. In this condition,
+ * all the Masters are powered on since they are in the same power
+ * domain. A Master can preserve its context for clock stop0, so
+ * there is no need to clear the Slave status and reset the bus.
+ */
+ clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
+
+ if (!clock_stop0) {
+
+ /*
+ * make sure all Slaves are tagged as UNATTACHED and
+ * provide reason for reinitialization
+ */
+
+ status = SDW_UNATTACH_REQUEST_MASTER_RESET;
+ sdw_clear_slave_status(bus, status);
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+
+ /*
+ * follow recommended programming flows to avoid
+ * timeouts when gsync is enabled
+ */
+ if (bus->multi_link)
+ intel_shim_sync_arm(sdw);
+
+ /*
+ * Re-initialize the IP since it was powered-off
+ */
+ sdw_cdns_init(&sdw->cdns);
+
+ } else {
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+ }
+
+ ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
+ if (ret < 0) {
+ dev_err(dev, "unable to restart clock during resume\n");
+ goto err_interrupt;
+ }
+
+ if (!clock_stop0) {
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ goto err_interrupt;
+ }
+
+ if (bus->multi_link) {
+ ret = intel_shim_sync_go(sdw);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "sync go failed during resume\n");
+ goto err_interrupt;
+ }
+ }
+ }
+ sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
+
+ return 0;
+
+err_interrupt:
+ sdw_cdns_enable_interrupt(cdns, false);
+ return ret;
+}
+
+static void intel_check_clock_stop(struct sdw_intel *sdw)
+{
+ struct device *dev = sdw->cdns.dev;
+ bool clock_stop0;
+
+ clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
+ if (!clock_stop0)
+ dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
+}
+
+static int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
+{
+ struct device *dev = sdw->cdns.dev;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ int ret;
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = sdw_cdns_clock_restart(cdns, false);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
+ sdw_cdns_enable_interrupt(cdns, false);
+ return ret;
+ }
+
+ sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
+ true, INTEL_MASTER_RESET_ITERATIONS);
+
+ return 0;
+}
+
+static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
+{
+ struct device *dev = sdw->cdns.dev;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ bool wake_enable = false;
+ int ret;
+
+ if (clock_stop) {
+ ret = sdw_cdns_clock_stop(cdns, true);
+ if (ret < 0)
+ dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
+ else
+ wake_enable = true;
+ }
+
+ ret = sdw_cdns_enable_interrupt(cdns, false);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = intel_link_power_down(sdw);
+ if (ret) {
+ dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ intel_shim_wake(sdw, wake_enable);
+
+ return 0;
}
static int sdw_master_read_intel_prop(struct sdw_bus *bus)
@@ -1254,7 +1472,7 @@ static int intel_prop_read(struct sdw_bus *bus)
}
static struct sdw_master_ops sdw_intel_ops = {
- .read_prop = sdw_master_read_prop,
+ .read_prop = intel_prop_read,
.override_adr = sdw_dmi_override_adr,
.xfer_msg = cdns_xfer_msg,
.xfer_msg_defer = cdns_xfer_msg_defer,
@@ -1265,20 +1483,6 @@ static struct sdw_master_ops sdw_intel_ops = {
.read_ping_status = cdns_read_ping_status,
};
-static int intel_init(struct sdw_intel *sdw)
-{
- bool clock_stop;
-
- /* Initialize shim and controller */
- intel_link_power_up(sdw);
-
- clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);
-
- intel_shim_init(sdw, clock_stop);
-
- return 0;
-}
-
/*
* probe and init (aux_dev_id argument is required by function prototype but not used)
*/
@@ -1308,11 +1512,11 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
cdns->msg_count = 0;
bus->link_id = auxdev->id;
+ bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN;
sdw_cdns_probe(cdns);
- /* Set property read ops */
- sdw_intel_ops.read_prop = intel_prop_read;
+ /* Set ops */
bus->ops = &sdw_intel_ops;
/* set driver data, accessed by snd_soc_dai_get_drvdata() */
@@ -1345,7 +1549,6 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
int intel_link_startup(struct auxiliary_device *auxdev)
{
- struct sdw_cdns_stream_config config;
struct device *dev = &auxdev->dev;
struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
@@ -1366,7 +1569,6 @@ int intel_link_startup(struct auxiliary_device *auxdev)
multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
if (!multi_link) {
dev_dbg(dev, "Multi-link is disabled\n");
- bus->multi_link = false;
} else {
/*
* hardware-based synchronization is required regardless
@@ -1374,68 +1576,31 @@ int intel_link_startup(struct auxiliary_device *auxdev)
* synchronization is gated by gsync when the multi-master
* mode is set.
*/
- bus->multi_link = true;
bus->hw_sync_min_links = 1;
}
+ bus->multi_link = multi_link;
/* Initialize shim, controller */
- ret = intel_init(sdw);
- if (ret)
- goto err_init;
-
- /* Read the PDI config and initialize cadence PDI */
- intel_pdi_init(sdw, &config);
- ret = sdw_cdns_pdi_init(cdns, config);
+ ret = intel_link_power_up(sdw);
if (ret)
goto err_init;
- intel_pdi_ch_update(sdw);
-
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts\n");
- goto err_init;
- }
-
- /*
- * follow recommended programming flows to avoid timeouts when
- * gsync is enabled
- */
- if (multi_link)
- intel_shim_sync_arm(sdw);
-
- ret = sdw_cdns_init(cdns);
- if (ret < 0) {
- dev_err(dev, "unable to initialize Cadence IP\n");
- goto err_interrupt;
- }
-
- ret = sdw_cdns_exit_reset(cdns);
- if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence\n");
- goto err_interrupt;
- }
-
- if (multi_link) {
- ret = intel_shim_sync_go(sdw);
- if (ret < 0) {
- dev_err(dev, "sync go failed: %d\n", ret);
- goto err_interrupt;
- }
- }
- sdw_cdns_check_self_clearing_bits(cdns, __func__,
- true, INTEL_MASTER_RESET_ITERATIONS);
-
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
dev_err(dev, "DAI registration failed: %d\n", ret);
- snd_soc_unregister_component(dev);
- goto err_interrupt;
+ goto err_power_up;
}
intel_debugfs_init(sdw);
+ /* start bus */
+ ret = intel_start_bus(sdw);
+ if (ret) {
+ dev_err(dev, "bus start failed: %d\n", ret);
+ goto err_power_up;
+ }
+
/* Enable runtime PM */
if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
pm_runtime_set_autosuspend_delay(dev,
@@ -1480,15 +1645,14 @@ int intel_link_startup(struct auxiliary_device *auxdev)
sdw->startup_done = true;
return 0;
-err_interrupt:
- sdw_cdns_enable_interrupt(cdns, false);
+err_power_up:
+ intel_link_power_down(sdw);
err_init:
return ret;
}
static void intel_link_remove(struct auxiliary_device *auxdev)
{
- struct device *dev = &auxdev->dev;
struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
@@ -1501,7 +1665,6 @@ static void intel_link_remove(struct auxiliary_device *auxdev)
if (!bus->prop.hw_disabled) {
intel_debugfs_exit(sdw);
sdw_cdns_enable_interrupt(cdns, false);
- snd_soc_unregister_component(dev);
}
sdw_bus_master_delete(bus);
}
@@ -1511,8 +1674,6 @@ int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
struct device *dev = &auxdev->dev;
struct sdw_intel *sdw;
struct sdw_bus *bus;
- void __iomem *shim;
- u16 wake_sts;
sdw = auxiliary_get_drvdata(auxdev);
bus = &sdw->cdns.bus;
@@ -1523,10 +1684,7 @@ int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
return 0;
}
- shim = sdw->link_res->shim;
- wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
-
- if (!(wake_sts & BIT(sdw->instance)))
+ if (!intel_shim_check_wake(sdw))
return 0;
/* disable WAKEEN interrupt ASAP to prevent interrupt flood */
@@ -1554,11 +1712,11 @@ static int intel_resume_child_device(struct device *dev, void *data)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
if (!slave->probed) {
- dev_dbg(dev, "%s: skipping device, no probed driver\n", __func__);
+ dev_dbg(dev, "skipping device, no probed driver\n");
return 0;
}
if (!slave->dev_num_sticky) {
- dev_dbg(dev, "%s: skipping device, never detected on bus\n", __func__);
+ dev_dbg(dev, "skipping device, never detected on bus\n");
return 0;
}
@@ -1644,7 +1802,7 @@ static int __maybe_unused intel_suspend(struct device *dev)
}
if (pm_runtime_suspended(dev)) {
- dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__);
+ dev_dbg(dev, "pm_runtime status: suspended\n");
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
@@ -1665,20 +1823,12 @@ static int __maybe_unused intel_suspend(struct device *dev)
return 0;
}
- ret = sdw_cdns_enable_interrupt(cdns, false);
+ ret = intel_stop_bus(sdw, false);
if (ret < 0) {
- dev_err(dev, "cannot disable interrupts on suspend\n");
- return ret;
- }
-
- ret = intel_link_power_down(sdw);
- if (ret) {
- dev_err(dev, "Link power down failed: %d\n", ret);
+ dev_err(dev, "%s: cannot stop bus: %d\n", __func__, ret);
return ret;
}
- intel_shim_wake(sdw, false);
-
return 0;
}
@@ -1699,44 +1849,19 @@ static int __maybe_unused intel_suspend_runtime(struct device *dev)
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
-
- ret = sdw_cdns_enable_interrupt(cdns, false);
+ ret = intel_stop_bus(sdw, false);
if (ret < 0) {
- dev_err(dev, "cannot disable interrupts on suspend\n");
- return ret;
- }
-
- ret = intel_link_power_down(sdw);
- if (ret) {
- dev_err(dev, "Link power down failed: %d\n", ret);
+ dev_err(dev, "%s: cannot stop bus during teardown: %d\n",
+ __func__, ret);
return ret;
}
-
- intel_shim_wake(sdw, false);
-
- } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
- !clock_stop_quirks) {
- bool wake_enable = true;
-
- ret = sdw_cdns_clock_stop(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable clock stop on suspend\n");
- wake_enable = false;
- }
-
- ret = sdw_cdns_enable_interrupt(cdns, false);
+ } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET || !clock_stop_quirks) {
+ ret = intel_stop_bus(sdw, true);
if (ret < 0) {
- dev_err(dev, "cannot disable interrupts on suspend\n");
- return ret;
- }
-
- ret = intel_link_power_down(sdw);
- if (ret) {
- dev_err(dev, "Link power down failed: %d\n", ret);
+ dev_err(dev, "%s: cannot stop bus during clock_stop: %d\n",
+ __func__, ret);
return ret;
}
-
- intel_shim_wake(sdw, wake_enable);
} else {
dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
__func__, clock_stop_quirks);
@@ -1752,7 +1877,6 @@ static int __maybe_unused intel_resume(struct device *dev)
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
int link_flags;
- bool multi_link;
int ret;
if (bus->prop.hw_disabled || !sdw->startup_done) {
@@ -1762,10 +1886,9 @@ static int __maybe_unused intel_resume(struct device *dev)
}
link_flags = md_flags >> (bus->link_id * 8);
- multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
if (pm_runtime_suspended(dev)) {
- dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__);
+ dev_dbg(dev, "pm_runtime status was suspended, forcing active\n");
/* follow required sequence from runtime_pm.rst */
pm_runtime_disable(dev);
@@ -1779,7 +1902,7 @@ static int __maybe_unused intel_resume(struct device *dev)
pm_runtime_idle(dev);
}
- ret = intel_init(sdw);
+ ret = intel_link_power_up(sdw);
if (ret) {
dev_err(dev, "%s failed: %d\n", __func__, ret);
return ret;
@@ -1791,41 +1914,13 @@ static int __maybe_unused intel_resume(struct device *dev)
*/
sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
-
- /*
- * follow recommended programming flows to avoid timeouts when
- * gsync is enabled
- */
- if (multi_link)
- intel_shim_sync_arm(sdw);
-
- ret = sdw_cdns_init(&sdw->cdns);
- if (ret < 0) {
- dev_err(dev, "unable to initialize Cadence IP during resume\n");
- return ret;
- }
-
- ret = sdw_cdns_exit_reset(cdns);
+ ret = intel_start_bus(sdw);
if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ dev_err(dev, "cannot start bus during resume\n");
+ intel_link_power_down(sdw);
return ret;
}
- if (multi_link) {
- ret = intel_shim_sync_go(sdw);
- if (ret < 0) {
- dev_err(dev, "sync go failed during resume\n");
- return ret;
- }
- }
- sdw_cdns_check_self_clearing_bits(cdns, __func__,
- true, INTEL_MASTER_RESET_ITERATIONS);
-
/*
* after system resume, the pm_runtime suspend() may kick in
* during the enumeration, before any children device force the
@@ -1838,7 +1933,7 @@ static int __maybe_unused intel_resume(struct device *dev)
*/
pm_runtime_mark_last_busy(dev);
- return ret;
+ return 0;
}
static int __maybe_unused intel_resume_runtime(struct device *dev)
@@ -1847,10 +1942,6 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
u32 clock_stop_quirks;
- bool clock_stop0;
- int link_flags;
- bool multi_link;
- int status;
int ret;
if (bus->prop.hw_disabled || !sdw->startup_done) {
@@ -1862,15 +1953,12 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
/* unconditionally disable WAKEEN interrupt */
intel_shim_wake(sdw, false);
- link_flags = md_flags >> (bus->link_id * 8);
- multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
-
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
- ret = intel_init(sdw);
+ ret = intel_link_power_up(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d\n", __func__, ret);
+ dev_err(dev, "%s: power_up failed after teardown: %d\n", __func__, ret);
return ret;
}
@@ -1880,145 +1968,45 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
*/
sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
-
- /*
- * follow recommended programming flows to avoid
- * timeouts when gsync is enabled
- */
- if (multi_link)
- intel_shim_sync_arm(sdw);
-
- ret = sdw_cdns_init(&sdw->cdns);
- if (ret < 0) {
- dev_err(dev, "unable to initialize Cadence IP during resume\n");
- return ret;
- }
-
- ret = sdw_cdns_exit_reset(cdns);
+ ret = intel_start_bus(sdw);
if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ dev_err(dev, "%s: cannot start bus after teardown: %d\n", __func__, ret);
+ intel_link_power_down(sdw);
return ret;
}
- if (multi_link) {
- ret = intel_shim_sync_go(sdw);
- if (ret < 0) {
- dev_err(dev, "sync go failed during resume\n");
- return ret;
- }
- }
- sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime TEARDOWN",
- true, INTEL_MASTER_RESET_ITERATIONS);
} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
- ret = intel_init(sdw);
+ ret = intel_link_power_up(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d\n", __func__, ret);
+ dev_err(dev, "%s: power_up failed after bus reset: %d\n", __func__, ret);
return ret;
}
- /*
- * An exception condition occurs for the CLK_STOP_BUS_RESET
- * case if one or more masters remain active. In this condition,
- * all the masters are powered on for they are in the same power
- * domain. Master can preserve its context for clock stop0, so
- * there is no need to clear slave status and reset bus.
- */
- clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
-
- if (!clock_stop0) {
-
- /*
- * make sure all Slaves are tagged as UNATTACHED and
- * provide reason for reinitialization
- */
-
- status = SDW_UNATTACH_REQUEST_MASTER_RESET;
- sdw_clear_slave_status(bus, status);
-
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
-
- /*
- * follow recommended programming flows to avoid
- * timeouts when gsync is enabled
- */
- if (multi_link)
- intel_shim_sync_arm(sdw);
-
- /*
- * Re-initialize the IP since it was powered-off
- */
- sdw_cdns_init(&sdw->cdns);
-
- } else {
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
- }
-
- ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
+ ret = intel_start_bus_after_reset(sdw);
if (ret < 0) {
- dev_err(dev, "unable to restart clock during resume\n");
+ dev_err(dev, "%s: cannot start bus after reset: %d\n", __func__, ret);
+ intel_link_power_down(sdw);
return ret;
}
-
- if (!clock_stop0) {
- ret = sdw_cdns_exit_reset(cdns);
- if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
- return ret;
- }
-
- if (multi_link) {
- ret = intel_shim_sync_go(sdw);
- if (ret < 0) {
- dev_err(sdw->cdns.dev, "sync go failed during resume\n");
- return ret;
- }
- }
- }
- sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime BUS_RESET",
- true, INTEL_MASTER_RESET_ITERATIONS);
-
} else if (!clock_stop_quirks) {
- clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
- if (!clock_stop0)
- dev_err(dev, "%s invalid configuration, clock was not stopped", __func__);
+ intel_check_clock_stop(sdw);
- ret = intel_init(sdw);
+ ret = intel_link_power_up(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d\n", __func__, ret);
+ dev_err(dev, "%s: power_up failed: %d\n", __func__, ret);
return ret;
}
- ret = sdw_cdns_enable_interrupt(cdns, true);
+ ret = intel_start_bus_after_clock_stop(sdw);
if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
+ dev_err(dev, "%s: cannot start bus after clock stop: %d\n", __func__, ret);
+ intel_link_power_down(sdw);
return ret;
}
-
- ret = sdw_cdns_clock_restart(cdns, false);
- if (ret < 0) {
- dev_err(dev, "unable to resume master during resume\n");
- return ret;
- }
-
- sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
- true, INTEL_MASTER_RESET_ITERATIONS);
} else {
- dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
+ dev_err(dev, "%s: clock_stop_quirks %x unsupported\n",
__func__, clock_stop_quirks);
ret = -EINVAL;
}
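The relocated intel_link_power_up() keeps the SYNCPRD selection unchanged: an oscillator frequency that divides evenly by 6 MHz (24 MHz on CNL/CML) selects the 24 MHz value, while 38.4 MHz (ICL/TGL and later) does not and selects the 38.4 MHz value. A standalone sketch of that check, with placeholder constants standing in for the real SDW_SHIM_SYNC_SYNCPRD_VAL_* definitions:

	#include <stdio.h>

	/* placeholders only; the real SDW_SHIM_SYNC_SYNCPRD_VAL_* constants
	 * are defined in the Intel SoundWire headers */
	#define SYNCPRD_VAL_24		0x24
	#define SYNCPRD_VAL_38_4	0x38

	static unsigned int pick_syncprd(unsigned int mclk_freq)
	{
		/* 24 MHz divides evenly by 6 MHz, 38.4 MHz does not */
		return (mclk_freq % 6000000) ? SYNCPRD_VAL_38_4 : SYNCPRD_VAL_24;
	}

	int main(void)
	{
		printf("24 MHz   -> 0x%x\n", pick_syncprd(24000000));
		printf("38.4 MHz -> 0x%x\n", pick_syncprd(38400000));
		return 0;
	}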
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 824f4f32d4dc..d091513919df 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -306,7 +306,7 @@ sdw_intel_startup_controller(struct sdw_intel_ctx *ctx)
/* Check SNDWLCAP.LCOUNT */
caps = ioread32(ctx->mmio_base + ctx->shim_base + SDW_SHIM_LCAP);
- caps &= GENMASK(2, 0);
+ caps &= SDW_SHIM_LCAP_LCOUNT_MASK;
/* Check HW supported vs property value */
if (caps < ctx->count) {
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 3a992a6478c3..b33d5db494a5 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -420,7 +420,7 @@ static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
- for (dev_num = 0; dev_num <= SDW_MAX_DEVICES; dev_num++) {
+ for (dev_num = 1; dev_num <= SDW_MAX_DEVICES; dev_num++) {
status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
@@ -440,7 +440,7 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
ctrl->slave_status = val;
- for (i = 0; i <= SDW_MAX_DEVICES; i++) {
+ for (i = 1; i <= SDW_MAX_DEVICES; i++) {
u32 s;
s = (val >> (i * 2));
@@ -573,11 +573,10 @@ static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
break;
case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
- dev_err_ratelimited(swrm->dev, "%s: SWR new slave attached\n",
- __func__);
+ dev_dbg_ratelimited(swrm->dev, "SWR new slave attached\n");
swrm->reg_read(swrm, SWRM_MCP_SLV_STATUS, &slave_status);
if (swrm->slave_status == slave_status) {
- dev_err(swrm->dev, "Slave status not changed %x\n",
+ dev_dbg(swrm->dev, "Slave status not changed %x\n",
slave_status);
} else {
qcom_swrm_get_device_status(swrm);
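Both qcom.c loops now start at device number 1: in SoundWire, device number 0 is the default address of peripherals that have not yet been enumerated, so its two status bits in SWRM_MCP_SLV_STATUS are not a meaningful per-device status here. A minimal sketch of the per-device extraction, assuming the 2-bit-per-device packing shown in the hunk above and the SDW_MAX_DEVICES value from the SoundWire headers (constants here are illustrative, not the driver's):

	#include <stdio.h>

	#define SDW_MAX_DEVICES	11	/* as in the SoundWire headers */
	#define STATUS_SZ	2	/* two status bits per device */
	#define STATUS_MASK	0x3

	int main(void)
	{
		unsigned int val = 0x1u << (3 * STATUS_SZ);	/* device 3 attached */
		int dev;

		/* device number 0 is reserved for not-yet-enumerated peripherals */
		for (dev = 1; dev <= SDW_MAX_DEVICES; dev++) {
			unsigned int s = (val >> (dev * STATUS_SZ)) & STATUS_MASK;

			if (s)
				printf("device %d status %u\n", dev, s);
		}
		return 0;
	}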
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 2113be40b5a9..2cf3203b2397 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -91,7 +91,7 @@ enum pmic_arb_channel {
/* Maximum number of support PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS 512
-#define PMIC_ARB_TIMEOUT_US 100
+#define PMIC_ARB_TIMEOUT_US 1000
#define PMIC_ARB_MAX_TRANS_BYTES (8)
#define PMIC_ARB_APID_MASK 0xFF
@@ -590,23 +590,16 @@ static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
u8 per = ppid & 0xFF;
u8 irq_mask = BIT(id);
+ dev_err_ratelimited(&pmic_arb->spmic->dev, "%s apid=%d sid=0x%x per=0x%x irq=%d\n",
+ __func__, apid, sid, per, id);
writel_relaxed(irq_mask, pmic_arb->ver_ops->irq_clear(pmic_arb, apid));
-
- if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
- (per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
- dev_err_ratelimited(&pmic_arb->spmic->dev, "failed to ack irq_mask = 0x%x for ppid = %x\n",
- irq_mask, ppid);
-
- if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
- (per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
- dev_err_ratelimited(&pmic_arb->spmic->dev, "failed to ack irq_mask = 0x%x for ppid = %x\n",
- irq_mask, ppid);
}
-static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
+static int periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
{
unsigned int irq;
u32 status, id;
+ int handled = 0;
u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
@@ -621,7 +614,10 @@ static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
continue;
}
generic_handle_irq(irq);
+ handled++;
}
+
+ return handled;
}
static void pmic_arb_chained_irq(struct irq_desc *desc)
@@ -629,28 +625,66 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
struct spmi_pmic_arb *pmic_arb = irq_desc_get_handler_data(desc);
const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops;
struct irq_chip *chip = irq_desc_get_chip(desc);
- int first = pmic_arb->min_apid >> 5;
- int last = pmic_arb->max_apid >> 5;
+ int first = pmic_arb->min_apid;
+ int last = pmic_arb->max_apid;
u8 ee = pmic_arb->ee;
- u32 status, enable;
+ u32 status, enable, handled = 0;
int i, id, apid;
+ /* status based dispatch */
+ bool acc_valid = false;
+ u32 irq_status = 0;
chained_irq_enter(chip, desc);
- for (i = first; i <= last; ++i) {
+ for (i = first >> 5; i <= last >> 5; ++i) {
status = readl_relaxed(
ver_ops->owner_acc_status(pmic_arb, ee, i));
+ if (status)
+ acc_valid = true;
+
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
apid = id + i * 32;
+ if (apid < first || apid > last) {
+ WARN_ONCE(true, "spurious spmi irq received for apid=%d\n",
+ apid);
+ continue;
+ }
enable = readl_relaxed(
ver_ops->acc_enable(pmic_arb, apid));
if (enable & SPMI_PIC_ACC_ENABLE_BIT)
- periph_interrupt(pmic_arb, apid);
+ if (periph_interrupt(pmic_arb, apid) != 0)
+ handled++;
+ }
+ }
+
+ /* ACC_STATUS is empty but an IRQ fired; check IRQ_STATUS */
+ if (!acc_valid) {
+ for (i = first; i <= last; i++) {
+ /* skip if APPS is not irq owner */
+ if (pmic_arb->apid_data[i].irq_ee != pmic_arb->ee)
+ continue;
+
+ irq_status = readl_relaxed(
+ ver_ops->irq_status(pmic_arb, i));
+ if (irq_status) {
+ enable = readl_relaxed(
+ ver_ops->acc_enable(pmic_arb, i));
+ if (enable & SPMI_PIC_ACC_ENABLE_BIT) {
+ dev_dbg(&pmic_arb->spmic->dev,
+ "Dispatching IRQ for apid=%d status=%x\n",
+ i, irq_status);
+ if (periph_interrupt(pmic_arb, i) != 0)
+ handled++;
+ }
+ }
}
}
+ if (handled == 0)
+ handle_bad_irq(desc);
+
chained_irq_exit(chip, desc);
}
@@ -770,6 +804,7 @@ static int qpnpint_irq_domain_activate(struct irq_domain *domain,
u16 apid = hwirq_to_apid(d->hwirq);
u16 sid = hwirq_to_sid(d->hwirq);
u16 irq = hwirq_to_irq(d->hwirq);
+ u8 buf;
if (pmic_arb->apid_data[apid].irq_ee != pmic_arb->ee) {
dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u: ee=%u but owner=%u\n",
@@ -778,6 +813,10 @@ static int qpnpint_irq_domain_activate(struct irq_domain *domain,
return -ENODEV;
}
+ buf = BIT(irq);
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &buf, 1);
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 1);
+
return 0;
}
@@ -992,7 +1031,8 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
* version 5, there is more than one APID mapped to each PPID.
* The owner field for each of these mappings specifies the EE which is
* allowed to write to the APID. The owner of the last (highest) APID
- * for a given PPID will receive interrupts from the PPID.
+ * which has the IRQ owner bit set for a given PPID will receive
+ * interrupts from the PPID.
*/
for (i = 0; ; i++, apidd++) {
offset = pmic_arb->ver_ops->apid_map_offset(i);
@@ -1015,16 +1055,16 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
prev_apidd = &pmic_arb->apid_data[apid];
- if (valid && is_irq_ee &&
- prev_apidd->write_ee == pmic_arb->ee) {
+ if (!valid || apidd->write_ee == pmic_arb->ee) {
+ /* First PPID mapping or one for this EE */
+ pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
+ } else if (valid && is_irq_ee &&
+ prev_apidd->write_ee == pmic_arb->ee) {
/*
* Duplicate PPID mapping after the one for this EE;
* override the irq owner
*/
prev_apidd->irq_ee = apidd->irq_ee;
- } else if (!valid || is_irq_ee) {
- /* First PPID mapping or duplicate for another EE */
- pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
}
apidd->ppid = ppid;
@@ -1093,6 +1133,11 @@ static int pmic_arb_offset_v5(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
offset = 0x10000 * pmic_arb->ee + 0x80 * apid;
break;
case PMIC_ARB_CHANNEL_RW:
+ if (pmic_arb->apid_data[apid].write_ee != pmic_arb->ee) {
+ dev_err(&pmic_arb->spmic->dev, "disallowed SPMI write to sid=%u, addr=0x%04X\n",
+ sid, addr);
+ return -EPERM;
+ }
offset = 0x10000 * apid;
break;
}
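The chained handler above now uses a two-stage dispatch: it first walks the summary ACC_STATUS bitmaps, and only if those are entirely empty does it poll each APID's IRQ_STATUS directly; if neither stage handled anything, the interrupt is reported as bad. A condensed, self-contained sketch of that flow with the register reads stubbed out (the enable and ownership checks are omitted; the real code goes through the ver_ops accessors):

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_APID	8

	/* stubbed register state standing in for ACC_STATUS / IRQ_STATUS reads */
	static unsigned int acc_status;
	static unsigned int irq_status[NR_APID];

	static int periph_interrupt(int apid)
	{
		printf("dispatching apid %d\n", apid);
		return 1;	/* number of sub-IRQs handled */
	}

	static void chained_irq(void)
	{
		bool acc_valid = acc_status != 0;
		int handled = 0;
		int apid;

		/* stage 1: summary bitmap */
		for (apid = 0; apid < NR_APID; apid++)
			if (acc_status & (1u << apid))
				handled += periph_interrupt(apid);

		/* stage 2: summary empty but the parent IRQ fired, poll each APID */
		if (!acc_valid)
			for (apid = 0; apid < NR_APID; apid++)
				if (irq_status[apid])
					handled += periph_interrupt(apid);

		if (!handled)
			printf("spurious interrupt\n");	/* handle_bad_irq() upstream */
	}

	int main(void)
	{
		irq_status[3] = 0x4;	/* only a per-APID status bit is set */
		chained_irq();
		return 0;
	}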
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index a456ce5141e1..55381592bb5a 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -35,7 +35,7 @@ static void spmi_ctrl_release(struct device *dev)
{
struct spmi_controller *ctrl = to_spmi_controller(dev);
- ida_simple_remove(&ctrl_ida, ctrl->nr);
+ ida_free(&ctrl_ida, ctrl->nr);
kfree(ctrl);
}
@@ -457,7 +457,7 @@ struct spmi_controller *spmi_controller_alloc(struct device *parent,
ctrl->dev.of_node = parent->of_node;
spmi_controller_set_drvdata(ctrl, &ctrl[1]);
- id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&ctrl_ida, GFP_KERNEL);
if (id < 0) {
dev_err(parent,
"unable to allocate SPMI controller identifier.\n");
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 2de3896489c8..897cb8db5084 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -132,7 +132,8 @@ static irqreturn_t ssb_gpio_irq_chipco_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
- generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+ generic_handle_domain_irq_safe(bus->irq_domain, gpio);
+
ssb_chipco_gpio_polarity(chipco, irqs, val & irqs);
return IRQ_HANDLED;
@@ -330,7 +331,8 @@ static irqreturn_t ssb_gpio_irq_extif_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
- generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+ generic_handle_domain_irq_safe(bus->irq_domain, gpio);
+
ssb_extif_gpio_polarity(extif, irqs, val & irqs);
return IRQ_HANDLED;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3bd80f9695ac..5cfabd5376cc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -60,10 +60,6 @@ source "drivers/staging/board/Kconfig"
source "drivers/staging/gdm724x/Kconfig"
-source "drivers/staging/fwserial/Kconfig"
-
-source "drivers/staging/clocking-wizard/Kconfig"
-
source "drivers/staging/fbtft/Kconfig"
source "drivers/staging/most/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 1d9ae39fea14..f8c3aa9c2418 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -20,8 +20,6 @@ obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
-obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
-obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_KS7010) += ks7010/
diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
deleted file mode 100644
index 2324b5d73788..000000000000
--- a/drivers/staging/clocking-wizard/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Xilinx Clocking Wizard Driver
-#
-
-config COMMON_CLK_XLNX_CLKWZRD
- tristate "Xilinx Clocking Wizard"
- depends on COMMON_CLK && OF && HAS_IOMEM
- help
- Support for the Xilinx Clocking Wizard IP core clock generator.
diff --git a/drivers/staging/clocking-wizard/Makefile b/drivers/staging/clocking-wizard/Makefile
deleted file mode 100644
index b1f915224d96..000000000000
--- a/drivers/staging/clocking-wizard/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/clocking-wizard/TODO b/drivers/staging/clocking-wizard/TODO
deleted file mode 100644
index c7e1dc58dfba..000000000000
--- a/drivers/staging/clocking-wizard/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-TODO:
- - support for fractional multiplier
- - support for fractional divider (output 0 only)
- - support for set_rate() operations (may benefit from Stephen Boyd's
- refactoring of the clk primitives:
- https://lore.kernel.org/lkml/1409957256-23729-1-git-send-email-sboyd@codeaurora.org)
- - review arithmetic
- - overflow after multiplication?
- - maximize accuracy before divisions
-
-Patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- Sören Brinkmann <soren.brinkmann@xilinx.com>
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/drivers/staging/clocking-wizard/dt-binding.txt
deleted file mode 100644
index efb67ff9f76c..000000000000
--- a/drivers/staging/clocking-wizard/dt-binding.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Binding for Xilinx Clocking Wizard IP Core
-
-This binding uses the common clock binding[1]. Details about the devices can be
-found in the product guide[2].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Clocking Wizard Product Guide
-https://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-clk-wiz.pdf
-
-Required properties:
- - compatible: Must be 'xlnx,clocking-wizard'
- - reg: Base and size of the cores register space
- - clocks: Handle to input clock
- - clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
- - clock-output-names: Names for the output clocks
-
-Optional properties:
- - speed-grade: Speed grade of the device (valid values are 1..3)
-
-Example:
- clock-generator@40040000 {
- reg = <0x40040000 0x1000>;
- compatible = "xlnx,clocking-wizard";
- speed-grade = <1>;
- clock-names = "clk_in1", "s_axi_aclk";
- clocks = <&clkc 15>, <&clkc 15>;
- clock-output-names = "clk_out0", "clk_out1", "clk_out2",
- "clk_out3", "clk_out4", "clk_out5",
- "clk_out6", "clk_out7";
- };
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
deleted file mode 100644
index 6964aac2a7ed..000000000000
--- a/drivers/staging/fwserial/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config FIREWIRE_SERIAL
- tristate "TTY over Firewire"
- depends on FIREWIRE && TTY
- help
- This enables TTY over IEEE 1394, providing high-speed serial
- connectivity to cabled peers. This driver implements a
- ad-hoc transport protocol and is currently limited to
- Linux-to-Linux communication.
-
- To compile this driver as a module, say M here: the module will
- be called firewire-serial.
-
-if FIREWIRE_SERIAL
-
-config FWTTY_MAX_TOTAL_PORTS
- int "Maximum number of serial ports supported"
- default "64"
- help
- Set this to the maximum number of serial ports you want the
- firewire-serial driver to support.
-
-config FWTTY_MAX_CARD_PORTS
- int "Maximum number of serial ports supported per adapter"
- range 0 FWTTY_MAX_TOTAL_PORTS
- default "32"
- help
- Set this to the maximum number of serial ports each firewire
- adapter supports. The actual number of serial ports registered
- is set with the module parameter "ttys".
-
-endif
diff --git a/drivers/staging/fwserial/Makefile b/drivers/staging/fwserial/Makefile
deleted file mode 100644
index 1cd5c5c7e805..000000000000
--- a/drivers/staging/fwserial/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_FIREWIRE_SERIAL) += firewire-serial.o
-firewire-serial-objs := fwserial.o dma_fifo.o
diff --git a/drivers/staging/fwserial/TODO b/drivers/staging/fwserial/TODO
deleted file mode 100644
index 382a7959407c..000000000000
--- a/drivers/staging/fwserial/TODO
+++ /dev/null
@@ -1,14 +0,0 @@
-TODOs prior to this driver moving out of staging
-------------------------------------------------
-1. Implement retries for RCODE_BUSY, RCODE_NO_ACK and RCODE_SEND_ERROR
- - I/O is handled asynchronously which presents some issues when error
- conditions occur.
-2. Implement _robust_ console on top of this. The existing prototype console
- driver is not ready for the big leagues yet.
-3. Expose means of controlling attach/detach of peers via sysfs. Include
- GUID-to-port matching/whitelist/blacklist.
-
--- Issues with firewire stack --
-1. This driver uses the same unregistered vendor id that the firewire core does
- (0xd00d1e). Perhaps this could be exposed as a define in
- firewire.h?
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
deleted file mode 100644
index 5dcbab6fd622..000000000000
--- a/drivers/staging/fwserial/dma_fifo.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * DMA-able FIFO implementation
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/bug.h>
-
-#include "dma_fifo.h"
-
-#ifdef DEBUG_TRACING
-#define df_trace(s, args...) pr_debug(s, ##args)
-#else
-#define df_trace(s, args...)
-#endif
-
-#define FAIL(fifo, condition, format...) ({ \
- fifo->corrupt = !!(condition); \
- WARN(fifo->corrupt, format); \
-})
-
-/*
- * private helper fn to determine if check is in open interval (lo,hi)
- */
-static bool addr_check(unsigned int check, unsigned int lo, unsigned int hi)
-{
- return check - (lo + 1) < (hi - 1) - lo;
-}
-
-/**
- * dma_fifo_init: initialize the fifo to a valid but inoperative state
- * @fifo: address of in-place "struct dma_fifo" object
- */
-void dma_fifo_init(struct dma_fifo *fifo)
-{
- memset(fifo, 0, sizeof(*fifo));
- INIT_LIST_HEAD(&fifo->pending);
-}
-
-/**
- * dma_fifo_alloc - initialize and allocate dma_fifo
- * @fifo: address of in-place "struct dma_fifo" object
- * @size: 'apparent' size, in bytes, of fifo
- * @align: dma alignment to maintain (should be at least cpu cache alignment),
- * must be power of 2
- * @tx_limit: maximum # of bytes transmissible per dma (rounded down to
- * multiple of alignment, but at least align size)
- * @open_limit: maximum # of outstanding dma transactions allowed
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- *
- * The 'apparent' size will be rounded up to next greater aligned size.
- * Returns 0 if no error, otherwise an error code
- */
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
- int tx_limit, int open_limit, gfp_t gfp_mask)
-{
- int capacity;
-
- if (!is_power_of_2(align) || size < 0)
- return -EINVAL;
-
- size = round_up(size, align);
- capacity = size + align * open_limit + align * DMA_FIFO_GUARD;
- fifo->data = kmalloc(capacity, gfp_mask);
- if (!fifo->data)
- return -ENOMEM;
-
- fifo->in = 0;
- fifo->out = 0;
- fifo->done = 0;
- fifo->size = size;
- fifo->avail = size;
- fifo->align = align;
- fifo->tx_limit = max_t(int, round_down(tx_limit, align), align);
- fifo->open = 0;
- fifo->open_limit = open_limit;
- fifo->guard = size + align * open_limit;
- fifo->capacity = capacity;
- fifo->corrupt = 0;
-
- return 0;
-}
-
-/**
- * dma_fifo_free - frees the fifo
- * @fifo: address of in-place "struct dma_fifo" to free
- *
- * Also reinits the fifo to a valid but inoperative state. This
- * allows the fifo to be reused with a different target requiring
- * different fifo parameters.
- */
-void dma_fifo_free(struct dma_fifo *fifo)
-{
- struct dma_pending *pending, *next;
-
- if (!fifo->data)
- return;
-
- list_for_each_entry_safe(pending, next, &fifo->pending, link)
- list_del_init(&pending->link);
- kfree(fifo->data);
- fifo->data = NULL;
-}
-
-/**
- * dma_fifo_reset - dumps the fifo contents and reinits for reuse
- * @fifo: address of in-place "struct dma_fifo" to reset
- */
-void dma_fifo_reset(struct dma_fifo *fifo)
-{
- struct dma_pending *pending, *next;
-
- if (!fifo->data)
- return;
-
- list_for_each_entry_safe(pending, next, &fifo->pending, link)
- list_del_init(&pending->link);
- fifo->in = 0;
- fifo->out = 0;
- fifo->done = 0;
- fifo->avail = fifo->size;
- fifo->open = 0;
- fifo->corrupt = 0;
-}
-
-/**
- * dma_fifo_in - copies data into the fifo
- * @fifo: address of in-place "struct dma_fifo" to write to
- * @src: buffer to copy from
- * @n: # of bytes to copy
- *
- * Returns the # of bytes actually copied, which can be less than requested if
- * the fifo becomes full. If < 0, return is error code.
- */
-int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
-{
- int ofs, l;
-
- if (!fifo->data)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
-
- if (n > fifo->avail)
- n = fifo->avail;
- if (n <= 0)
- return 0;
-
- ofs = fifo->in % fifo->capacity;
- l = min(n, fifo->capacity - ofs);
- memcpy(fifo->data + ofs, src, l);
- memcpy(fifo->data, src + l, n - l);
-
- if (FAIL(fifo, addr_check(fifo->done, fifo->in, fifo->in + n) ||
- fifo->avail < n,
- "fifo corrupt: in:%u out:%u done:%u n:%d avail:%d",
- fifo->in, fifo->out, fifo->done, n, fifo->avail))
- return -ENXIO;
-
- fifo->in += n;
- fifo->avail -= n;
-
- df_trace("in:%u out:%u done:%u n:%d avail:%d", fifo->in, fifo->out,
- fifo->done, n, fifo->avail);
-
- return n;
-}
-
-/**
- * dma_fifo_out_pend - gets address/len of next avail read and marks as pended
- * @fifo: address of in-place "struct dma_fifo" to read from
- * @pended: address of structure to fill with read address/len
- * The data/len fields will be NULL/0 if no dma is pended.
- *
- * Returns the # of used bytes remaining in fifo (ie, if > 0, more data
- * remains in the fifo that was not pended). If < 0, return is error code.
- */
-int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
-{
- unsigned int len, n, ofs, l, limit;
-
- if (!fifo->data)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
-
- pended->len = 0;
- pended->data = NULL;
- pended->out = fifo->out;
-
- len = fifo->in - fifo->out;
- if (!len)
- return -ENODATA;
- if (fifo->open == fifo->open_limit)
- return -EAGAIN;
-
- n = len;
- ofs = fifo->out % fifo->capacity;
- l = fifo->capacity - ofs;
- limit = min_t(unsigned int, l, fifo->tx_limit);
- if (n > limit) {
- n = limit;
- fifo->out += limit;
- } else if (ofs + n > fifo->guard) {
- fifo->out += l;
- fifo->in = fifo->out;
- } else {
- fifo->out += round_up(n, fifo->align);
- fifo->in = fifo->out;
- }
-
- df_trace("in: %u out: %u done: %u n: %d len: %u avail: %d", fifo->in,
- fifo->out, fifo->done, n, len, fifo->avail);
-
- pended->len = n;
- pended->data = fifo->data + ofs;
- pended->next = fifo->out;
- list_add_tail(&pended->link, &fifo->pending);
- ++fifo->open;
-
- if (FAIL(fifo, fifo->open > fifo->open_limit,
- "past open limit:%d (limit:%d)",
- fifo->open, fifo->open_limit))
- return -ENXIO;
- if (FAIL(fifo, fifo->out & (fifo->align - 1),
- "fifo out unaligned:%u (align:%u)",
- fifo->out, fifo->align))
- return -ENXIO;
-
- return len - n;
-}
-
-/**
- * dma_fifo_out_complete - marks pended dma as completed
- * @fifo: address of in-place "struct dma_fifo" which was read from
- * @complete: address of structure for previously pended dma to mark completed
- */
-int dma_fifo_out_complete(struct dma_fifo *fifo, struct dma_pending *complete)
-{
- struct dma_pending *pending, *next, *tmp;
-
- if (!fifo->data)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
- if (list_empty(&fifo->pending) && fifo->open == 0)
- return -EINVAL;
-
- if (FAIL(fifo, list_empty(&fifo->pending) != (fifo->open == 0),
- "pending list disagrees with open count:%d",
- fifo->open))
- return -ENXIO;
-
- tmp = complete->data;
- *tmp = *complete;
- list_replace(&complete->link, &tmp->link);
- dp_mark_completed(tmp);
-
- /* Only update the fifo in the original pended order */
- list_for_each_entry_safe(pending, next, &fifo->pending, link) {
- if (!dp_is_completed(pending)) {
- df_trace("still pending: saved out: %u len: %d",
- pending->out, pending->len);
- break;
- }
-
- if (FAIL(fifo, pending->out != fifo->done ||
- addr_check(fifo->in, fifo->done, pending->next),
- "in:%u out:%u done:%u saved:%u next:%u",
- fifo->in, fifo->out, fifo->done, pending->out,
- pending->next))
- return -ENXIO;
-
- list_del_init(&pending->link);
- fifo->done = pending->next;
- fifo->avail += pending->len;
- --fifo->open;
-
- df_trace("in: %u out: %u done: %u len: %u avail: %d", fifo->in,
- fifo->out, fifo->done, pending->len, fifo->avail);
- }
-
- if (FAIL(fifo, fifo->open < 0, "open dma:%d < 0", fifo->open))
- return -ENXIO;
- if (FAIL(fifo, fifo->avail > fifo->size, "fifo avail:%d > size:%d",
- fifo->avail, fifo->size))
- return -ENXIO;
-
- return 0;
-}
diff --git a/drivers/staging/fwserial/dma_fifo.h b/drivers/staging/fwserial/dma_fifo.h
deleted file mode 100644
index c46a06336975..000000000000
--- a/drivers/staging/fwserial/dma_fifo.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * DMA-able FIFO interface
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- */
-
-#ifndef _DMA_FIFO_H_
-#define _DMA_FIFO_H_
-
-/**
- * The design basis for the DMA FIFO is to provide an output side that
- * complies with the streaming DMA API design that can be DMA'd from directly
- * (without additional copying), coupled with an input side that maintains a
- * logically consistent 'apparent' size (ie, bytes in + bytes avail is static
- * for the lifetime of the FIFO).
- *
- * DMA output transactions originate on a cache line boundary and can be
- * variably-sized. DMA output transactions can be retired out-of-order but
- * the FIFO will only advance the output in the original input sequence.
- * This means the FIFO will eventually stall if a transaction is never retired.
- *
- * Chunking the output side into cache line multiples means that some FIFO
- * memory is unused. For example, if all the avail input has been pended out,
- * then the in and out markers are re-aligned to the next cache line.
- * The maximum possible waste is
- * (cache line alignment - 1) * (max outstanding dma transactions)
- * This potential waste requires additional hidden capacity within the FIFO
- * to be able to accept input while the 'apparent' size has not been reached.
- *
- * Additional cache lines (ie, guard area) are used to minimize DMA
- * fragmentation when wrapping at the end of the FIFO. Input is allowed into the
- * guard area, but the in and out FIFO markers are wrapped when DMA is pended.
- */
-
-#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
-
-struct dma_fifo {
- unsigned int in;
- unsigned int out; /* updated when dma is pended */
- unsigned int done; /* updated upon dma completion */
- struct {
- unsigned corrupt:1;
- };
- int size; /* 'apparent' size of fifo */
- int guard; /* ofs of guard area */
- int capacity; /* size + reserved */
- int avail; /* # of unused bytes in fifo */
- unsigned int align; /* must be power of 2 */
- int tx_limit; /* max # of bytes per dma transaction */
- int open_limit; /* max # of outstanding allowed */
- int open; /* # of outstanding dma transactions */
- struct list_head pending; /* fifo markers for outstanding dma */
- void *data;
-};
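
(The three monotonically increasing markers above, in, out and done, carry the design described in the header comment: writes advance 'in', pending a DMA advances 'out', and space is reclaimed only when completion advances 'done'. A self-contained toy model of that bookkeeping, purely illustrative and omitting alignment, guard area and error checks:)

#include <stdio.h>

struct toy_fifo {
	unsigned int in, out, done;	/* monotonically increasing byte counters */
	unsigned int avail;
};

static void toy_write(struct toy_fifo *f, unsigned int n)
{
	f->in += n;
	f->avail -= n;
}

static unsigned int toy_pend(struct toy_fifo *f, unsigned int n)
{
	if (n > f->in - f->out)
		n = f->in - f->out;
	f->out += n;
	return n;			/* caller would DMA these n bytes */
}

static void toy_complete(struct toy_fifo *f, unsigned int n)
{
	f->done += n;
	f->avail += n;			/* space reusable only after completion */
}

int main(void)
{
	struct toy_fifo f = { .avail = 64 };
	unsigned int n;

	toy_write(&f, 48);
	n = toy_pend(&f, 32);
	printf("in=%u out=%u done=%u avail=%u\n", f.in, f.out, f.done, f.avail);
	toy_complete(&f, n);
	printf("in=%u out=%u done=%u avail=%u\n", f.in, f.out, f.done, f.avail);
	return 0;
}
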
-
-struct dma_pending {
- struct list_head link;
- void *data;
- unsigned int len;
- unsigned int next;
- unsigned int out;
-};
-
-static inline void dp_mark_completed(struct dma_pending *dp)
-{
- dp->data += 1;
-}
-
-static inline bool dp_is_completed(struct dma_pending *dp)
-{
- return (unsigned long)dp->data & 1UL;
-}
-
-void dma_fifo_init(struct dma_fifo *fifo);
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
- int tx_limit, int open_limit, gfp_t gfp_mask);
-void dma_fifo_free(struct dma_fifo *fifo);
-void dma_fifo_reset(struct dma_fifo *fifo);
-int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n);
-int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended);
-int dma_fifo_out_complete(struct dma_fifo *fifo,
- struct dma_pending *complete);
-
-/* returns the # of used bytes in the fifo */
-static inline int dma_fifo_level(struct dma_fifo *fifo)
-{
- return fifo->size - fifo->avail;
-}
-
-/* returns the # of bytes ready for output in the fifo */
-static inline int dma_fifo_out_level(struct dma_fifo *fifo)
-{
- return fifo->in - fifo->out;
-}
-
-/* returns the # of unused bytes in the fifo */
-static inline int dma_fifo_avail(struct dma_fifo *fifo)
-{
- return fifo->avail;
-}
-
-/* returns true if fifo has max # of outstanding dmas */
-static inline bool dma_fifo_busy(struct dma_fifo *fifo)
-{
- return fifo->open == fifo->open_limit;
-}
-
-/* changes the max size of dma returned from dma_fifo_out_pend() */
-static inline int dma_fifo_change_tx_limit(struct dma_fifo *fifo, int tx_limit)
-{
- tx_limit = round_down(tx_limit, fifo->align);
- fifo->tx_limit = max_t(int, tx_limit, fifo->align);
- return 0;
-}
-
-#endif /* _DMA_FIFO_H_ */
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
deleted file mode 100644
index e8fa7f53cd5e..000000000000
--- a/drivers/staging/fwserial/fwserial.c
+++ /dev/null
@@ -1,2890 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * FireWire Serial driver
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/rculist.h>
-#include <linux/workqueue.h>
-#include <linux/ratelimit.h>
-#include <linux/bug.h>
-#include <linux/uaccess.h>
-
-#include "fwserial.h"
-
-inline u64 be32_to_u64(__be32 hi, __be32 lo)
-{
- return ((u64)be32_to_cpu(hi) << 32 | be32_to_cpu(lo));
-}
-
-#define LINUX_VENDOR_ID 0xd00d1eU /* same id used in card root directory */
-#define FWSERIAL_VERSION 0x00e81cU /* must be unique within LINUX_VENDOR_ID */
-
-/* configurable options */
-static int num_ttys = 4; /* # of std ttys to create per fw_card */
- /* - doubles as loopback port index */
-static bool auto_connect = true; /* try to VIRT_CABLE to every peer */
-static bool create_loop_dev = true; /* create a loopback device for each card */
-
-module_param_named(ttys, num_ttys, int, 0644);
-module_param_named(auto, auto_connect, bool, 0644);
-module_param_named(loop, create_loop_dev, bool, 0644);
-
-/*
- * Threshold below which the tty is woken for writing
- * - should be equal to WAKEUP_CHARS in drivers/tty/n_tty.c because
- * even if the writer is woken, n_tty_poll() won't set EPOLLOUT until
- * our fifo is below this level
- */
-#define WAKEUP_CHARS 256
-
-/*
- * fwserial_list: list of every fw_serial created for each fw_card
- * See discussion in fwserial_probe.
- */
-static LIST_HEAD(fwserial_list);
-static DEFINE_MUTEX(fwserial_list_mutex);
-
-/*
- * port_table: array of tty ports allocated to each fw_card
- *
- * tty ports are allocated during probe when an fw_serial is first
- * created for a given fw_card. Ports are allocated in a contiguous block,
- * each block consisting of 'num_ports' ports.
- */
-static struct fwtty_port *port_table[MAX_TOTAL_PORTS];
-static DEFINE_MUTEX(port_table_lock);
-static bool port_table_corrupt;
-#define FWTTY_INVALID_INDEX MAX_TOTAL_PORTS
-
-#define loop_idx(port) (((port)->index) / num_ports)
-#define table_idx(loop) ((loop) * num_ports + num_ttys)
-
-/* total # of tty ports created per fw_card */
-static int num_ports;
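
(The loop_idx()/table_idx() macros above convert between a loopback tty minor and its slot in port_table[]: each card owns a contiguous block of num_ports entries whose last entry is the loopback port, assuming num_ports is num_ttys plus one when loopback devices are created. A standalone check of that mapping with assumed sizes:)

#include <stdio.h>

#define NUM_TTYS  4
#define NUM_PORTS (NUM_TTYS + 1)	/* assumed: +1 loopback port per card */

int main(void)
{
	int loop;

	for (loop = 0; loop < 3; loop++) {
		int table = loop * NUM_PORTS + NUM_TTYS;	/* table_idx() */

		printf("loop minor %d -> port_table[%d] -> loop %d\n",
		       loop, table, table / NUM_PORTS);		/* loop_idx() */
	}
	return 0;
}
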
-
-/* slab used as pool for struct fwtty_transactions */
-static struct kmem_cache *fwtty_txn_cache;
-
-struct tty_driver *fwtty_driver;
-static struct tty_driver *fwloop_driver;
-
-static struct dentry *fwserial_debugfs;
-
-struct fwtty_transaction;
-typedef void (*fwtty_transaction_cb)(struct fw_card *card, int rcode,
- void *data, size_t length,
- struct fwtty_transaction *txn);
-
-struct fwtty_transaction {
- struct fw_transaction fw_txn;
- fwtty_transaction_cb callback;
- struct fwtty_port *port;
- union {
- struct dma_pending dma_pended;
- };
-};
-
-#define to_device(a, b) (a->b)
-#define fwtty_err(p, fmt, ...) \
- dev_err(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_info(p, fmt, ...) \
- dev_info(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_notice(p, fmt, ...) \
- dev_notice(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_dbg(p, fmt, ...) \
- dev_dbg(to_device(p, device), "%s: " fmt, __func__, ##__VA_ARGS__)
-#define fwtty_err_ratelimited(p, fmt, ...) \
- dev_err_ratelimited(to_device(p, device), fmt, ##__VA_ARGS__)
-
-#ifdef DEBUG
-static inline void debug_short_write(struct fwtty_port *port, int c, int n)
-{
- int avail;
-
- if (n < c) {
- spin_lock_bh(&port->lock);
- avail = dma_fifo_avail(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "short write: avail:%d req:%d wrote:%d\n",
- avail, c, n);
- }
-}
-#else
-#define debug_short_write(port, c, n)
-#endif
-
-static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
- int generation, int id);
-
-#ifdef FWTTY_PROFILING
-
-static void fwtty_profile_fifo(struct fwtty_port *port, unsigned int *stat)
-{
- spin_lock_bh(&port->lock);
- fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
- spin_unlock_bh(&port->lock);
-}
-
-static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
-{
- /* for each stat, print sum of 0 to 2^k, then individually */
- int k = 4;
- unsigned int sum;
- int j;
- char t[10];
-
- snprintf(t, 10, "< %d", 1 << k);
- seq_printf(m, "\n%14s %6s", " ", t);
- for (j = k + 1; j < DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", 1 << j);
-
- ++k;
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->reads[j];
- seq_printf(m, "\n%14s: %6d", "reads", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->reads[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->writes[j];
- seq_printf(m, "\n%14s: %6d", "writes", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->writes[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->txns[j];
- seq_printf(m, "\n%14s: %6d", "txns", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->txns[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->unthrottle[j];
- seq_printf(m, "\n%14s: %6d", "avail @ unthr", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->unthrottle[j]);
-}
-
-#else
-#define fwtty_profile_fifo(port, stat)
-#define fwtty_dump_profile(m, stats)
-#endif
-
-/*
- * Returns the max receive packet size for the given node
- * Devices which are OHCI v1.0/ v1.1/ v1.2-draft or RFC 2734 compliant
- * are required by specification to support max_rec of 8 (512 bytes) or more.
- */
-static inline int device_max_receive(struct fw_device *fw_device)
-{
- /* see IEEE 1394-2008 table 8-8 */
- return min(2 << fw_device->max_rec, 4096);
-}
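
(As the comment notes, max_rec encodes the maximum asynchronous payload as 2^(max_rec + 1) bytes, which the helper caps at 4096. A quick standalone table of the same computation:)

#include <stdio.h>

static int max_receive(int max_rec)
{
	int n = 2 << max_rec;		/* 2^(max_rec + 1) bytes */

	return n < 4096 ? n : 4096;	/* cap applied by the driver above */
}

int main(void)
{
	int max_rec;

	for (max_rec = 8; max_rec <= 12; max_rec++)
		printf("max_rec=%2d -> %4d bytes\n", max_rec, max_receive(max_rec));
	return 0;
}
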
-
-static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
-{
- switch (rcode) {
- case RCODE_SEND_ERROR:
- fwtty_err_ratelimited(port, "card busy\n");
- break;
- case RCODE_ADDRESS_ERROR:
- fwtty_err_ratelimited(port, "bad unit addr or write length\n");
- break;
- case RCODE_DATA_ERROR:
- fwtty_err_ratelimited(port, "failed rx\n");
- break;
- case RCODE_NO_ACK:
- fwtty_err_ratelimited(port, "missing ack\n");
- break;
- case RCODE_BUSY:
- fwtty_err_ratelimited(port, "remote busy\n");
- break;
- default:
- fwtty_err_ratelimited(port, "failed tx: %d\n", rcode);
- }
-}
-
-static void fwtty_common_callback(struct fw_card *card, int rcode,
- void *payload, size_t len, void *cb_data)
-{
- struct fwtty_transaction *txn = cb_data;
- struct fwtty_port *port = txn->port;
-
- if (port && rcode != RCODE_COMPLETE)
- fwtty_log_tx_error(port, rcode);
- if (txn->callback)
- txn->callback(card, rcode, payload, len, txn);
- kmem_cache_free(fwtty_txn_cache, txn);
-}
-
-static int fwtty_send_data_async(struct fwtty_peer *peer, int tcode,
- unsigned long long addr, void *payload,
- size_t len, fwtty_transaction_cb callback,
- struct fwtty_port *port)
-{
- struct fwtty_transaction *txn;
- int generation;
-
- txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
- if (!txn)
- return -ENOMEM;
-
- txn->callback = callback;
- txn->port = port;
-
- generation = peer->generation;
- smp_rmb();
- fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
- peer->node_id, generation, peer->speed, addr, payload,
- len, fwtty_common_callback, txn);
- return 0;
-}
-
-static void fwtty_send_txn_async(struct fwtty_peer *peer,
- struct fwtty_transaction *txn, int tcode,
- unsigned long long addr, void *payload,
- size_t len, fwtty_transaction_cb callback,
- struct fwtty_port *port)
-{
- int generation;
-
- txn->callback = callback;
- txn->port = port;
-
- generation = peer->generation;
- smp_rmb();
- fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
- peer->node_id, generation, peer->speed, addr, payload,
- len, fwtty_common_callback, txn);
-}
-
-static void __fwtty_restart_tx(struct fwtty_port *port)
-{
- int len, avail;
-
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len)
- schedule_delayed_work(&port->drain, 0);
- avail = dma_fifo_avail(&port->tx_fifo);
-
- fwtty_dbg(port, "fifo len: %d avail: %d\n", len, avail);
-}
-
-static void fwtty_restart_tx(struct fwtty_port *port)
-{
- spin_lock_bh(&port->lock);
- __fwtty_restart_tx(port);
- spin_unlock_bh(&port->lock);
-}
-
-/*
- * fwtty_update_port_status - decodes & dispatches line status changes
- *
- * Note: in loopback, the port->lock is being held. Only use functions that
- * don't attempt to reclaim the port->lock.
- */
-static void fwtty_update_port_status(struct fwtty_port *port,
- unsigned int status)
-{
- unsigned int delta;
- struct tty_struct *tty;
-
- /* simulated LSR/MSR status from remote */
- status &= ~MCTRL_MASK;
- delta = (port->mstatus ^ status) & ~MCTRL_MASK;
- delta &= ~(status & TIOCM_RNG);
- port->mstatus = status;
-
- if (delta & TIOCM_RNG)
- ++port->icount.rng;
- if (delta & TIOCM_DSR)
- ++port->icount.dsr;
- if (delta & TIOCM_CAR)
- ++port->icount.dcd;
- if (delta & TIOCM_CTS)
- ++port->icount.cts;
-
- fwtty_dbg(port, "status: %x delta: %x\n", status, delta);
-
- if (delta & TIOCM_CAR) {
- tty = tty_port_tty_get(&port->port);
- if (tty && !C_CLOCAL(tty)) {
- if (status & TIOCM_CAR)
- wake_up_interruptible(&port->port.open_wait);
- else
- schedule_work(&port->hangup);
- }
- tty_kref_put(tty);
- }
-
- if (delta & TIOCM_CTS) {
- tty = tty_port_tty_get(&port->port);
- if (tty && C_CRTSCTS(tty)) {
- if (tty->hw_stopped) {
- if (status & TIOCM_CTS) {
- tty->hw_stopped = 0;
- if (port->loopback)
- __fwtty_restart_tx(port);
- else
- fwtty_restart_tx(port);
- }
- } else {
- if (~status & TIOCM_CTS)
- tty->hw_stopped = 1;
- }
- }
- tty_kref_put(tty);
-
- } else if (delta & OOB_TX_THROTTLE) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- if (tty->hw_stopped) {
- if (~status & OOB_TX_THROTTLE) {
- tty->hw_stopped = 0;
- if (port->loopback)
- __fwtty_restart_tx(port);
- else
- fwtty_restart_tx(port);
- }
- } else {
- if (status & OOB_TX_THROTTLE)
- tty->hw_stopped = 1;
- }
- }
- tty_kref_put(tty);
- }
-
- if (delta & (UART_LSR_BI << 24)) {
- if (status & (UART_LSR_BI << 24)) {
- port->break_last = jiffies;
- schedule_delayed_work(&port->emit_breaks, 0);
- } else {
- /* run emit_breaks one last time (if pending) */
- mod_delayed_work(system_wq, &port->emit_breaks, 0);
- }
- }
-
- if (delta & (TIOCM_DSR | TIOCM_CAR | TIOCM_CTS | TIOCM_RNG))
- wake_up_interruptible(&port->port.delta_msr_wait);
-}
-
-/*
- * __fwtty_port_line_status - generate 'line status' for indicated port
- *
- * This function returns a remote 'MSR' state based on the local 'MCR' state,
- * as if a null modem cable was attached. The actual status is a mangling
- * of TIOCM_* bits suitable for sending to a peer's status_addr.
- *
- * Note: caller must be holding port lock
- */
-static unsigned int __fwtty_port_line_status(struct fwtty_port *port)
-{
- unsigned int status = 0;
-
- /* TODO: add module param to tie RNG to DTR as well */
-
- if (port->mctrl & TIOCM_DTR)
- status |= TIOCM_DSR | TIOCM_CAR;
- if (port->mctrl & TIOCM_RTS)
- status |= TIOCM_CTS;
- if (port->mctrl & OOB_RX_THROTTLE)
- status |= OOB_TX_THROTTLE;
- /* emulate BRK as add'l line status */
- if (port->break_ctl)
- status |= UART_LSR_BI << 24;
-
- return status;
-}
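
(A minimal model of the null-modem mapping performed above: the local DTR is reported to the peer as DSR plus carrier, and the local RTS as CTS. The flag values below mirror the usual TIOCM_* constants but are hard-coded so the sketch runs anywhere:)

#include <stdio.h>

#define DTR 0x002
#define RTS 0x004
#define CTS 0x020
#define CAR 0x040
#define DSR 0x100

static unsigned int line_status(unsigned int mctrl)
{
	unsigned int status = 0;

	if (mctrl & DTR)
		status |= DSR | CAR;	/* DTR looks like DSR + carrier to the peer */
	if (mctrl & RTS)
		status |= CTS;		/* RTS looks like CTS to the peer */
	return status;
}

int main(void)
{
	printf("%#x\n", line_status(DTR | RTS));	/* 0x160: DSR|CAR|CTS */
	return 0;
}
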
-
-/*
- * __fwtty_write_port_status - send the port line status to peer
- *
- * Note: caller must be holding the port lock.
- */
-static int __fwtty_write_port_status(struct fwtty_port *port)
-{
- struct fwtty_peer *peer;
- int err = -ENOENT;
- unsigned int status = __fwtty_port_line_status(port);
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (peer) {
- err = fwtty_send_data_async(peer, TCODE_WRITE_QUADLET_REQUEST,
- peer->status_addr, &status,
- sizeof(status), NULL, port);
- }
- rcu_read_unlock();
-
- return err;
-}
-
-/*
- * fwtty_write_port_status - same as above but locked by port lock
- */
-static int fwtty_write_port_status(struct fwtty_port *port)
-{
- int err;
-
- spin_lock_bh(&port->lock);
- err = __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
- return err;
-}
-
-static void fwtty_throttle_port(struct fwtty_port *port)
-{
- struct tty_struct *tty;
- unsigned int old;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
- spin_lock_bh(&port->lock);
-
- old = port->mctrl;
- port->mctrl |= OOB_RX_THROTTLE;
- if (C_CRTSCTS(tty))
- port->mctrl &= ~TIOCM_RTS;
- if (~old & OOB_RX_THROTTLE)
- __fwtty_write_port_status(port);
-
- spin_unlock_bh(&port->lock);
-
- tty_kref_put(tty);
-}
-
-/*
- * fwtty_do_hangup - wait for ldisc to deliver all pending rx; only then hangup
- *
- * When the remote has finished tx, and all in-flight rx has been received and
- * pushed to the flip buffer, the remote may close its device. This will
- * drop DTR on the remote which will drop carrier here. Typically, the tty is
- * hung up when carrier is dropped or lost.
- *
- * However, there is a race between the hang up and the line discipline
- * delivering its data to the reader. A hangup will cause the ldisc to flush
- * (ie., clear) the read buffer and flip buffer. Because of firewire's
- * relatively high throughput, the ldisc frequently lags well behind the driver,
- * resulting in lost data (which has already been received and written to
- * the flip buffer) when the remote closes its end.
- *
- * Unfortunately, since the flip buffer offers no direct method for determining
- * if it holds data, ensuring the ldisc has delivered all data is problematic.
- */
-
-/* FIXME: drop this workaround when __tty_hangup waits for ldisc completion */
-static void fwtty_do_hangup(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(work, hangup);
- struct tty_struct *tty;
-
- schedule_timeout_uninterruptible(msecs_to_jiffies(50));
-
- tty = tty_port_tty_get(&port->port);
- if (tty)
- tty_vhangup(tty);
- tty_kref_put(tty);
-}
-
-static void fwtty_emit_breaks(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks);
- static const char buf[16];
- unsigned long now = jiffies;
- unsigned long elapsed = now - port->break_last;
- int n, t, c, brk = 0;
-
- /* generate breaks at the line rate (but at least 1) */
- n = (elapsed * port->cps) / HZ + 1;
- port->break_last = now;
-
- fwtty_dbg(port, "sending %d brks\n", n);
-
- while (n) {
- t = min(n, 16);
- c = tty_insert_flip_string_fixed_flag(&port->port, buf,
- TTY_BREAK, t);
- n -= c;
- brk += c;
- if (c < t)
- break;
- }
- tty_flip_buffer_push(&port->port);
-
- if (port->mstatus & (UART_LSR_BI << 24))
- schedule_delayed_work(&port->emit_breaks, FREQ_BREAKS);
- port->icount.brk += brk;
-}
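
(The pacing above emits roughly port->cps break characters per second of elapsed break time, and always at least one. The same arithmetic, standalone, with HZ assumed to be 100 purely for illustration since the kernel value is configuration dependent:)

#include <stdio.h>

#define HZ 100				/* assumed tick rate for illustration */

int main(void)
{
	unsigned int cps = 11520;	/* ~115200 baud, 8N1, 10 bits per char */
	unsigned long elapsed = 5;	/* ticks since the last emission (50 ms) */
	unsigned int n = elapsed * cps / HZ + 1;

	printf("emit %u break chars\n", n);	/* 577 */
	return 0;
}
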
-
-static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
-{
- int c, n = len;
- unsigned int lsr;
- int err = 0;
-
- fwtty_dbg(port, "%d\n", n);
- fwtty_profile_data(port->stats.reads, n);
-
- if (port->write_only) {
- n = 0;
- goto out;
- }
-
- /* disregard break status; breaks are generated by emit_breaks work */
- lsr = (port->mstatus >> 24) & ~UART_LSR_BI;
-
- if (port->overrun)
- lsr |= UART_LSR_OE;
-
- if (lsr & UART_LSR_OE)
- ++port->icount.overrun;
-
- lsr &= port->status_mask;
- if (lsr & ~port->ignore_mask & UART_LSR_OE) {
- if (!tty_insert_flip_char(&port->port, 0, TTY_OVERRUN)) {
- err = -EIO;
- goto out;
- }
- }
- port->overrun = false;
-
- if (lsr & port->ignore_mask & ~UART_LSR_OE) {
- /* TODO: don't drop SAK and Magic SysRq here */
- n = 0;
- goto out;
- }
-
- c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n);
- if (c > 0)
- tty_flip_buffer_push(&port->port);
- n -= c;
-
- if (n) {
- port->overrun = true;
- err = -EIO;
- fwtty_err_ratelimited(port, "flip buffer overrun\n");
-
- } else {
- /* throttle the sender if remaining flip buffer space has
- * reached high watermark to avoid losing data which may be
- * in-flight. Since the AR request context is 32k, that much
- * data may have _already_ been acked.
- */
- if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK)
- fwtty_throttle_port(port);
- }
-
-out:
- port->icount.rx += len;
- port->stats.lost += n;
- return err;
-}
-
-/*
- * fwtty_port_handler - bus address handler for port reads/writes
- *
- * This handler is responsible for handling inbound read/write dma from remotes.
- */
-static void fwtty_port_handler(struct fw_card *card,
- struct fw_request *request,
- int tcode, int destination, int source,
- int generation,
- unsigned long long addr,
- void *data, size_t len,
- void *callback_data)
-{
- struct fwtty_port *port = callback_data;
- struct fwtty_peer *peer;
- int err;
- int rcode;
-
- /* Only accept rx from the peer virtual-cabled to this port */
- rcu_read_lock();
- peer = __fwserial_peer_by_node_id(card, generation, source);
- rcu_read_unlock();
- if (!peer || peer != rcu_access_pointer(port->peer)) {
- rcode = RCODE_ADDRESS_ERROR;
- fwtty_err_ratelimited(port, "ignoring unauthenticated data\n");
- goto respond;
- }
-
- switch (tcode) {
- case TCODE_WRITE_QUADLET_REQUEST:
- if (addr != port->rx_handler.offset || len != 4) {
- rcode = RCODE_ADDRESS_ERROR;
- } else {
- fwtty_update_port_status(port, *(unsigned int *)data);
- rcode = RCODE_COMPLETE;
- }
- break;
-
- case TCODE_WRITE_BLOCK_REQUEST:
- if (addr != port->rx_handler.offset + 4 ||
- len > port->rx_handler.length - 4) {
- rcode = RCODE_ADDRESS_ERROR;
- } else {
- err = fwtty_rx(port, data, len);
- switch (err) {
- case 0:
- rcode = RCODE_COMPLETE;
- break;
- case -EIO:
- rcode = RCODE_DATA_ERROR;
- break;
- default:
- rcode = RCODE_CONFLICT_ERROR;
- break;
- }
- }
- break;
-
- default:
- rcode = RCODE_TYPE_ERROR;
- }
-
-respond:
- fw_send_response(card, request, rcode);
-}
-
-/*
- * fwtty_tx_complete - callback for tx dma
- * @data: ignored, has no meaning for write txns
- * @length: ignored, has no meaning for write txns
- *
- * The writer must be woken here if the fifo has been emptied because it
- * may have slept if chars_in_buffer was != 0
- */
-static void fwtty_tx_complete(struct fw_card *card, int rcode,
- void *data, size_t length,
- struct fwtty_transaction *txn)
-{
- struct fwtty_port *port = txn->port;
- int len;
-
- fwtty_dbg(port, "rcode: %d\n", rcode);
-
- switch (rcode) {
- case RCODE_COMPLETE:
- spin_lock_bh(&port->lock);
- dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended);
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- port->icount.tx += txn->dma_pended.len;
- break;
-
- default:
- /* TODO: implement retries */
- spin_lock_bh(&port->lock);
- dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended);
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- port->stats.dropped += txn->dma_pended.len;
- }
-
- if (len < WAKEUP_CHARS)
- tty_port_tty_wakeup(&port->port);
-}
-
-static int fwtty_tx(struct fwtty_port *port, bool drain)
-{
- struct fwtty_peer *peer;
- struct fwtty_transaction *txn;
- struct tty_struct *tty;
- int n, len;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return -ENOENT;
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (!peer) {
- n = -EIO;
- goto out;
- }
-
- if (test_and_set_bit(IN_TX, &port->flags)) {
- n = -EALREADY;
- goto out;
- }
-
- /* try to write as many dma transactions out as possible */
- n = -EAGAIN;
- while (!tty->flow.stopped && !tty->hw_stopped &&
- !test_bit(STOP_TX, &port->flags)) {
- txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
- if (!txn) {
- n = -ENOMEM;
- break;
- }
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_out_pend(&port->tx_fifo, &txn->dma_pended);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "out: %u rem: %d\n", txn->dma_pended.len, n);
-
- if (n < 0) {
- kmem_cache_free(fwtty_txn_cache, txn);
- if (n == -EAGAIN) {
- ++port->stats.tx_stall;
- } else if (n == -ENODATA) {
- fwtty_profile_data(port->stats.txns, 0);
- } else {
- ++port->stats.fifo_errs;
- fwtty_err_ratelimited(port, "fifo err: %d\n",
- n);
- }
- break;
- }
-
- fwtty_profile_data(port->stats.txns, txn->dma_pended.len);
-
- fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST,
- peer->fifo_addr, txn->dma_pended.data,
- txn->dma_pended.len, fwtty_tx_complete,
- port);
- ++port->stats.sent;
-
- /*
- * Stop tx if the 'last view' of the fifo is empty or if
- * this is the writer and there's not enough data to bother
- */
- if (n == 0 || (!drain && n < WRITER_MINIMUM))
- break;
- }
-
- if (n >= 0 || n == -EAGAIN || n == -ENOMEM || n == -ENODATA) {
- spin_lock_bh(&port->lock);
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len) {
- unsigned long delay = (n == -ENOMEM) ? HZ : 1;
-
- schedule_delayed_work(&port->drain, delay);
- }
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- /* wakeup the writer */
- if (drain && len < WAKEUP_CHARS)
- tty_wakeup(tty);
- }
-
- clear_bit(IN_TX, &port->flags);
- wake_up_interruptible(&port->wait_tx);
-
-out:
- rcu_read_unlock();
- tty_kref_put(tty);
- return n;
-}
-
-static void fwtty_drain_tx(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(to_delayed_work(work), drain);
-
- fwtty_tx(port, true);
-}
-
-static void fwtty_write_xchar(struct fwtty_port *port, char ch)
-{
- struct fwtty_peer *peer;
-
- ++port->stats.xchars;
-
- fwtty_dbg(port, "%02x\n", ch);
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (peer) {
- fwtty_send_data_async(peer, TCODE_WRITE_BLOCK_REQUEST,
- peer->fifo_addr, &ch, sizeof(ch),
- NULL, port);
- }
- rcu_read_unlock();
-}
-
-static struct fwtty_port *fwtty_port_get(unsigned int index)
-{
- struct fwtty_port *port;
-
- if (index >= MAX_TOTAL_PORTS)
- return NULL;
-
- mutex_lock(&port_table_lock);
- port = port_table[index];
- if (port)
- kref_get(&port->serial->kref);
- mutex_unlock(&port_table_lock);
- return port;
-}
-
-static int fwtty_ports_add(struct fw_serial *serial)
-{
- int err = -EBUSY;
- int i, j;
-
- if (port_table_corrupt)
- return err;
-
- mutex_lock(&port_table_lock);
- for (i = 0; i + num_ports <= MAX_TOTAL_PORTS; i += num_ports) {
- if (!port_table[i]) {
- for (j = 0; j < num_ports; ++i, ++j) {
- serial->ports[j]->index = i;
- port_table[i] = serial->ports[j];
- }
- err = 0;
- break;
- }
- }
- mutex_unlock(&port_table_lock);
- return err;
-}
-
-static void fwserial_destroy(struct kref *kref)
-{
- struct fw_serial *serial = to_serial(kref, kref);
- struct fwtty_port **ports = serial->ports;
- int j, i = ports[0]->index;
-
- synchronize_rcu();
-
- mutex_lock(&port_table_lock);
- for (j = 0; j < num_ports; ++i, ++j) {
- port_table_corrupt |= port_table[i] != ports[j];
- WARN_ONCE(port_table_corrupt, "port_table[%d]: %p != ports[%d]: %p",
- i, port_table[i], j, ports[j]);
-
- port_table[i] = NULL;
- }
- mutex_unlock(&port_table_lock);
-
- for (j = 0; j < num_ports; ++j) {
- fw_core_remove_address_handler(&ports[j]->rx_handler);
- tty_port_destroy(&ports[j]->port);
- kfree(ports[j]);
- }
- kfree(serial);
-}
-
-static void fwtty_port_put(struct fwtty_port *port)
-{
- kref_put(&port->serial->kref, fwserial_destroy);
-}
-
-static void fwtty_port_dtr_rts(struct tty_port *tty_port, int on)
-{
- struct fwtty_port *port = to_port(tty_port, port);
-
- fwtty_dbg(port, "on/off: %d\n", on);
-
- spin_lock_bh(&port->lock);
- /* Don't change carrier state if this is a console */
- if (!port->port.console) {
- if (on)
- port->mctrl |= TIOCM_DTR | TIOCM_RTS;
- else
- port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
- }
-
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-}
-
-/*
- * fwtty_port_carrier_raised: required tty_port operation
- *
- * This port operation is polled after a tty has been opened and is waiting for
- * carrier detect -- see drivers/tty/tty_port:tty_port_block_til_ready().
- */
-static int fwtty_port_carrier_raised(struct tty_port *tty_port)
-{
- struct fwtty_port *port = to_port(tty_port, port);
- int rc;
-
- rc = (port->mstatus & TIOCM_CAR);
-
- fwtty_dbg(port, "%d\n", rc);
-
- return rc;
-}
-
-static unsigned int set_termios(struct fwtty_port *port, struct tty_struct *tty)
-{
- unsigned int baud, frame;
-
- baud = tty_termios_baud_rate(&tty->termios);
- tty_termios_encode_baud_rate(&tty->termios, baud, baud);
-
- /* compute bit count of 2 frames */
- frame = 12 + ((C_CSTOPB(tty)) ? 4 : 2) + ((C_PARENB(tty)) ? 2 : 0);
-
- switch (C_CSIZE(tty)) {
- case CS5:
- frame -= (C_CSTOPB(tty)) ? 1 : 0;
- break;
- case CS6:
- frame += 2;
- break;
- case CS7:
- frame += 4;
- break;
- case CS8:
- frame += 6;
- break;
- }
-
- port->cps = (baud << 1) / frame;
-
- port->status_mask = UART_LSR_OE;
- if (_I_FLAG(tty, BRKINT | PARMRK))
- port->status_mask |= UART_LSR_BI;
-
- port->ignore_mask = 0;
- if (I_IGNBRK(tty)) {
- port->ignore_mask |= UART_LSR_BI;
- if (I_IGNPAR(tty))
- port->ignore_mask |= UART_LSR_OE;
- }
-
- port->write_only = !C_CREAD(tty);
-
- /* turn off echo and newline xlat if loopback */
- if (port->loopback) {
- tty->termios.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHOKE |
- ECHONL | ECHOPRT | ECHOCTL);
- tty->termios.c_oflag &= ~ONLCR;
- }
-
- return baud;
-}
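
(For reference, the chars-per-second estimate above counts the bits of two character frames: start, data, optional parity and stop bits. For 8N1 that gives frame = 20, so cps = baud / 10. A standalone rework of the same computation; the helper name and argument layout are illustrative:)

#include <stdio.h>

static unsigned int cps(unsigned int baud, int data_bits, int parity, int two_stop)
{
	/* 2 start bits + 2*data + 2*parity + 2 or 4 stop bits, per two frames */
	unsigned int frame = 2 + 2 * data_bits + (parity ? 2 : 0) + (two_stop ? 4 : 2);

	return (baud << 1) / frame;
}

int main(void)
{
	printf("115200 8N1 -> %u cps\n", cps(115200, 8, 0, 0));	/* 11520 */
	printf("  9600 7E1 -> %u cps\n", cps(9600, 7, 1, 0));	/* 960 */
	return 0;
}
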
-
-static int fwtty_port_activate(struct tty_port *tty_port,
- struct tty_struct *tty)
-{
- struct fwtty_port *port = to_port(tty_port, port);
- unsigned int baud;
- int err;
-
- set_bit(TTY_IO_ERROR, &tty->flags);
-
- err = dma_fifo_alloc(&port->tx_fifo, FWTTY_PORT_TXFIFO_LEN,
- cache_line_size(),
- port->max_payload,
- FWTTY_PORT_MAX_PEND_DMA,
- GFP_KERNEL);
- if (err)
- return err;
-
- spin_lock_bh(&port->lock);
-
- baud = set_termios(port, tty);
-
- /* if console, don't change carrier state */
- if (!port->port.console) {
- port->mctrl = 0;
- if (baud != 0)
- port->mctrl = TIOCM_DTR | TIOCM_RTS;
- }
-
- if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS)
- tty->hw_stopped = 1;
-
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- clear_bit(TTY_IO_ERROR, &tty->flags);
-
- return 0;
-}
-
-/*
- * fwtty_port_shutdown
- *
- * Note: the tty port core ensures this is not the console and
- * manages TTY_IO_ERROR properly
- */
-static void fwtty_port_shutdown(struct tty_port *tty_port)
-{
- struct fwtty_port *port = to_port(tty_port, port);
-
- /* TODO: cancel outstanding transactions */
-
- cancel_delayed_work_sync(&port->emit_breaks);
- cancel_delayed_work_sync(&port->drain);
-
- spin_lock_bh(&port->lock);
- port->flags = 0;
- port->break_ctl = 0;
- port->overrun = 0;
- __fwtty_write_port_status(port);
- dma_fifo_free(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-}
-
-static int fwtty_open(struct tty_struct *tty, struct file *fp)
-{
- struct fwtty_port *port = tty->driver_data;
-
- return tty_port_open(&port->port, tty, fp);
-}
-
-static void fwtty_close(struct tty_struct *tty, struct file *fp)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty_port_close(&port->port, tty, fp);
-}
-
-static void fwtty_hangup(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty_port_hangup(&port->port);
-}
-
-static void fwtty_cleanup(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty->driver_data = NULL;
- fwtty_port_put(port);
-}
-
-static int fwtty_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- struct fwtty_port *port = fwtty_port_get(tty->index);
- int err;
-
- err = tty_standard_install(driver, tty);
- if (!err)
- tty->driver_data = port;
- else
- fwtty_port_put(port);
- return err;
-}
-
-static int fwloop_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- struct fwtty_port *port = fwtty_port_get(table_idx(tty->index));
- int err;
-
- err = tty_standard_install(driver, tty);
- if (!err)
- tty->driver_data = port;
- else
- fwtty_port_put(port);
- return err;
-}
-
-static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
-{
- struct fwtty_port *port = tty->driver_data;
- int n, len;
-
- fwtty_dbg(port, "%d\n", c);
- fwtty_profile_data(port->stats.writes, c);
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_in(&port->tx_fifo, buf, c);
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len < DRAIN_THRESHOLD)
- schedule_delayed_work(&port->drain, 1);
- spin_unlock_bh(&port->lock);
-
- if (len >= DRAIN_THRESHOLD)
- fwtty_tx(port, false);
-
- debug_short_write(port, c, n);
-
- return (n < 0) ? 0 : n;
-}
-
-static unsigned int fwtty_write_room(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int n;
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_avail(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%u\n", n);
-
- return n;
-}
-
-static unsigned int fwtty_chars_in_buffer(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int n;
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%u\n", n);
-
- return n;
-}
-
-static void fwtty_send_xchar(struct tty_struct *tty, char ch)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "%02x\n", ch);
-
- fwtty_write_xchar(port, ch);
-}
-
-static void fwtty_throttle(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- /*
- * Ignore throttling (but not unthrottling).
- * It only makes sense to throttle when data will no longer be
- * accepted by the tty flip buffer. For example, it is
- * possible for received data to overflow the tty buffer long
- * before the line discipline ever has a chance to throttle the driver.
- * Additionally, the driver may have already completed the I/O
- * but the tty buffer is still emptying, so the line discipline is
- * throttling and unthrottling nothing.
- */
-
- ++port->stats.throttled;
-}
-
-static void fwtty_unthrottle(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "CRTSCTS: %d\n", C_CRTSCTS(tty) != 0);
-
- fwtty_profile_fifo(port, port->stats.unthrottle);
-
- spin_lock_bh(&port->lock);
- port->mctrl &= ~OOB_RX_THROTTLE;
- if (C_CRTSCTS(tty))
- port->mctrl |= TIOCM_RTS;
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-}
-
-static int check_msr_delta(struct fwtty_port *port, unsigned long mask,
- struct async_icount *prev)
-{
- struct async_icount now;
- int delta;
-
- now = port->icount;
-
- delta = ((mask & TIOCM_RNG && prev->rng != now.rng) ||
- (mask & TIOCM_DSR && prev->dsr != now.dsr) ||
- (mask & TIOCM_CAR && prev->dcd != now.dcd) ||
- (mask & TIOCM_CTS && prev->cts != now.cts));
-
- *prev = now;
-
- return delta;
-}
-
-static int wait_msr_change(struct fwtty_port *port, unsigned long mask)
-{
- struct async_icount prev;
-
- prev = port->icount;
-
- return wait_event_interruptible(port->port.delta_msr_wait,
- check_msr_delta(port, mask, &prev));
-}
-
-static int get_serial_info(struct tty_struct *tty,
- struct serial_struct *ss)
-{
- struct fwtty_port *port = tty->driver_data;
-
- mutex_lock(&port->port.mutex);
- ss->line = port->index;
- ss->baud_base = 400000000;
- ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
- ss->closing_wait = 3000;
- mutex_unlock(&port->port.mutex);
-
- return 0;
-}
-
-static int set_serial_info(struct tty_struct *tty,
- struct serial_struct *ss)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int cdelay;
-
- cdelay = msecs_to_jiffies(ss->close_delay * 10);
-
- mutex_lock(&port->port.mutex);
- if (!capable(CAP_SYS_ADMIN)) {
- if (cdelay != port->port.close_delay ||
- ((ss->flags & ~ASYNC_USR_MASK) !=
- (port->port.flags & ~ASYNC_USR_MASK))) {
- mutex_unlock(&port->port.mutex);
- return -EPERM;
- }
- }
- port->port.close_delay = cdelay;
- mutex_unlock(&port->port.mutex);
-
- return 0;
-}
-
-static int fwtty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct fwtty_port *port = tty->driver_data;
- int err;
-
- switch (cmd) {
- case TIOCMIWAIT:
- err = wait_msr_change(port, arg);
- break;
-
- default:
- err = -ENOIOCTLCMD;
- }
-
- return err;
-}
-
-static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int baud;
-
- spin_lock_bh(&port->lock);
- baud = set_termios(port, tty);
-
- if ((baud == 0) && (old->c_cflag & CBAUD)) {
- port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
- } else if ((baud != 0) && !(old->c_cflag & CBAUD)) {
- if (C_CRTSCTS(tty) || !tty_throttled(tty))
- port->mctrl |= TIOCM_DTR | TIOCM_RTS;
- else
- port->mctrl |= TIOCM_DTR;
- }
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- if (old->c_cflag & CRTSCTS) {
- if (!C_CRTSCTS(tty)) {
- tty->hw_stopped = 0;
- fwtty_restart_tx(port);
- }
- } else if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS) {
- tty->hw_stopped = 1;
- }
-}
-
-/*
- * fwtty_break_ctl - start/stop sending breaks
- *
- * Signals the remote to start or stop generating simulated breaks.
- * First, stop dequeueing from the fifo and wait for writer/drain to leave tx
- * before signalling the break line status. This guarantees any pending rx will
- * be queued to the line discipline before break is simulated on the remote.
- * Conversely, turning off break_ctl requires signalling the line status change,
- * then enabling tx.
- */
-static int fwtty_break_ctl(struct tty_struct *tty, int state)
-{
- struct fwtty_port *port = tty->driver_data;
- long ret;
-
- fwtty_dbg(port, "%d\n", state);
-
- if (state == -1) {
- set_bit(STOP_TX, &port->flags);
- ret = wait_event_interruptible_timeout(port->wait_tx,
- !test_bit(IN_TX, &port->flags),
- 10);
- if (ret == 0 || ret == -ERESTARTSYS) {
- clear_bit(STOP_TX, &port->flags);
- fwtty_restart_tx(port);
- return -EINTR;
- }
- }
-
- spin_lock_bh(&port->lock);
- port->break_ctl = (state == -1);
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- if (state == 0) {
- spin_lock_bh(&port->lock);
- dma_fifo_reset(&port->tx_fifo);
- clear_bit(STOP_TX, &port->flags);
- spin_unlock_bh(&port->lock);
- }
- return 0;
-}
-
-static int fwtty_tiocmget(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int tiocm;
-
- spin_lock_bh(&port->lock);
- tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%x\n", tiocm);
-
- return tiocm;
-}
-
-static int fwtty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "set: %x clear: %x\n", set, clear);
-
- /* TODO: simulate loopback if TIOCM_LOOP set */
-
- spin_lock_bh(&port->lock);
- port->mctrl &= ~(clear & MCTRL_MASK & 0xffff);
- port->mctrl |= set & MCTRL_MASK & 0xffff;
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
- return 0;
-}
-
-static int fwtty_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount)
-{
- struct fwtty_port *port = tty->driver_data;
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- icount->cts = port->icount.cts;
- icount->dsr = port->icount.dsr;
- icount->rng = port->icount.rng;
- icount->dcd = port->icount.dcd;
- icount->rx = port->icount.rx;
- icount->tx = port->icount.tx + stats.xchars;
- icount->frame = port->icount.frame;
- icount->overrun = port->icount.overrun;
- icount->parity = port->icount.parity;
- icount->brk = port->icount.brk;
- icount->buf_overrun = port->icount.overrun;
- return 0;
-}
-
-static void fwtty_proc_show_port(struct seq_file *m, struct fwtty_port *port)
-{
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- seq_printf(m, " addr:%012llx tx:%d rx:%d", port->rx_handler.offset,
- port->icount.tx + stats.xchars, port->icount.rx);
- seq_printf(m, " cts:%d dsr:%d rng:%d dcd:%d", port->icount.cts,
- port->icount.dsr, port->icount.rng, port->icount.dcd);
- seq_printf(m, " fe:%d oe:%d pe:%d brk:%d", port->icount.frame,
- port->icount.overrun, port->icount.parity, port->icount.brk);
-}
-
-static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port)
-{
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped,
- stats.tx_stall, stats.fifo_errs, stats.lost);
- seq_printf(m, " pkts:%d thr:%d", stats.sent, stats.throttled);
-
- if (port->port.console) {
- seq_puts(m, "\n ");
- (*port->fwcon_ops->proc_show)(m, port->con_data);
- }
-
- fwtty_dump_profile(m, &port->stats);
-}
-
-static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
-{
- int generation = peer->generation;
-
- smp_rmb();
- seq_printf(m, " %s:", dev_name(&peer->unit->device));
- seq_printf(m, " node:%04x gen:%d", peer->node_id, generation);
- seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed,
- peer->max_payload, (unsigned long long)peer->guid);
- seq_printf(m, " mgmt:%012llx", (unsigned long long)peer->mgmt_addr);
- seq_printf(m, " addr:%012llx", (unsigned long long)peer->status_addr);
- seq_putc(m, '\n');
-}
-
-static int fwtty_proc_show(struct seq_file *m, void *v)
-{
- struct fwtty_port *port;
- int i;
-
- seq_puts(m, "fwserinfo: 1.0 driver: 1.0\n");
- for (i = 0; i < MAX_TOTAL_PORTS && (port = fwtty_port_get(i)); ++i) {
- seq_printf(m, "%2d:", i);
- if (capable(CAP_SYS_ADMIN))
- fwtty_proc_show_port(m, port);
- fwtty_port_put(port);
- seq_puts(m, "\n");
- }
- return 0;
-}
-
-static int fwtty_stats_show(struct seq_file *m, void *v)
-{
- struct fw_serial *serial = m->private;
- struct fwtty_port *port;
- int i;
-
- for (i = 0; i < num_ports; ++i) {
- port = fwtty_port_get(serial->ports[i]->index);
- if (port) {
- seq_printf(m, "%2d:", port->index);
- fwtty_proc_show_port(m, port);
- fwtty_debugfs_show_port(m, port);
- fwtty_port_put(port);
- seq_puts(m, "\n");
- }
- }
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(fwtty_stats);
-
-static int fwtty_peers_show(struct seq_file *m, void *v)
-{
- struct fw_serial *serial = m->private;
- struct fwtty_peer *peer;
-
- rcu_read_lock();
- seq_printf(m, "card: %s guid: %016llx\n",
- dev_name(serial->card->device),
- (unsigned long long)serial->card->guid);
- list_for_each_entry_rcu(peer, &serial->peer_list, list)
- fwtty_debugfs_show_peer(m, peer);
- rcu_read_unlock();
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(fwtty_peers);
-
-static const struct tty_port_operations fwtty_port_ops = {
- .dtr_rts = fwtty_port_dtr_rts,
- .carrier_raised = fwtty_port_carrier_raised,
- .shutdown = fwtty_port_shutdown,
- .activate = fwtty_port_activate,
-};
-
-static const struct tty_operations fwtty_ops = {
- .open = fwtty_open,
- .close = fwtty_close,
- .hangup = fwtty_hangup,
- .cleanup = fwtty_cleanup,
- .install = fwtty_install,
- .write = fwtty_write,
- .write_room = fwtty_write_room,
- .chars_in_buffer = fwtty_chars_in_buffer,
- .send_xchar = fwtty_send_xchar,
- .throttle = fwtty_throttle,
- .unthrottle = fwtty_unthrottle,
- .ioctl = fwtty_ioctl,
- .set_termios = fwtty_set_termios,
- .break_ctl = fwtty_break_ctl,
- .tiocmget = fwtty_tiocmget,
- .tiocmset = fwtty_tiocmset,
- .get_icount = fwtty_get_icount,
- .set_serial = set_serial_info,
- .get_serial = get_serial_info,
- .proc_show = fwtty_proc_show,
-};
-
-static const struct tty_operations fwloop_ops = {
- .open = fwtty_open,
- .close = fwtty_close,
- .hangup = fwtty_hangup,
- .cleanup = fwtty_cleanup,
- .install = fwloop_install,
- .write = fwtty_write,
- .write_room = fwtty_write_room,
- .chars_in_buffer = fwtty_chars_in_buffer,
- .send_xchar = fwtty_send_xchar,
- .throttle = fwtty_throttle,
- .unthrottle = fwtty_unthrottle,
- .ioctl = fwtty_ioctl,
- .set_termios = fwtty_set_termios,
- .break_ctl = fwtty_break_ctl,
- .tiocmget = fwtty_tiocmget,
- .tiocmset = fwtty_tiocmset,
- .get_icount = fwtty_get_icount,
- .set_serial = set_serial_info,
- .get_serial = get_serial_info,
-};
-
-static inline int mgmt_pkt_expected_len(__be16 code)
-{
- static const struct fwserial_mgmt_pkt pkt;
-
- switch (be16_to_cpu(code)) {
- case FWSC_VIRT_CABLE_PLUG:
- return sizeof(pkt.hdr) + sizeof(pkt.plug_req);
-
- case FWSC_VIRT_CABLE_PLUG_RSP: /* | FWSC_RSP_OK */
- return sizeof(pkt.hdr) + sizeof(pkt.plug_rsp);
-
- case FWSC_VIRT_CABLE_UNPLUG:
- case FWSC_VIRT_CABLE_UNPLUG_RSP:
- case FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK:
- case FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK:
- return sizeof(pkt.hdr);
-
- default:
- return -1;
- }
-}
-
-static inline void fill_plug_params(struct virt_plug_params *params,
- struct fwtty_port *port)
-{
- u64 status_addr = port->rx_handler.offset;
- u64 fifo_addr = port->rx_handler.offset + 4;
- size_t fifo_len = port->rx_handler.length - 4;
-
- params->status_hi = cpu_to_be32(status_addr >> 32);
- params->status_lo = cpu_to_be32(status_addr);
- params->fifo_hi = cpu_to_be32(fifo_addr >> 32);
- params->fifo_lo = cpu_to_be32(fifo_addr);
- params->fifo_len = cpu_to_be32(fifo_len);
-}
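
(fill_plug_params() splits the 64-bit CSR addresses into hi/lo 32-bit halves for the wire, and be32_to_u64() near the top of this file rejoins them on receipt. A round-trip check with host-order integers; the driver additionally converts to and from big-endian:)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0xfffff00010000400ULL;		/* illustrative CSR address */
	uint32_t hi = addr >> 32;
	uint32_t lo = (uint32_t)addr;			/* low half of the split */
	uint64_t back = ((uint64_t)hi << 32) | lo;	/* be32_to_u64() equivalent */

	printf("split %#x/%#x -> %#llx\n", (unsigned)hi, (unsigned)lo,
	       (unsigned long long)back);
	return back == addr ? 0 : 1;
}
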
-
-static inline void fill_plug_req(struct fwserial_mgmt_pkt *pkt,
- struct fwtty_port *port)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
- fill_plug_params(&pkt->plug_req, port);
-}
-
-static inline void fill_plug_rsp_ok(struct fwserial_mgmt_pkt *pkt,
- struct fwtty_port *port)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
- fill_plug_params(&pkt->plug_rsp, port);
-}
-
-static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static inline void fill_unplug_rsp_ok(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
- struct virt_plug_params *params)
-{
- struct fwtty_port *port = peer->port;
-
- peer->status_addr = be32_to_u64(params->status_hi, params->status_lo);
- peer->fifo_addr = be32_to_u64(params->fifo_hi, params->fifo_lo);
- peer->fifo_len = be32_to_cpu(params->fifo_len);
- peer_set_state(peer, FWPS_ATTACHED);
-
- /* reconfigure tx_fifo optimally for this peer */
- spin_lock_bh(&port->lock);
- port->max_payload = min(peer->max_payload, peer->fifo_len);
- dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
- spin_unlock_bh(&peer->port->lock);
-
- if (port->port.console && port->fwcon_ops->notify)
- (*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
-
- fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
- (unsigned long long)peer->guid, dev_name(port->device));
-}
-
-static inline int fwserial_send_mgmt_sync(struct fwtty_peer *peer,
- struct fwserial_mgmt_pkt *pkt)
-{
- int generation;
- int rcode, tries = 5;
-
- do {
- generation = peer->generation;
- smp_rmb();
-
- rcode = fw_run_transaction(peer->serial->card,
- TCODE_WRITE_BLOCK_REQUEST,
- peer->node_id,
- generation, peer->speed,
- peer->mgmt_addr,
- pkt, be16_to_cpu(pkt->hdr.len));
- if (rcode == RCODE_BUSY || rcode == RCODE_SEND_ERROR ||
- rcode == RCODE_GENERATION) {
- fwtty_dbg(&peer->unit, "mgmt write error: %d\n", rcode);
- continue;
- } else {
- break;
- }
- } while (--tries > 0);
- return rcode;
-}
-
-/*
- * fwserial_claim_port - attempt to claim port @ index for peer
- *
- * Returns ptr to claimed port or error code (as ERR_PTR())
- * Can sleep - must be called from process context
- */
-static struct fwtty_port *fwserial_claim_port(struct fwtty_peer *peer,
- int index)
-{
- struct fwtty_port *port;
-
- if (index < 0 || index >= num_ports)
- return ERR_PTR(-EINVAL);
-
- /* must guarantee that previous port releases have completed */
- synchronize_rcu();
-
- port = peer->serial->ports[index];
- spin_lock_bh(&port->lock);
- if (!rcu_access_pointer(port->peer))
- rcu_assign_pointer(port->peer, peer);
- else
- port = ERR_PTR(-EBUSY);
- spin_unlock_bh(&port->lock);
-
- return port;
-}
-
-/*
- * fwserial_find_port - find avail port and claim for peer
- *
- * Returns ptr to claimed port or NULL if none avail
- * Can sleep - must be called from process context
- */
-static struct fwtty_port *fwserial_find_port(struct fwtty_peer *peer)
-{
- struct fwtty_port **ports = peer->serial->ports;
- int i;
-
- /* must guarantee that previous port releases have completed */
- synchronize_rcu();
-
- /* TODO: implement optional GUID-to-specific port # matching */
-
- /* find an unattached port (but not the loopback port, if present) */
- for (i = 0; i < num_ttys; ++i) {
- spin_lock_bh(&ports[i]->lock);
- if (!ports[i]->peer) {
- /* claim port */
- rcu_assign_pointer(ports[i]->peer, peer);
- spin_unlock_bh(&ports[i]->lock);
- return ports[i];
- }
- spin_unlock_bh(&ports[i]->lock);
- }
- return NULL;
-}
-
-static void fwserial_release_port(struct fwtty_port *port, bool reset)
-{
- /* drop carrier (and all other line status) */
- if (reset)
- fwtty_update_port_status(port, 0);
-
- spin_lock_bh(&port->lock);
-
- /* reset dma fifo max transmission size back to S100 */
- port->max_payload = link_speed_to_max_payload(SCODE_100);
- dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
-
- RCU_INIT_POINTER(port->peer, NULL);
- spin_unlock_bh(&port->lock);
-
- if (port->port.console && port->fwcon_ops->notify)
- (*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
-}
-
-static void fwserial_plug_timeout(struct timer_list *t)
-{
- struct fwtty_peer *peer = from_timer(peer, t, timer);
- struct fwtty_port *port;
-
- spin_lock_bh(&peer->lock);
- if (peer->state != FWPS_PLUG_PENDING) {
- spin_unlock_bh(&peer->lock);
- return;
- }
-
- port = peer_revert_state(peer);
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, false);
-}
-
-/*
- * fwserial_connect_peer - initiate virtual cable with peer
- *
- * Returns 0 if VIRT_CABLE_PLUG request was successfully sent,
- * otherwise error code. Must be called from process context.
- */
-static int fwserial_connect_peer(struct fwtty_peer *peer)
-{
- struct fwtty_port *port;
- struct fwserial_mgmt_pkt *pkt;
- int err, rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return -ENOMEM;
-
- port = fwserial_find_port(peer);
- if (!port) {
- fwtty_err(&peer->unit, "avail ports in use\n");
- err = -EBUSY;
- goto free_pkt;
- }
-
- spin_lock_bh(&peer->lock);
-
- /* only initiate VIRT_CABLE_PLUG if peer is currently not attached */
- if (peer->state != FWPS_NOT_ATTACHED) {
- err = -EBUSY;
- goto release_port;
- }
-
- peer->port = port;
- peer_set_state(peer, FWPS_PLUG_PENDING);
-
- fill_plug_req(pkt, peer->port);
-
- mod_timer(&peer->timer, jiffies + VIRT_CABLE_PLUG_TIMEOUT);
- spin_unlock_bh(&peer->lock);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_PLUG_PENDING && rcode != RCODE_COMPLETE) {
- if (rcode == RCODE_CONFLICT_ERROR)
- err = -EAGAIN;
- else
- err = -EIO;
- goto cancel_timer;
- }
- spin_unlock_bh(&peer->lock);
-
- kfree(pkt);
- return 0;
-
-cancel_timer:
- del_timer(&peer->timer);
- peer_revert_state(peer);
-release_port:
- spin_unlock_bh(&peer->lock);
- fwserial_release_port(port, false);
-free_pkt:
- kfree(pkt);
- return err;
-}
-
-/*
- * fwserial_close_port -
- * HUP the tty (if the tty exists) and unregister the tty device.
- * Only used by the unit driver upon unit removal to disconnect and
- * cleanup all attached ports
- *
- * The port reference is put by fwtty_cleanup (if a reference was
- * ever taken).
- */
-static void fwserial_close_port(struct tty_driver *driver,
- struct fwtty_port *port)
-{
- struct tty_struct *tty;
-
- mutex_lock(&port->port.mutex);
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_vhangup(tty);
- tty_kref_put(tty);
- }
- mutex_unlock(&port->port.mutex);
-
- if (driver == fwloop_driver)
- tty_unregister_device(driver, loop_idx(port));
- else
- tty_unregister_device(driver, port->index);
-}
-
-/**
- * fwserial_lookup - finds first fw_serial associated with card
- * @card: fw_card to match
- *
- * NB: caller must be holding fwserial_list_mutex
- */
-static struct fw_serial *fwserial_lookup(struct fw_card *card)
-{
- struct fw_serial *serial;
-
- list_for_each_entry(serial, &fwserial_list, list) {
- if (card == serial->card)
- return serial;
- }
-
- return NULL;
-}
-
-/**
- * __fwserial_lookup_rcu - finds first fw_serial associated with card
- * @card: fw_card to match
- *
- * NB: caller must be inside rcu_read_lock() section
- */
-static struct fw_serial *__fwserial_lookup_rcu(struct fw_card *card)
-{
- struct fw_serial *serial;
-
- list_for_each_entry_rcu(serial, &fwserial_list, list) {
- if (card == serial->card)
- return serial;
- }
-
- return NULL;
-}
-
-/*
- * __fwserial_peer_by_node_id - finds a peer matching the given generation + id
- *
- * If a matching peer could not be found for the specified generation/node id,
- * this could be because:
- * a) the generation has changed and one of the nodes hasn't updated yet
- * b) the remote node has created its remote unit device before this
- * local node has created its corresponding remote unit device
- * In either case, the remote node should retry
- *
- * Note: caller must be in rcu_read_lock() section
- */
-static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
- int generation, int id)
-{
- struct fw_serial *serial;
- struct fwtty_peer *peer;
-
- serial = __fwserial_lookup_rcu(card);
- if (!serial) {
- /*
- * Something is very wrong - there should be a matching
- * fw_serial structure for every fw_card. Maybe the remote node
- * has created its remote unit device before this driver has
- * been probed for any unit devices...
- */
- fwtty_err(card, "unknown card (guid %016llx)\n",
- (unsigned long long)card->guid);
- return NULL;
- }
-
- list_for_each_entry_rcu(peer, &serial->peer_list, list) {
- int g = peer->generation;
-
- smp_rmb();
- if (generation == g && id == peer->node_id)
- return peer;
- }
-
- return NULL;
-}
-
-#ifdef DEBUG
-static void __dump_peer_list(struct fw_card *card)
-{
- struct fw_serial *serial;
- struct fwtty_peer *peer;
-
- serial = __fwserial_lookup_rcu(card);
- if (!serial)
- return;
-
- list_for_each_entry_rcu(peer, &serial->peer_list, list) {
- int g = peer->generation;
-
- smp_rmb();
- fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n",
- g, peer->node_id, (unsigned long long)peer->guid);
- }
-}
-#else
-#define __dump_peer_list(s)
-#endif
-
-static void fwserial_auto_connect(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(to_delayed_work(work), connect);
- int err;
-
- err = fwserial_connect_peer(peer);
- if (err == -EAGAIN && ++peer->connect_retries < MAX_CONNECT_RETRIES)
- schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
-}
-
-static void fwserial_peer_workfn(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
-
- peer->workfn(work);
-}
-
-/**
- * fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer'
- * @serial: aggregate representing the specific fw_card to add the peer to
- * @unit: 'peer' to create and add to peer_list of serial
- *
- * Adds a 'peer' (ie, a local or remote 'serial' unit device) to the list of
- * peers for a specific fw_card. Optionally, auto-attach this peer to an
- * available tty port. This function is called either directly or indirectly
- * as a result of a 'serial' unit device being created & probed.
- *
- * Note: this function is serialized with fwserial_remove_peer() by the
- * fwserial_list_mutex held in fwserial_probe().
- *
- * A 1:1 correspondence between an fw_unit and an fwtty_peer is maintained
- * via the dev_set_drvdata() for the device of the fw_unit.
- */
-static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
-{
- struct device *dev = &unit->device;
- struct fw_device *parent = fw_parent_device(unit);
- struct fwtty_peer *peer;
- struct fw_csr_iterator ci;
- int key, val;
- int generation;
-
- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
- if (!peer)
- return -ENOMEM;
-
- peer_set_state(peer, FWPS_NOT_ATTACHED);
-
- dev_set_drvdata(dev, peer);
- peer->unit = unit;
- peer->guid = (u64)parent->config_rom[3] << 32 | parent->config_rom[4];
- peer->speed = parent->max_speed;
- peer->max_payload = min(device_max_receive(parent),
- link_speed_to_max_payload(peer->speed));
-
- generation = parent->generation;
- smp_rmb();
- peer->node_id = parent->node_id;
- smp_wmb();
- peer->generation = generation;
-
- /* retrieve the mgmt bus addr from the unit directory */
- fw_csr_iterator_init(&ci, unit->directory);
- while (fw_csr_iterator_next(&ci, &key, &val)) {
- if (key == (CSR_OFFSET | CSR_DEPENDENT_INFO)) {
- peer->mgmt_addr = CSR_REGISTER_BASE + 4 * val;
- break;
- }
- }
- if (peer->mgmt_addr == 0ULL) {
- /*
- * No mgmt address effectively disables VIRT_CABLE_PLUG -
- * this peer will not be able to attach to a remote
- */
- peer_set_state(peer, FWPS_NO_MGMT_ADDR);
- }
-
- spin_lock_init(&peer->lock);
- peer->port = NULL;
-
- timer_setup(&peer->timer, fwserial_plug_timeout, 0);
- INIT_WORK(&peer->work, fwserial_peer_workfn);
- INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
-
- /* associate peer with specific fw_card */
- peer->serial = serial;
- list_add_rcu(&peer->list, &serial->peer_list);
-
- fwtty_info(&peer->unit, "peer added (guid:%016llx)\n",
- (unsigned long long)peer->guid);
-
- /* identify the local unit & virt cable to loopback port */
- if (parent->is_local) {
- serial->self = peer;
- if (create_loop_dev) {
- struct fwtty_port *port;
-
- port = fwserial_claim_port(peer, num_ttys);
- if (!IS_ERR(port)) {
- struct virt_plug_params params;
-
- spin_lock_bh(&peer->lock);
- peer->port = port;
- fill_plug_params(&params, port);
- fwserial_virt_plug_complete(peer, &params);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(port);
- }
- }
-
- } else if (auto_connect) {
- /* auto-attach to remote units only (if policy allows) */
- schedule_delayed_work(&peer->connect, 1);
- }
-
- return 0;
-}
-
-/*
- * fwserial_remove_peer - remove a 'serial' unit device as a 'peer'
- *
- * Remove a 'peer' from its list of peers. This function is only
- * called by fwserial_remove() on bus removal of the unit device.
- *
- * Note: this function is serialized with fwserial_add_peer() by the
- * fwserial_list_mutex held in fwserial_remove().
- */
-static void fwserial_remove_peer(struct fwtty_peer *peer)
-{
- struct fwtty_port *port;
-
- spin_lock_bh(&peer->lock);
- peer_set_state(peer, FWPS_GONE);
- spin_unlock_bh(&peer->lock);
-
- cancel_delayed_work_sync(&peer->connect);
- cancel_work_sync(&peer->work);
-
- spin_lock_bh(&peer->lock);
- /* if this unit is the local unit, clear link */
- if (peer == peer->serial->self)
- peer->serial->self = NULL;
-
- /* cancel the request timeout timer (if running) */
- del_timer(&peer->timer);
-
- port = peer->port;
- peer->port = NULL;
-
- list_del_rcu(&peer->list);
-
- fwtty_info(&peer->unit, "peer removed (guid:%016llx)\n",
- (unsigned long long)peer->guid);
-
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, true);
-
- synchronize_rcu();
- kfree(peer);
-}
-
-/**
- * fwserial_create - init everything to create TTYs for a specific fw_card
- * @unit: fw_unit for first 'serial' unit device probed for this fw_card
- *
- * This function inits the aggregate structure (an fw_serial instance)
- * used to manage the TTY ports registered by a specific fw_card. Also, the
- * unit device is added as the first 'peer'.
- *
- * This unit device may represent a local unit device (as specified by the
- * config ROM unit directory) or it may represent a remote unit device
- * (as specified by the reading of the remote node's config ROM).
- *
- * Returns 0 to indicate "ownership" of the unit device, or a negative errno
- * value to indicate which error.
- */
-static int fwserial_create(struct fw_unit *unit)
-{
- struct fw_device *parent = fw_parent_device(unit);
- struct fw_card *card = parent->card;
- struct fw_serial *serial;
- struct fwtty_port *port;
- struct device *tty_dev;
- int i, j;
- int err;
-
- serial = kzalloc(sizeof(*serial), GFP_KERNEL);
- if (!serial)
- return -ENOMEM;
-
- kref_init(&serial->kref);
- serial->card = card;
- INIT_LIST_HEAD(&serial->peer_list);
-
- for (i = 0; i < num_ports; ++i) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto free_ports;
- }
- tty_port_init(&port->port);
- port->index = FWTTY_INVALID_INDEX;
- port->port.ops = &fwtty_port_ops;
- port->serial = serial;
- tty_buffer_set_limit(&port->port, 128 * 1024);
-
- spin_lock_init(&port->lock);
- INIT_DELAYED_WORK(&port->drain, fwtty_drain_tx);
- INIT_DELAYED_WORK(&port->emit_breaks, fwtty_emit_breaks);
- INIT_WORK(&port->hangup, fwtty_do_hangup);
- init_waitqueue_head(&port->wait_tx);
- port->max_payload = link_speed_to_max_payload(SCODE_100);
- dma_fifo_init(&port->tx_fifo);
-
- RCU_INIT_POINTER(port->peer, NULL);
- serial->ports[i] = port;
-
- /* get unique bus addr region for port's status & recv fifo */
- port->rx_handler.length = FWTTY_PORT_RXFIFO_LEN + 4;
- port->rx_handler.address_callback = fwtty_port_handler;
- port->rx_handler.callback_data = port;
- /*
- * XXX: use custom memory region above cpu physical memory addrs
- * this will ease porting to 64-bit firewire adapters
- */
- err = fw_core_add_address_handler(&port->rx_handler,
- &fw_high_memory_region);
- if (err) {
- tty_port_destroy(&port->port);
- kfree(port);
- goto free_ports;
- }
- }
- /* preserve i for error cleanup */
-
- err = fwtty_ports_add(serial);
- if (err) {
- fwtty_err(&unit, "no space in port table\n");
- goto free_ports;
- }
-
- for (j = 0; j < num_ttys; ++j) {
- tty_dev = tty_port_register_device(&serial->ports[j]->port,
- fwtty_driver,
- serial->ports[j]->index,
- card->device);
- if (IS_ERR(tty_dev)) {
- err = PTR_ERR(tty_dev);
- fwtty_err(&unit, "register tty device error (%d)\n",
- err);
- goto unregister_ttys;
- }
-
- serial->ports[j]->device = tty_dev;
- }
- /* preserve j for error cleanup */
-
- if (create_loop_dev) {
- struct device *loop_dev;
-
- loop_dev = tty_port_register_device(&serial->ports[j]->port,
- fwloop_driver,
- loop_idx(serial->ports[j]),
- card->device);
- if (IS_ERR(loop_dev)) {
- err = PTR_ERR(loop_dev);
- fwtty_err(&unit, "create loop device failed (%d)\n",
- err);
- goto unregister_ttys;
- }
- serial->ports[j]->device = loop_dev;
- serial->ports[j]->loopback = true;
- }
-
- if (!IS_ERR_OR_NULL(fwserial_debugfs)) {
- serial->debugfs = debugfs_create_dir(dev_name(&unit->device),
- fwserial_debugfs);
- if (!IS_ERR_OR_NULL(serial->debugfs)) {
- debugfs_create_file("peers", 0444, serial->debugfs,
- serial, &fwtty_peers_fops);
- debugfs_create_file("stats", 0444, serial->debugfs,
- serial, &fwtty_stats_fops);
- }
- }
-
- list_add_rcu(&serial->list, &fwserial_list);
-
- fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n",
- dev_name(card->device), (unsigned long long)card->guid);
-
- err = fwserial_add_peer(serial, unit);
- if (!err)
- return 0;
-
- fwtty_err(&unit, "unable to add peer unit device (%d)\n", err);
-
- /* fall-through to error processing */
- debugfs_remove_recursive(serial->debugfs);
-
- list_del_rcu(&serial->list);
- if (create_loop_dev)
- tty_unregister_device(fwloop_driver,
- loop_idx(serial->ports[j]));
-unregister_ttys:
- for (--j; j >= 0; --j)
- tty_unregister_device(fwtty_driver, serial->ports[j]->index);
- kref_put(&serial->kref, fwserial_destroy);
- return err;
-
-free_ports:
- for (--i; i >= 0; --i) {
- fw_core_remove_address_handler(&serial->ports[i]->rx_handler);
- tty_port_destroy(&serial->ports[i]->port);
- kfree(serial->ports[i]);
- }
- kfree(serial);
- return err;
-}
-
-/*
- * fwserial_probe: bus probe function for firewire 'serial' unit devices
- *
- * A 'serial' unit device is created and probed as a result of:
- * - declaring an ieee1394 bus id table for 'devices' matching a fabricated
- * 'serial' unit specifier id
- * - adding a unit directory to the config ROM(s) for a 'serial' unit
- *
- * The firewire core registers unit devices by enumerating unit directories
- * of a node's config ROM after reading the config ROM when a new node is
- * added to the bus topology after a bus reset.
- *
- * The practical implications of this are:
- * - this probe is called for both local and remote nodes that have a 'serial'
- * unit directory in their config ROM (that matches the specifiers in
- * fwserial_id_table).
- * - no specific order is enforced for local vs. remote unit devices
- *
- * This unit driver copes with the lack of specific order in the same way the
- * firewire net driver does -- each probe, for either a local or remote unit
- * device, is treated as a 'peer' (has a struct fwtty_peer instance) and the
- * first peer created for a given fw_card (tracked by the global fwserial_list)
- * creates the underlying TTYs (aggregated in a fw_serial instance).
- *
- * NB: an early attempt to differentiate local & remote unit devices by creating
- * peers only for remote units and fw_serial instances (with their
- * associated TTY devices) only for local units was discarded. Managing
- * the peer lifetimes on device removal proved too complicated.
- *
- * fwserial_probe/fwserial_remove are effectively serialized by the
- * fwserial_list_mutex. This is necessary because the addition of the first peer
- * for a given fw_card will trigger the creation of the fw_serial for that
- * fw_card, which must not simultaneously contend with the removal of the
- * last peer for a given fw_card triggering the destruction of the same
- * fw_serial for the same fw_card.
- */
-static int fwserial_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *id)
-{
- struct fw_serial *serial;
- int err;
-
- mutex_lock(&fwserial_list_mutex);
- serial = fwserial_lookup(fw_parent_device(unit)->card);
- if (!serial)
- err = fwserial_create(unit);
- else
- err = fwserial_add_peer(serial, unit);
- mutex_unlock(&fwserial_list_mutex);
- return err;
-}
-
-/*
- * fwserial_remove: bus removal function for firewire 'serial' unit devices
- *
- * The corresponding 'peer' for this unit device is removed from the list of
- * peers for the associated fw_serial (which has a 1:1 correspondence with a
- * specific fw_card). If this is the last peer being removed, then trigger
- * the destruction of the underlying TTYs.
- */
-static void fwserial_remove(struct fw_unit *unit)
-{
- struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
- struct fw_serial *serial = peer->serial;
- int i;
-
- mutex_lock(&fwserial_list_mutex);
- fwserial_remove_peer(peer);
-
- if (list_empty(&serial->peer_list)) {
- /* unlink from the fwserial_list here */
- list_del_rcu(&serial->list);
-
- debugfs_remove_recursive(serial->debugfs);
-
- for (i = 0; i < num_ttys; ++i)
- fwserial_close_port(fwtty_driver, serial->ports[i]);
- if (create_loop_dev)
- fwserial_close_port(fwloop_driver, serial->ports[i]);
- kref_put(&serial->kref, fwserial_destroy);
- }
- mutex_unlock(&fwserial_list_mutex);
-}
-
-/*
- * fwserial_update: bus update function for 'firewire' serial unit devices
- *
- * Updates the new node_id and bus generation for this peer. Note that locking
- * is unnecessary; but careful memory barrier usage is important to enforce the
- * load and store order of generation & node_id.
- *
- * The fw-core orders the write of node_id before generation in the parent
- * fw_device to ensure that a stale node_id cannot be used with a current
- * bus generation. So the generation value must be read before the node_id.
- *
- * In turn, this orders the write of node_id before generation in the peer to
- * also ensure a stale node_id cannot be used with a current bus generation.
- */
-static void fwserial_update(struct fw_unit *unit)
-{
- struct fw_device *parent = fw_parent_device(unit);
- struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
- int generation;
-
- generation = parent->generation;
- smp_rmb();
- peer->node_id = parent->node_id;
- smp_wmb();
- peer->generation = generation;
-}
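
/*
 * Illustrative sketch (not from the removed driver): the store/load
 * pairing that fwserial_update() and __fwserial_peer_by_node_id() rely
 * on. The writer orders the node_id store before the generation store
 * with smp_wmb(); the reader loads generation first and issues smp_rmb()
 * before loading node_id, so a new generation is never paired with a
 * stale node_id.
 */
static void example_publish_ids(struct fwtty_peer *peer, int node_id,
				int generation)
{
	peer->node_id = node_id;	/* store node_id first        */
	smp_wmb();			/* order it before ...        */
	peer->generation = generation;	/* ... the generation store   */
}

static bool example_ids_current(struct fwtty_peer *peer, int generation, int id)
{
	int g = peer->generation;	/* load generation first */

	smp_rmb();			/* pairs with smp_wmb() above */
	return generation == g && id == peer->node_id;
}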
-
-static const struct ieee1394_device_id fwserial_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .specifier_id = LINUX_VENDOR_ID,
- .version = FWSERIAL_VERSION,
- },
- { }
-};
-
-static struct fw_driver fwserial_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = KBUILD_MODNAME,
- .bus = &fw_bus_type,
- },
- .probe = fwserial_probe,
- .update = fwserial_update,
- .remove = fwserial_remove,
- .id_table = fwserial_id_table,
-};
-
-#define FW_UNIT_SPECIFIER(id) ((CSR_SPECIFIER_ID << 24) | (id))
-#define FW_UNIT_VERSION(ver) ((CSR_VERSION << 24) | (ver))
-#define FW_UNIT_ADDRESS(ofs) (((CSR_OFFSET | CSR_DEPENDENT_INFO) << 24) \
- | (((ofs) - CSR_REGISTER_BASE) >> 2))
-/* XXX: config ROM definitions could be improved with semi-automated offset
- * and length calculation
- */
-#define FW_ROM_LEN(quads) ((quads) << 16)
-#define FW_ROM_DESCRIPTOR(ofs) (((CSR_LEAF | CSR_DESCRIPTOR) << 24) | (ofs))
-
-struct fwserial_unit_directory_data {
- u32 len_crc;
- u32 unit_specifier;
- u32 unit_sw_version;
- u32 unit_addr_offset;
- u32 desc1_ofs;
- u32 desc1_len_crc;
- u32 desc1_data[5];
-} __packed;
-
-static struct fwserial_unit_directory_data fwserial_unit_directory_data = {
- .len_crc = FW_ROM_LEN(4),
- .unit_specifier = FW_UNIT_SPECIFIER(LINUX_VENDOR_ID),
- .unit_sw_version = FW_UNIT_VERSION(FWSERIAL_VERSION),
- .desc1_ofs = FW_ROM_DESCRIPTOR(1),
- .desc1_len_crc = FW_ROM_LEN(5),
- .desc1_data = {
- 0x00000000, /* type = text */
- 0x00000000, /* enc = ASCII, lang EN */
- 0x4c696e75, /* 'Linux TTY' */
- 0x78205454,
- 0x59000000,
- },
-};
-
-static struct fw_descriptor fwserial_unit_directory = {
- .length = sizeof(fwserial_unit_directory_data) / sizeof(u32),
- .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
- .data = (u32 *)&fwserial_unit_directory_data,
-};
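
/*
 * Illustrative note (not from the removed driver): the text descriptor
 * above packs the ASCII string "Linux TTY" big-endian, four characters
 * per quadlet, zero-padded:
 *
 *   0x4c696e75  ->  'L' 'i' 'n' 'u'
 *   0x78205454  ->  'x' ' ' 'T' 'T'
 *   0x59000000  ->  'Y' \0  \0  \0
 *
 * A hypothetical helper that produces such quadlets from a C string:
 */
static void example_pack_text_leaf(const char *s, u32 *quad, size_t nquads)
{
	size_t i, j;

	for (i = 0; i < nquads; i++) {
		u32 v = 0;

		for (j = 0; j < 4; j++) {
			v <<= 8;
			if (*s)
				v |= (u8)*s++;
		}
		quad[i] = v;
	}
}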
-
-/*
- * The management address is in the unit space region but above other known
- * address users (to keep wild writes from causing havoc)
- */
-static const struct fw_address_region fwserial_mgmt_addr_region = {
- .start = CSR_REGISTER_BASE + 0x1e0000ULL,
- .end = 0x1000000000000ULL,
-};
-
-static struct fw_address_handler fwserial_mgmt_addr_handler;
-
-/**
- * fwserial_handle_plug_req - handle VIRT_CABLE_PLUG request work
- * @work: ptr to peer->work
- *
- * Attempts to complete the VIRT_CABLE_PLUG handshake sequence for this peer.
- *
- * This checks for a collided request-- ie, that a VIRT_CABLE_PLUG request was
- * already sent to this peer. If so, the collision is resolved by comparing
- * guid values; the loser sends the plug response.
- *
- * Note: if an error prevents a response, don't do anything -- the
- * remote will timeout its request.
- */
-static void fwserial_handle_plug_req(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
- struct virt_plug_params *plug_req = &peer->work_params.plug_req;
- struct fwtty_port *port;
- struct fwserial_mgmt_pkt *pkt;
- int rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return;
-
- port = fwserial_find_port(peer);
-
- spin_lock_bh(&peer->lock);
-
- switch (peer->state) {
- case FWPS_NOT_ATTACHED:
- if (!port) {
- fwtty_err(&peer->unit, "no more ports avail\n");
- fill_plug_rsp_nack(pkt);
- } else {
- peer->port = port;
- fill_plug_rsp_ok(pkt, peer->port);
- peer_set_state(peer, FWPS_PLUG_RESPONDING);
- /* don't release claimed port */
- port = NULL;
- }
- break;
-
- case FWPS_PLUG_PENDING:
- if (peer->serial->card->guid > peer->guid)
- goto cleanup;
-
- /* We lost - hijack the already-claimed port and send ok */
- del_timer(&peer->timer);
- fill_plug_rsp_ok(pkt, peer->port);
- peer_set_state(peer, FWPS_PLUG_RESPONDING);
- break;
-
- default:
- fill_plug_rsp_nack(pkt);
- }
-
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, false);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_PLUG_RESPONDING) {
- if (rcode == RCODE_COMPLETE) {
- struct fwtty_port *tmp = peer->port;
-
- fwserial_virt_plug_complete(peer, plug_req);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(tmp);
- spin_lock_bh(&peer->lock);
- } else {
- fwtty_err(&peer->unit, "PLUG_RSP error (%d)\n", rcode);
- port = peer_revert_state(peer);
- }
- }
-cleanup:
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, false);
- kfree(pkt);
-}
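
/*
 * Illustrative sketch (not from the removed driver): the tie-break rule
 * used above (and in the unplug path below) when both peers have issued
 * a request at the same time. The side with the numerically larger card
 * guid keeps its request pending; the other side abandons its request
 * and answers the remote one instead.
 */
static inline bool example_local_wins_collision(const struct fwtty_peer *peer)
{
	return peer->serial->card->guid > peer->guid;
}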
-
-static void fwserial_handle_unplug_req(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
- struct fwtty_port *port = NULL;
- struct fwserial_mgmt_pkt *pkt;
- int rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return;
-
- spin_lock_bh(&peer->lock);
-
- switch (peer->state) {
- case FWPS_ATTACHED:
- fill_unplug_rsp_ok(pkt);
- peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
- break;
-
- case FWPS_UNPLUG_PENDING:
- if (peer->serial->card->guid > peer->guid)
- goto cleanup;
-
- /* We lost - send unplug rsp */
- del_timer(&peer->timer);
- fill_unplug_rsp_ok(pkt);
- peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
- break;
-
- default:
- fill_unplug_rsp_nack(pkt);
- }
-
- spin_unlock_bh(&peer->lock);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_UNPLUG_RESPONDING) {
- if (rcode != RCODE_COMPLETE)
- fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)\n",
- rcode);
- port = peer_revert_state(peer);
- }
-cleanup:
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, true);
- kfree(pkt);
-}
-
-static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
- struct fwserial_mgmt_pkt *pkt,
- unsigned long long addr,
- size_t len)
-{
- struct fwtty_port *port = NULL;
- bool reset = false;
- int rcode;
-
- if (addr != fwserial_mgmt_addr_handler.offset || len < sizeof(pkt->hdr))
- return RCODE_ADDRESS_ERROR;
-
- if (len != be16_to_cpu(pkt->hdr.len) ||
- len != mgmt_pkt_expected_len(pkt->hdr.code))
- return RCODE_DATA_ERROR;
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_GONE) {
- /*
- * This should never happen - it would mean that the
- * remote unit that just wrote this transaction was
- * already removed from the bus -- and the removal was
- * processed before we rec'd this transaction
- */
- fwtty_err(&peer->unit, "peer already removed\n");
- spin_unlock_bh(&peer->lock);
- return RCODE_ADDRESS_ERROR;
- }
-
- rcode = RCODE_COMPLETE;
-
- fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04x\n", pkt->hdr.code);
-
- switch (be16_to_cpu(pkt->hdr.code) & FWSC_CODE_MASK) {
- case FWSC_VIRT_CABLE_PLUG:
- if (work_pending(&peer->work)) {
- fwtty_err(&peer->unit, "plug req: busy\n");
- rcode = RCODE_CONFLICT_ERROR;
-
- } else {
- peer->work_params.plug_req = pkt->plug_req;
- peer->workfn = fwserial_handle_plug_req;
- queue_work(system_unbound_wq, &peer->work);
- }
- break;
-
- case FWSC_VIRT_CABLE_PLUG_RSP:
- if (peer->state != FWPS_PLUG_PENDING) {
- rcode = RCODE_CONFLICT_ERROR;
-
- } else if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK) {
- fwtty_notice(&peer->unit, "NACK plug rsp\n");
- port = peer_revert_state(peer);
-
- } else {
- struct fwtty_port *tmp = peer->port;
-
- fwserial_virt_plug_complete(peer, &pkt->plug_rsp);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(tmp);
- spin_lock_bh(&peer->lock);
- }
- break;
-
- case FWSC_VIRT_CABLE_UNPLUG:
- if (work_pending(&peer->work)) {
- fwtty_err(&peer->unit, "unplug req: busy\n");
- rcode = RCODE_CONFLICT_ERROR;
- } else {
- peer->workfn = fwserial_handle_unplug_req;
- queue_work(system_unbound_wq, &peer->work);
- }
- break;
-
- case FWSC_VIRT_CABLE_UNPLUG_RSP:
- if (peer->state != FWPS_UNPLUG_PENDING) {
- rcode = RCODE_CONFLICT_ERROR;
- } else {
- if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK)
- fwtty_notice(&peer->unit, "NACK unplug?\n");
- port = peer_revert_state(peer);
- reset = true;
- }
- break;
-
- default:
- fwtty_err(&peer->unit, "unknown mgmt code %d\n",
- be16_to_cpu(pkt->hdr.code));
- rcode = RCODE_DATA_ERROR;
- }
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, reset);
-
- return rcode;
-}
-
-/*
- * fwserial_mgmt_handler: bus address handler for mgmt requests
- *
- * This handler is responsible for handling virtual cable requests from remotes
- * for all cards.
- */
-static void fwserial_mgmt_handler(struct fw_card *card,
- struct fw_request *request,
- int tcode, int destination, int source,
- int generation,
- unsigned long long addr,
- void *data, size_t len,
- void *callback_data)
-{
- struct fwserial_mgmt_pkt *pkt = data;
- struct fwtty_peer *peer;
- int rcode;
-
- rcu_read_lock();
- peer = __fwserial_peer_by_node_id(card, generation, source);
- if (!peer) {
- fwtty_dbg(card, "peer(%d:%x) not found\n", generation, source);
- __dump_peer_list(card);
- rcode = RCODE_CONFLICT_ERROR;
-
- } else {
- switch (tcode) {
- case TCODE_WRITE_BLOCK_REQUEST:
- rcode = fwserial_parse_mgmt_write(peer, pkt, addr, len);
- break;
-
- default:
- rcode = RCODE_TYPE_ERROR;
- }
- }
-
- rcu_read_unlock();
- fw_send_response(card, request, rcode);
-}
-
-static int __init fwserial_init(void)
-{
- int err, num_loops = !!(create_loop_dev);
-
- /* XXX: placeholder for a "firewire" debugfs node */
- fwserial_debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
-
- /* num_ttys/num_ports must not be set above the static alloc avail */
- if (num_ttys + num_loops > MAX_CARD_PORTS)
- num_ttys = MAX_CARD_PORTS - num_loops;
-
- num_ports = num_ttys + num_loops;
-
- fwtty_driver = tty_alloc_driver(MAX_TOTAL_PORTS, TTY_DRIVER_REAL_RAW
- | TTY_DRIVER_DYNAMIC_DEV);
- if (IS_ERR(fwtty_driver)) {
- err = PTR_ERR(fwtty_driver);
- goto remove_debugfs;
- }
-
- fwtty_driver->driver_name = KBUILD_MODNAME;
- fwtty_driver->name = tty_dev_name;
- fwtty_driver->major = 0;
- fwtty_driver->minor_start = 0;
- fwtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- fwtty_driver->subtype = SERIAL_TYPE_NORMAL;
- fwtty_driver->init_termios = tty_std_termios;
- fwtty_driver->init_termios.c_cflag |= CLOCAL;
- tty_set_operations(fwtty_driver, &fwtty_ops);
-
- err = tty_register_driver(fwtty_driver);
- if (err) {
- pr_err("register tty driver failed (%d)\n", err);
- goto put_tty;
- }
-
- if (create_loop_dev) {
- fwloop_driver = tty_alloc_driver(MAX_TOTAL_PORTS / num_ports,
- TTY_DRIVER_REAL_RAW
- | TTY_DRIVER_DYNAMIC_DEV);
- if (IS_ERR(fwloop_driver)) {
- err = PTR_ERR(fwloop_driver);
- goto unregister_driver;
- }
-
- fwloop_driver->driver_name = KBUILD_MODNAME "_loop";
- fwloop_driver->name = loop_dev_name;
- fwloop_driver->major = 0;
- fwloop_driver->minor_start = 0;
- fwloop_driver->type = TTY_DRIVER_TYPE_SERIAL;
- fwloop_driver->subtype = SERIAL_TYPE_NORMAL;
- fwloop_driver->init_termios = tty_std_termios;
- fwloop_driver->init_termios.c_cflag |= CLOCAL;
- tty_set_operations(fwloop_driver, &fwloop_ops);
-
- err = tty_register_driver(fwloop_driver);
- if (err) {
- pr_err("register loop driver failed (%d)\n", err);
- goto put_loop;
- }
- }
-
- fwtty_txn_cache = kmem_cache_create("fwtty_txn_cache",
- sizeof(struct fwtty_transaction),
- 0, 0, NULL);
- if (!fwtty_txn_cache) {
- err = -ENOMEM;
- goto unregister_loop;
- }
-
- /*
- * Ideally, this address handler would be registered per local node
- * (rather than the same handler for all local nodes). However,
- * since the firewire core requires the config rom descriptor *before*
- * the local unit device(s) are created, a single management handler
- * must suffice for all local serial units.
- */
- fwserial_mgmt_addr_handler.length = sizeof(struct fwserial_mgmt_pkt);
- fwserial_mgmt_addr_handler.address_callback = fwserial_mgmt_handler;
-
- err = fw_core_add_address_handler(&fwserial_mgmt_addr_handler,
- &fwserial_mgmt_addr_region);
- if (err) {
- pr_err("add management handler failed (%d)\n", err);
- goto destroy_cache;
- }
-
- fwserial_unit_directory_data.unit_addr_offset =
- FW_UNIT_ADDRESS(fwserial_mgmt_addr_handler.offset);
- err = fw_core_add_descriptor(&fwserial_unit_directory);
- if (err) {
- pr_err("add unit descriptor failed (%d)\n", err);
- goto remove_handler;
- }
-
- err = driver_register(&fwserial_driver.driver);
- if (err) {
- pr_err("register fwserial driver failed (%d)\n", err);
- goto remove_descriptor;
- }
-
- return 0;
-
-remove_descriptor:
- fw_core_remove_descriptor(&fwserial_unit_directory);
-remove_handler:
- fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
-destroy_cache:
- kmem_cache_destroy(fwtty_txn_cache);
-unregister_loop:
- if (create_loop_dev)
- tty_unregister_driver(fwloop_driver);
-put_loop:
- if (create_loop_dev)
- tty_driver_kref_put(fwloop_driver);
-unregister_driver:
- tty_unregister_driver(fwtty_driver);
-put_tty:
- tty_driver_kref_put(fwtty_driver);
-remove_debugfs:
- debugfs_remove_recursive(fwserial_debugfs);
-
- return err;
-}
-
-static void __exit fwserial_exit(void)
-{
- driver_unregister(&fwserial_driver.driver);
- fw_core_remove_descriptor(&fwserial_unit_directory);
- fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
- kmem_cache_destroy(fwtty_txn_cache);
- if (create_loop_dev) {
- tty_unregister_driver(fwloop_driver);
- tty_driver_kref_put(fwloop_driver);
- }
- tty_unregister_driver(fwtty_driver);
- tty_driver_kref_put(fwtty_driver);
- debugfs_remove_recursive(fwserial_debugfs);
-}
-
-module_init(fwserial_init);
-module_exit(fwserial_exit);
-
-MODULE_AUTHOR("Peter Hurley (peter@hurleysoftware.com)");
-MODULE_DESCRIPTION("FireWire Serial TTY Driver");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(ieee1394, fwserial_id_table);
-MODULE_PARM_DESC(ttys, "Number of ttys to create for each local firewire node");
-MODULE_PARM_DESC(auto, "Auto-connect a tty to each firewire node discovered");
-MODULE_PARM_DESC(loop, "Create a loopback device, fwloop<n>, with ttys");
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
deleted file mode 100644
index 1d15f183e0fa..000000000000
--- a/drivers/staging/fwserial/fwserial.h
+++ /dev/null
@@ -1,359 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _FIREWIRE_FWSERIAL_H
-#define _FIREWIRE_FWSERIAL_H
-
-#include <linux/kernel.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/list.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/spinlock.h>
-#include <linux/rcupdate.h>
-#include <linux/mutex.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-
-#include "dma_fifo.h"
-
-#ifdef FWTTY_PROFILING
-#define DISTRIBUTION_MAX_SIZE 8192
-#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
-static inline void fwtty_profile_data(unsigned int stat[], unsigned int val)
-{
- int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
- ++stat[n];
-}
-#else
-#define DISTRIBUTION_MAX_INDEX 0
-#define fwtty_profile_data(st, n)
-#endif
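
/*
 * Illustrative note (not from the removed driver): fwtty_profile_data()
 * above builds a power-of-two histogram. With DISTRIBUTION_MAX_SIZE = 8192
 * (so DISTRIBUTION_MAX_INDEX = 14):
 *
 *   val = 0     ->  stat[0]++
 *   val = 1     ->  stat[1]++    (ilog2(1) + 1 = 1)
 *   val = 100   ->  stat[7]++    (ilog2(100) + 1 = 7)
 *   val = 9000  ->  stat[14]++   (clamped to DISTRIBUTION_MAX_INDEX)
 */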
-
-/* Parameters for both VIRT_CABLE_PLUG & VIRT_CABLE_PLUG_RSP mgmt codes */
-struct virt_plug_params {
- __be32 status_hi;
- __be32 status_lo;
- __be32 fifo_hi;
- __be32 fifo_lo;
- __be32 fifo_len;
-};
-
-struct peer_work_params {
- union {
- struct virt_plug_params plug_req;
- };
-};
-
-/**
- * fwtty_peer: structure representing local & remote unit devices
- * @unit: unit child device of fw_device node
- * @serial: back pointer to associated fw_serial aggregate
- * @guid: unique 64-bit guid for this unit device
- * @generation: most recent bus generation
- * @node_id: most recent node_id
- * @speed: link speed of peer (0 = S100, 2 = S400, ... 5 = S3200)
- * @mgmt_addr: bus addr region to write mgmt packets to
- * @status_addr: bus addr register to write line status to
- * @fifo_addr: bus addr region to write serial output to
- * @fifo_len: max length for single write to fifo_addr
- * @list: link for insertion into fw_serial's peer_list
- * @rcu: for deferring peer reclamation
- * @lock: spinlock to synchronize changes to state & port fields
- * @work: only one work item can be queued at any one time
- * Note: pending work is canceled prior to removal, so this
- * peer is valid for at least the lifetime of the work function
- * @work_params: parameter block for work functions
- * @timer: timer for resetting peer state if remote request times out
- * @state: current state
- * @connect: work item for auto-connecting
- * @connect_retries: # of connections already attempted
- * @port: associated tty_port (usable if state == FWSC_ATTACHED)
- */
-struct fwtty_peer {
- struct fw_unit *unit;
- struct fw_serial *serial;
- u64 guid;
- int generation;
- int node_id;
- unsigned int speed;
- int max_payload;
- u64 mgmt_addr;
-
- /* these are usable only if state == FWSC_ATTACHED */
- u64 status_addr;
- u64 fifo_addr;
- int fifo_len;
-
- struct list_head list;
- struct rcu_head rcu;
-
- spinlock_t lock;
- work_func_t workfn;
- struct work_struct work;
- struct peer_work_params work_params;
- struct timer_list timer;
- int state;
- struct delayed_work connect;
- int connect_retries;
-
- struct fwtty_port *port;
-};
-
-#define to_peer(ptr, field) (container_of(ptr, struct fwtty_peer, field))
-
-/* state values for fwtty_peer.state field */
-enum fwtty_peer_state {
- FWPS_GONE,
- FWPS_NOT_ATTACHED,
- FWPS_ATTACHED,
- FWPS_PLUG_PENDING,
- FWPS_PLUG_RESPONDING,
- FWPS_UNPLUG_PENDING,
- FWPS_UNPLUG_RESPONDING,
-
- FWPS_NO_MGMT_ADDR = -1,
-};
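
/*
 * Illustrative note (not from the removed driver): typical transitions
 * for the virtual-cable handshake, as driven by the plug/unplug work
 * functions and the request timeout in fwserial.c:
 *
 *   NOT_ATTACHED    -> PLUG_PENDING      local sends VIRT_CABLE_PLUG
 *   PLUG_PENDING    -> ATTACHED          remote answers with an OK PLUG_RSP
 *   NOT_ATTACHED    -> PLUG_RESPONDING   a remote plug request is accepted
 *   PLUG_RESPONDING -> ATTACHED          our PLUG_RSP write completes
 *   ATTACHED        -> UNPLUG_PENDING    local sends VIRT_CABLE_UNPLUG
 *   (any state)     -> GONE              unit device removed from the bus
 */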
-
-#define CONNECT_RETRY_DELAY HZ
-#define MAX_CONNECT_RETRIES 10
-
-/* must be holding peer lock for these state funclets */
-static inline void peer_set_state(struct fwtty_peer *peer, int new)
-{
- peer->state = new;
-}
-
-static inline struct fwtty_port *peer_revert_state(struct fwtty_peer *peer)
-{
- struct fwtty_port *port = peer->port;
-
- peer->port = NULL;
- peer_set_state(peer, FWPS_NOT_ATTACHED);
- return port;
-}
-
-struct fwserial_mgmt_pkt {
- struct {
- __be16 len;
- __be16 code;
- } hdr;
- union {
- struct virt_plug_params plug_req;
- struct virt_plug_params plug_rsp;
- };
-} __packed;
-
-/* fwserial_mgmt_packet codes */
-#define FWSC_RSP_OK 0x0000
-#define FWSC_RSP_NACK 0x8000
-#define FWSC_CODE_MASK 0x0fff
-
-#define FWSC_VIRT_CABLE_PLUG 1
-#define FWSC_VIRT_CABLE_UNPLUG 2
-#define FWSC_VIRT_CABLE_PLUG_RSP 3
-#define FWSC_VIRT_CABLE_UNPLUG_RSP 4
-
-/* 1 min. plug timeout -- suitable for userland authorization */
-#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ)
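
/*
 * Illustrative sketch (not from the removed driver; the real helpers are
 * the fill_plug_*() functions in fwserial.c): how a VIRT_CABLE_PLUG
 * request might be assembled from the definitions above. hdr.len carries
 * the total packet length and hdr.code the FWSC_* code, both big-endian;
 * the body advertises the local status/fifo bus addresses.
 */
static void example_fill_plug_req(struct fwserial_mgmt_pkt *pkt,
				  u64 status_addr, u64 fifo_addr, int fifo_len)
{
	pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG);
	pkt->hdr.len = cpu_to_be16(sizeof(pkt->hdr) + sizeof(pkt->plug_req));
	pkt->plug_req.status_hi = cpu_to_be32(upper_32_bits(status_addr));
	pkt->plug_req.status_lo = cpu_to_be32(lower_32_bits(status_addr));
	pkt->plug_req.fifo_hi = cpu_to_be32(upper_32_bits(fifo_addr));
	pkt->plug_req.fifo_lo = cpu_to_be32(lower_32_bits(fifo_addr));
	pkt->plug_req.fifo_len = cpu_to_be32(fifo_len);
}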
-
-struct stats {
- unsigned int xchars;
- unsigned int dropped;
- unsigned int tx_stall;
- unsigned int fifo_errs;
- unsigned int sent;
- unsigned int lost;
- unsigned int throttled;
- unsigned int reads[DISTRIBUTION_MAX_INDEX + 1];
- unsigned int writes[DISTRIBUTION_MAX_INDEX + 1];
- unsigned int txns[DISTRIBUTION_MAX_INDEX + 1];
- unsigned int unthrottle[DISTRIBUTION_MAX_INDEX + 1];
-};
-
-struct fwconsole_ops {
- void (*notify)(int code, void *data);
- void (*stats)(struct stats *stats, void *data);
- void (*proc_show)(struct seq_file *m, void *data);
-};
-
-/* codes for console ops notify */
-#define FWCON_NOTIFY_ATTACH 1
-#define FWCON_NOTIFY_DETACH 2
-
-/**
- * fwtty_port: structure used to track/represent underlying tty_port
- * @port: underlying tty_port
- * @device: tty device
- * @index: index into port_table for this particular port
- * note: minor = index + minor_start assigned by tty_alloc_driver()
- * @serial: back pointer to the containing fw_serial
- * @rx_handler: bus address handler for unique addr region used by remotes
- * to communicate with this port. Every port uses
- * fwtty_port_handler() for per port transactions.
- * @fwcon_ops: ops for attached fw_console (if any)
- * @con_data: private data for fw_console
- * @wait_tx: waitqueue for sleeping until writer/drain completes tx
- * @emit_breaks: delayed work responsible for generating breaks when the
- * break line status is active
- * @cps : characters per second computed from the termios settings
- * @break_last: timestamp in jiffies from last emit_breaks
- * @hangup: work responsible for HUPing when carrier is dropped/lost
- * @mstatus: loose virtualization of LSR/MSR
- * bits 15..0 correspond to TIOCM_* bits
- * bits 19..16 reserved for mctrl
- * bit 20 OOB_TX_THROTTLE
- * bits 23..21 reserved
- * bits 31..24 correspond to UART_LSR_* bits
- * @lock: spinlock for protecting concurrent access to fields below it
- * @mctrl: loose virtualization of MCR
- * bits 15..0 correspond to TIOCM_* bits
- * bit 16 OOB_RX_THROTTLE
- * bits 19..17 reserved
- * bits 31..20 reserved for mstatus
- * @drain: delayed work scheduled to ensure that writes are flushed.
- * The work can race with the writer but concurrent sending is
- * prevented with the IN_TX flag. Scheduled under lock to
- * limit scheduling when fifo has just been drained.
- * @tx_fifo: fifo used to store & block-up writes for dma to remote
- * @max_payload: max bytes transmissible per dma (based on peer's max_payload)
- * @status_mask: UART_LSR_* bitmask significant to rx (based on termios)
- * @ignore_mask: UART_LSR_* bitmask of states to ignore (also based on termios)
- * @break_ctl: if set, port is 'sending break' to remote
- * @write_only: self-explanatory
- * @overrun: previous rx was lost (partially or completely)
- * @loopback: if set, port is in loopback mode
- * @flags: atomic bit flags
- * bit 0: IN_TX - gate to allow only one cpu to send from the dma fifo
- * at a time.
- * bit 1: STOP_TX - force tx to exit while sending
- * @peer: rcu-pointer to associated fwtty_peer (if attached)
- * NULL if no peer attached
- * @icount: predefined statistics reported by the TIOCGICOUNT ioctl
- * @stats: additional statistics reported in /proc/tty/driver/firewire_serial
- */
-struct fwtty_port {
- struct tty_port port;
- struct device *device;
- unsigned int index;
- struct fw_serial *serial;
- struct fw_address_handler rx_handler;
-
- struct fwconsole_ops *fwcon_ops;
- void *con_data;
-
- wait_queue_head_t wait_tx;
- struct delayed_work emit_breaks;
- unsigned int cps;
- unsigned long break_last;
-
- struct work_struct hangup;
-
- unsigned int mstatus;
-
- spinlock_t lock;
- unsigned int mctrl;
- struct delayed_work drain;
- struct dma_fifo tx_fifo;
- int max_payload;
- unsigned int status_mask;
- unsigned int ignore_mask;
- unsigned int break_ctl:1,
- write_only:1,
- overrun:1,
- loopback:1;
- unsigned long flags;
-
- struct fwtty_peer __rcu *peer;
-
- struct async_icount icount;
- struct stats stats;
-};
-
-#define to_port(ptr, field) (container_of(ptr, struct fwtty_port, field))
-
-/* bit #s for flags field */
-#define IN_TX 0
-#define STOP_TX 1
-
-/* bitmasks for special mctrl/mstatus bits */
-#define OOB_RX_THROTTLE 0x00010000
-#define MCTRL_RSRVD 0x000e0000
-#define OOB_TX_THROTTLE 0x00100000
-#define MSTATUS_RSRVD 0x00e00000
-
-#define MCTRL_MASK (TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | TIOCM_OUT2 | \
- TIOCM_LOOP | OOB_RX_THROTTLE | MCTRL_RSRVD)
-
-/* XXX: even every 1/50th of a second may be unnecessarily accurate */
-/* delay in jiffies between brk emits */
-#define FREQ_BREAKS (HZ / 50)
-
-/* Ports are allocated in blocks of num_ports for each fw_card */
-#define MAX_CARD_PORTS CONFIG_FWTTY_MAX_CARD_PORTS
-#define MAX_TOTAL_PORTS CONFIG_FWTTY_MAX_TOTAL_PORTS
-
-/* tuning parameters */
-#define FWTTY_PORT_TXFIFO_LEN 4096
-#define FWTTY_PORT_MAX_PEND_DMA 8 /* costs a cache line per pend */
-#define DRAIN_THRESHOLD 1024
-#define MAX_ASYNC_PAYLOAD 4096 /* ohci-defined limit */
-#define WRITER_MINIMUM 128
-/* TODO: how to set watermark to AR context size? see fwtty_rx() */
-#define HIGH_WATERMARK 32768 /* AR context is 32K */
-
-/*
- * Size of bus addr region above 4GB used per port as the recv addr
- * - must be at least as big as the MAX_ASYNC_PAYLOAD
- */
-#define FWTTY_PORT_RXFIFO_LEN MAX_ASYNC_PAYLOAD
-
-/**
- * fw_serial: aggregate used to associate tty ports with specific fw_card
- * @card: fw_card associated with this fw_serial device (1:1 association)
- * @kref: reference-counted multi-port management allows delayed destroy
- * @self: local unit device as 'peer'. Not valid until local unit device
- * is enumerated.
- * @list: link for insertion into fwserial_list
- * @peer_list: list of local & remote unit devices attached to this card
- * @ports: fixed array of tty_ports provided by this serial device
- */
-struct fw_serial {
- struct fw_card *card;
- struct kref kref;
-
- struct dentry *debugfs;
- struct fwtty_peer *self;
-
- struct list_head list;
- struct list_head peer_list;
-
- struct fwtty_port *ports[MAX_CARD_PORTS];
-};
-
-#define to_serial(ptr, field) (container_of(ptr, struct fw_serial, field))
-
-#define TTY_DEV_NAME "fwtty" /* ttyFW was taken */
-static const char tty_dev_name[] = TTY_DEV_NAME;
-static const char loop_dev_name[] = "fwloop";
-
-extern struct tty_driver *fwtty_driver;
-
-/*
- * Returns the max send async payload size in bytes based on the unit device
- * link speed. Self-limiting asynchronous bandwidth (via reducing the payload)
- * is not necessary and does not work, because
- * 1) asynchronous traffic will absorb all available bandwidth (less that
- * being used for isochronous traffic)
- * 2) isochronous arbitration always wins.
- */
-static inline int link_speed_to_max_payload(unsigned int speed)
-{
- /* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */
- return min(512 << speed, 4096);
-}
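
/*
 * Illustrative note (not from the removed driver): worked values for the
 * helper above.
 *
 *   SCODE_100 (0)  ->  min(512 << 0, 4096) =  512 bytes
 *   SCODE_200 (1)  ->  min(512 << 1, 4096) = 1024 bytes
 *   SCODE_400 (2)  ->  min(512 << 2, 4096) = 2048 bytes
 *   SCODE_800 (3)  ->  min(512 << 3, 4096) = 4096 bytes (capped from here up)
 */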
-
-#endif /* _FIREWIRE_FWSERIAL_H */
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index 05e91e6bc2a0..223987616e07 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -3,7 +3,6 @@
* Greybus Audio Sound SoC helper APIs
*/
-#include <linux/debugfs.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
@@ -116,10 +115,6 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm,
{
int i;
struct snd_soc_dapm_widget *w, *tmp_w;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *parent = dapm->debugfs_dapm;
- struct dentry *debugfs_w = NULL;
-#endif
mutex_lock(&dapm->card->dapm_mutex);
for (i = 0; i < num; i++) {
@@ -139,12 +134,6 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm,
continue;
}
widget++;
-#ifdef CONFIG_DEBUG_FS
- if (!parent)
- debugfs_w = debugfs_lookup(w->name, parent);
- debugfs_remove(debugfs_w);
- debugfs_w = NULL;
-#endif
gbaudio_dapm_free_widget(w);
}
mutex_unlock(&dapm->card->dapm_mutex);
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index dc4ed0ff1ae2..90ff07f2cbf7 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -480,7 +480,7 @@ static int gb_tty_break_ctl(struct tty_struct *tty, int state)
}
static void gb_tty_set_termios(struct tty_struct *tty,
- struct ktermios *termios_old)
+ const struct ktermios *termios_old)
{
struct gb_uart_set_line_coding_request newline;
struct gb_tty *gb_tty = tty->driver_data;
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index a8e970db179d..afd05bf3345e 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -8,7 +8,6 @@ menu "IIO staging drivers"
source "drivers/staging/iio/accel/Kconfig"
source "drivers/staging/iio/adc/Kconfig"
source "drivers/staging/iio/addac/Kconfig"
-source "drivers/staging/iio/cdc/Kconfig"
source "drivers/staging/iio/frequency/Kconfig"
source "drivers/staging/iio/impedance-analyzer/Kconfig"
source "drivers/staging/iio/meter/Kconfig"
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index b15904b99581..5ed56fe57e14 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -6,7 +6,6 @@
obj-y += accel/
obj-y += adc/
obj-y += addac/
-obj-y += cdc/
obj-y += frequency/
obj-y += impedance-analyzer/
obj-y += meter/
diff --git a/drivers/staging/iio/cdc/Kconfig b/drivers/staging/iio/cdc/Kconfig
deleted file mode 100644
index a7386bbbcb79..000000000000
--- a/drivers/staging/iio/cdc/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# CDC drivers
-#
-menu "Capacitance to digital converters"
-
-config AD7746
- tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (AD7745, AD7746, AD7747) Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7746.
-
-endmenu
diff --git a/drivers/staging/iio/cdc/Makefile b/drivers/staging/iio/cdc/Makefile
deleted file mode 100644
index afb7499a7090..000000000000
--- a/drivers/staging/iio/cdc/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for industrial I/O CDC drivers
-#
-
-obj-$(CONFIG_AD7746) += ad7746.o
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index f43464db618a..6f9eebd6c7ee 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -112,10 +112,10 @@ struct ad9832_state {
* transfer buffers to live in their own cache lines.
*/
union {
- __be16 freq_data[4]____cacheline_aligned;
+ __be16 freq_data[4];
__be16 phase_data[2];
__be16 data;
- };
+ } __aligned(IIO_DMA_MINALIGN);
};
static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 94b131ef8a22..2b4267a87e65 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -83,7 +83,7 @@ struct ad9834_state {
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
- __be16 data ____cacheline_aligned;
+ __be16 data __aligned(IIO_DMA_MINALIGN);
__be16 freq_data[2];
};
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
index a51e6e3183d3..7a49f8f1016f 100644
--- a/drivers/staging/iio/meter/ade7854.h
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -162,7 +162,7 @@ struct ade7854_state {
int bits);
int irq;
struct mutex buf_lock;
- u8 tx[ADE7854_MAX_TX] ____cacheline_aligned;
+ u8 tx[ADE7854_MAX_TX] __aligned(IIO_DMA_MINALIGN);
u8 rx[ADE7854_MAX_RX];
};
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index c0b2716d0511..e4cf42438487 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -94,8 +94,8 @@ struct ad2s1210_state {
bool hysteresis;
u8 resolution;
enum ad2s1210_mode mode;
- u8 rx[2] ____cacheline_aligned;
- u8 tx[2] ____cacheline_aligned;
+ u8 rx[2] __aligned(IIO_DMA_MINALIGN);
+ u8 tx[2];
};
static const int ad2s1210_mode_vals[4][2] = {
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 1ad94c5060b5..a36e36701c74 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -125,7 +125,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
*
* Returns Always returns NETDEV_TX_OK
*/
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
union cvmx_pko_command_word0 pko_command;
union cvmx_buf_ptr hw_buffer;
@@ -506,7 +506,7 @@ skip_xmit:
* @dev: Device info structure
* Returns Always returns zero
*/
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
void *packet_buffer;
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index 78936e9b33b0..6c524668f65a 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -5,8 +5,8 @@
* Copyright (c) 2003-2007 Cavium Networks
*/
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
int do_free, int qos);
void cvm_oct_tx_initialize(void);
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index df02335fdbab..d4e06a3929f3 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1149,19 +1149,7 @@ out_unlock:
return ret;
}
-
-static int pi433_debugfs_regs_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, pi433_debugfs_regs_show, inode->i_private);
-}
-
-static const struct file_operations debugfs_fops = {
- .llseek = seq_lseek,
- .open = pi433_debugfs_regs_open,
- .owner = THIS_MODULE,
- .read = seq_read,
- .release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(pi433_debugfs_regs);
/*-------------------------------------------------------------------------*/
@@ -1320,7 +1308,7 @@ static int pi433_probe(struct spi_device *spi)
entry = debugfs_create_dir(dev_name(device->dev),
debugfs_lookup(KBUILD_MODNAME, NULL));
- debugfs_create_file("regs", 0400, entry, device, &debugfs_fops);
+ debugfs_create_file("regs", 0400, entry, device, &pi433_debugfs_regs_fops);
return 0;
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 659c8c1b38fd..8c7fab6a46bb 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -816,7 +816,7 @@ int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
if (size > FIFO_SIZE) {
dev_dbg(&spi->dev,
- "read fifo: passed in buffer bigger then internal buffer\n");
+ "write fifo: passed in buffer bigger then internal buffer\n");
return -EMSGSIZE;
}
diff --git a/drivers/staging/r8188eu/Makefile b/drivers/staging/r8188eu/Makefile
index eea16eb7caa0..fd494c2299e6 100644
--- a/drivers/staging/r8188eu/Makefile
+++ b/drivers/staging/r8188eu/Makefile
@@ -10,7 +10,6 @@ r8188eu-y = \
hal/hal_com.o \
hal/odm.o \
hal/odm_HWConfig.o \
- hal/odm_RegConfig8188E.o \
hal/odm_RTL8188E.o \
hal/rtl8188e_cmd.o \
hal/rtl8188e_dm.o \
@@ -18,19 +17,14 @@ r8188eu-y = \
hal/rtl8188e_phycfg.o \
hal/rtl8188e_rf6052.o \
hal/rtl8188e_rxdesc.o \
- hal/rtl8188e_xmit.o \
- hal/rtl8188eu_recv.o \
hal/rtl8188eu_xmit.o \
hal/usb_halinit.o \
hal/usb_ops_linux.o \
os_dep/ioctl_linux.o \
- os_dep/mlme_linux.o \
os_dep/os_intfs.o \
os_dep/osdep_service.o \
- os_dep/recv_linux.o \
os_dep/usb_intf.o \
os_dep/usb_ops_linux.o \
- os_dep/xmit_linux.o \
core/rtw_ap.o \
core/rtw_br_ext.o \
core/rtw_cmd.o \
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
index 5bd9dfa57cc5..24eb8dce9bfe 100644
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ b/drivers/staging/r8188eu/core/rtw_ap.c
@@ -935,6 +935,48 @@ u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
return beacon_updated;
}
+void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *psta)
+{
+ union iwreq_data wrqu;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (!psta)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
+}
+
+static void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *psta)
+{
+ union iwreq_data wrqu;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (!psta)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
+}
+
u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
bool active, u16 reason)
{
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index bca20fe5c983..4c5f30792a46 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -12,7 +12,6 @@
#include "../include/drv_types.h"
#include "../include/rtw_br_ext.h"
#include "../include/usb_osintf.h"
-#include "../include/recv_osdep.h"
#ifndef csum_ipv6_magic
#include "../include/net/ip6_checksum.h"
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index 5b6a891b5d67..3fadace33de6 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -5,8 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/mlme_osdep.h"
#include "../include/rtw_br_ext.h"
#include "../include/rtw_mlme_ext.h"
#include "../include/rtl8188e_dm.h"
@@ -58,8 +56,6 @@ exit:
u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- u32 res = _SUCCESS;
-
init_completion(&pcmdpriv->enqueue_cmd);
/* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
init_completion(&pcmdpriv->start_cmd_thread);
@@ -74,27 +70,24 @@ u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_allocated_buf = kzalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ,
GFP_KERNEL);
- if (!pcmdpriv->cmd_allocated_buf) {
- res = _FAIL;
- goto exit;
- }
+ if (!pcmdpriv->cmd_allocated_buf)
+ return _FAIL;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((size_t)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ - 1));
pcmdpriv->rsp_allocated_buf = kzalloc(MAX_RSPSZ + 4, GFP_KERNEL);
if (!pcmdpriv->rsp_allocated_buf) {
- res = _FAIL;
- goto exit;
+ kfree(pcmdpriv->cmd_allocated_buf);
+ return _FAIL;
}
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((size_t)(pcmdpriv->rsp_allocated_buf) & 3);
pcmdpriv->cmd_done_cnt = 0;
pcmdpriv->rsp_cnt = 0;
-exit:
- return res;
+ return _SUCCESS;
}
u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
@@ -288,8 +281,7 @@ post_process:
* ### NOTE:#### (!!!!)
* MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
*/
-u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
- struct rtw_ieee80211_channel *ch, int ch_num)
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num)
{
u8 res = _FAIL;
struct cmd_obj *ph2c;
@@ -331,17 +323,6 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
}
}
- /* prepare channel list */
- if (ch) {
- int i;
- for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
- if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
- memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
- psurveyPara->ch_num++;
- }
- }
- }
-
set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
@@ -1290,6 +1271,66 @@ exit:
return res;
}
+/* C2H event format:
+ * Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
+ * BITS [127:120] [119:16] [15:8] [7:4] [3:0]
+ */
+static s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
+{
+ s32 ret = _FAIL;
+ struct c2h_evt_hdr *c2h_evt;
+ int i;
+ u8 trigger;
+
+ if (!buf)
+ goto exit;
+
+ ret = rtw_read8(adapter, REG_C2HEVT_CLEAR, &trigger);
+ if (ret)
+ return _FAIL;
+
+ if (trigger == C2H_EVT_HOST_CLOSE)
+ goto exit; /* Not ready */
+ else if (trigger != C2H_EVT_FW_CLOSE)
+ goto clear_evt; /* Not a valid value */
+
+ c2h_evt = (struct c2h_evt_hdr *)buf;
+
+ memset(c2h_evt, 0, 16);
+
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL, buf);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
+
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1, buf + 1);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
+ /* Read the content */
+ for (i = 0; i < c2h_evt->plen; i++) {
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
+ sizeof(*c2h_evt) + i, c2h_evt->payload + i);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
+ }
+
+ ret = _SUCCESS;
+
+clear_evt:
+ /* Clear event to notify FW we have read the command.
+ * If this field isn't clear, the FW won't update the next
+ * command message.
+ */
+ rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+exit:
+ return ret;
+}
+
static void c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter)
{
u8 buf[16];
diff --git a/drivers/staging/r8188eu/core/rtw_fw.c b/drivers/staging/r8188eu/core/rtw_fw.c
index 95534f9c7a0f..682c65b1e04c 100644
--- a/drivers/staging/r8188eu/core/rtw_fw.c
+++ b/drivers/staging/r8188eu/core/rtw_fw.c
@@ -236,7 +236,7 @@ static int load_firmware(struct rt_firmware *rtfw, struct device *device)
{
int ret = _SUCCESS;
const struct firmware *fw;
- const char *fw_name = "rtlwifi/rtl8188eufw.bin";
+ const char *fw_name = FW_RTL8188EU;
int err = request_firmware(&fw, fw_name, device);
if (err) {
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
index 17f6bcbeebf4..55e6b0f41dc3 100644
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
@@ -11,8 +11,6 @@
#include "../include/usb_osintf.h"
#include "../include/usb_ops.h"
-extern void indicate_wx_scan_complete_event(struct adapter *padapter);
-
u8 rtw_do_join(struct adapter *padapter)
{
struct list_head *plist, *phead;
@@ -43,7 +41,7 @@ u8 rtw_do_join(struct adapter *padapter)
if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
pmlmepriv->to_roaming > 0) {
/* submit site_survey_cmd */
- ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1);
if (ret != _SUCCESS)
pmlmepriv->to_join = false;
} else {
@@ -89,7 +87,7 @@ u8 rtw_do_join(struct adapter *padapter)
/* we try to issue sitesurvey firstly */
if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
pmlmepriv->to_roaming > 0) {
- ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1);
if (ret != _SUCCESS)
pmlmepriv->to_join = false;
} else {
@@ -353,14 +351,9 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_s
/* Scan or linking is in progress, do nothing. */
res = true;
} else {
- if (rtw_is_scan_deny(padapter)) {
- indicate_wx_scan_complete_event(padapter);
- return _SUCCESS;
- }
-
spin_lock_bh(&pmlmepriv->lock);
- res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0);
+ res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num);
spin_unlock_bh(&pmlmepriv->lock);
}
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
index d5c6c5e29621..1e316e6358ea 100644
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ b/drivers/staging/r8188eu/core/rtw_led.c
@@ -25,9 +25,7 @@ static void ResetLedStatus(struct led_priv *pLed)
pLed->bLedWPSBlinkInProgress = false;
pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */
- pLed->BlinkingLedState = LED_UNKNOWN; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF are. */
- pLed->bLedNoLinkBlinkInProgress = false;
pLed->bLedLinkBlinkInProgress = false;
pLed->bLedScanBlinkInProgress = false;
}
@@ -37,7 +35,7 @@ static void SwLedOn(struct adapter *padapter, struct led_priv *pLed)
u8 LedCfg;
int res;
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ if (padapter->bDriverStopped)
return;
res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);
@@ -53,7 +51,7 @@ static void SwLedOff(struct adapter *padapter, struct led_priv *pLed)
u8 LedCfg;
int res;
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ if (padapter->bDriverStopped)
goto exit;
res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);/* 0x4E */
@@ -79,41 +77,25 @@ static void blink_work(struct work_struct *work)
struct adapter *padapter = pLed->padapter;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- return;
-
if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
SwLedOff(padapter, pLed);
ResetLedStatus(pLed);
return;
}
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
+ if (pLed->bLedOn)
SwLedOff(padapter, pLed);
+ else
+ SwLedOn(padapter, pLed);
switch (pLed->CurrLedState) {
case LED_BLINK_SLOWLY:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_BLINK_NORMAL:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
break;
case LED_BLINK_SCAN:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
pLed->BlinkTimes--;
if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -121,7 +103,6 @@ static void blink_work(struct work_struct *work)
pLed->CurrLedState = LED_BLINK_NORMAL;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
} else {
- pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
@@ -131,10 +112,6 @@ static void blink_work(struct work_struct *work)
}
break;
case LED_BLINK_TXRX:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
pLed->BlinkTimes--;
if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -142,7 +119,6 @@ static void blink_work(struct work_struct *work)
pLed->CurrLedState = LED_BLINK_NORMAL;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
} else {
- pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
@@ -152,25 +128,16 @@ static void blink_work(struct work_struct *work)
}
break;
case LED_BLINK_WPS:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
break;
case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState != RTW_LED_ON) {
+ if (!pLed->bLedOn) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
pLed->bLedWPSBlinkInProgress = false;
} else {
- pLed->BlinkingLedState = RTW_LED_OFF;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
}
break;
@@ -217,192 +184,110 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
switch (LedAction) {
case LED_CTL_START_TO_LINK:
case LED_CTL_NO_LINK:
- if (!pLed->bLedNoLinkBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
- }
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_CTL_LINK:
- if (!pLed->bLedLinkBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- }
+ if (!pLed->bLedLinkBlinkInProgress)
+ return;
+
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedLinkBlinkInProgress = true;
+
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
break;
case LED_CTL_SITE_SURVEY:
- if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
- ;
- } else if (!pLed->bLedScanBlinkInProgress) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- }
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED)))
+ return;
+
+ if (pLed->bLedScanBlinkInProgress)
+ return;
+
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = true;
+
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
break;
case LED_CTL_TX:
case LED_CTL_RX:
- if (!pLed->bLedBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
- }
+ if (pLed->bLedBlinkInProgress)
+ return;
+
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = true;
+
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
break;
case LED_CTL_START_WPS: /* wait until xinpin finish */
- if (!pLed->bLedWPSBlinkInProgress) {
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- }
+ if (pLed->bLedWPSBlinkInProgress)
+ return;
+
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
break;
case LED_CTL_STOP_WPS:
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress)
- cancel_delayed_work(&pLed->blink_work);
- else
- pLed->bLedWPSBlinkInProgress = true;
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+ pLed->bLedWPSBlinkInProgress = true;
+
pLed->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
} else {
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, 0);
}
break;
case LED_CTL_STOP_WPS_FAIL:
- if (pLed->bLedWPSBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedWPSBlinkInProgress = false;
- }
- pLed->bLedNoLinkBlinkInProgress = true;
+ cancel_delayed_work(&pLed->blink_work);
+ pLed->bLedWPSBlinkInProgress = false;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_CTL_POWER_OFF:
pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedWPSBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedScanBlinkInProgress = false;
- }
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedWPSBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+ cancel_delayed_work(&pLed->blink_work);
SwLedOff(padapter, pLed);
break;
default:
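The rtw_led.c hunks above drop the BlinkingLedState bookkeeping and derive the next LED state directly from bLedOn. A minimal, hypothetical sketch of that toggle-on-current-state pattern with a delayed work item (all names below are invented for illustration, not the driver's):

/* illustrative sketch only -- not part of the patch */
#include <linux/workqueue.h>

struct blink_led {
	struct delayed_work work;
	bool on;			/* current hardware state */
	unsigned long interval;		/* blink period in jiffies */
};

static void blink_fn(struct work_struct *w)
{
	struct blink_led *led = container_of(to_delayed_work(w), struct blink_led, work);

	led->on = !led->on;		/* next state is simply the opposite of the current one */
	/* hw_set_led(led->on);		hypothetical register write */
	schedule_delayed_work(&led->work, led->interval);
}

static void blink_led_start(struct blink_led *led, unsigned long interval)
{
	led->on = false;
	led->interval = interval;
	INIT_DELAYED_WORK(&led->work, blink_fn);
	schedule_delayed_work(&led->work, interval);
}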
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index 2705c9d87b14..5ca03d6cac32 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -5,10 +5,7 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/xmit_osdep.h"
#include "../include/hal_intf.h"
-#include "../include/mlme_osdep.h"
#include "../include/sta_info.h"
#include "../include/wifi.h"
#include "../include/wlan_bssdef.h"
@@ -190,6 +187,37 @@ u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
return ie + 8;
}
+static void rtw_join_timeout_handler(struct timer_list *t)
+{
+ struct adapter *adapter = from_timer(adapter, t, mlmepriv.assoc_timer);
+
+ _rtw_join_timeout_handler(adapter);
+}
+
+static void _rtw_scan_timeout_handler(struct timer_list *t)
+{
+ struct adapter *adapter = from_timer(adapter, t, mlmepriv.scan_to_timer);
+
+ rtw_scan_timeout_handler(adapter);
+}
+
+static void _dynamic_check_timer_handlder(struct timer_list *t)
+{
+ struct adapter *adapter = from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
+
+ rtw_dynamic_check_timer_handlder(adapter);
+ _set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
+}
+
+static void rtw_init_mlme_timer(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ timer_setup(&pmlmepriv->assoc_timer, rtw_join_timeout_handler, 0);
+ timer_setup(&pmlmepriv->scan_to_timer, _rtw_scan_timeout_handler, 0);
+ timer_setup(&pmlmepriv->dynamic_chk_timer, _dynamic_check_timer_handlder, 0);
+}
+
int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
{
int i;
@@ -235,8 +263,6 @@ int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv)
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
- rtw_clear_scan_deny(padapter);
-
rtw_init_mlme_timer(padapter);
exit:
@@ -641,6 +667,23 @@ exit:
spin_unlock_bh(&pmlmepriv->lock);
}
+static void rtw_xmit_schedule(struct adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv;
+
+ if (!padapter)
+ return;
+
+ pxmitpriv = &padapter->xmitpriv;
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ if (rtw_txframes_pending(padapter))
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+
+ spin_unlock_bh(&pxmitpriv->lock);
+}
+
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -697,7 +740,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
} else {
if (rtw_to_roaming(adapter) != 0) {
if (--pmlmepriv->to_roaming == 0 ||
- rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) {
+ rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1) != _SUCCESS) {
rtw_set_roaming(adapter, 0);
rtw_free_assoc_resources(adapter, 1);
rtw_indicate_disconnect(adapter);
@@ -719,7 +762,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_LINKED))
p2p_ps_wk_cmd(adapter, P2P_PS_SCAN_DONE, 0);
- rtw_os_xmit_schedule(adapter);
+ rtw_xmit_schedule(adapter);
}
static void free_scanqueue(struct mlme_priv *pmlmepriv)
@@ -795,6 +838,48 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
}
+static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
+
+static void rtw_reset_securitypriv(struct adapter *adapter)
+{
+ u8 backup_index;
+ u8 backup_counter;
+ u32 backup_time;
+
+ if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ /* 802.1x */
+ /* We have to backup the PMK information for WiFi PMK Caching test item. */
+ /* Backup the btkip_countermeasure information. */
+ /* When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */
+ memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ backup_index = adapter->securitypriv.PMKIDIndex;
+ backup_counter = adapter->securitypriv.btkip_countermeasure;
+ backup_time = adapter->securitypriv.btkip_countermeasure_time;
+ memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
+
+ /* Restore the PMK information to securitypriv structure for the following connection. */
+ memcpy(&adapter->securitypriv.PMKIDList[0],
+ &backup_pmkid[0],
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ adapter->securitypriv.PMKIDIndex = backup_index;
+ adapter->securitypriv.btkip_countermeasure = backup_counter;
+ adapter->securitypriv.btkip_countermeasure_time = backup_time;
+ adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+ } else {
+ /* reset values in securitypriv */
+ struct security_priv *psec_priv = &adapter->securitypriv;
+
+ psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psec_priv->dot11PrivacyKeyIndex = 0;
+ psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psec_priv->dot118021XGrpKeyid = 1;
+ psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
+ }
+}
+
/*
*rtw_indicate_connect: the caller has to lock pmlmepriv->lock
*/
@@ -809,12 +894,13 @@ void rtw_indicate_connect(struct adapter *padapter)
rtw_led_control(padapter, LED_CTL_LINK);
- rtw_os_indicate_connect(padapter);
+ rtw_indicate_wx_assoc_event(padapter);
+ netif_carrier_on(padapter->pnetdev);
+ if (padapter->pid[2] != 0)
+ rtw_signal_process(padapter->pid[2], SIGALRM);
}
pmlmepriv->to_roaming = 0;
-
- rtw_set_scan_deny(padapter, 3000);
}
/*
@@ -831,11 +917,14 @@ void rtw_indicate_disconnect(struct adapter *padapter)
if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) ||
(pmlmepriv->to_roaming <= 0)) {
- rtw_os_indicate_disconnect(padapter);
+ /* Do it first for tx broadcast pkt after disconnection issue! */
+ netif_carrier_off(padapter->pnetdev);
+
+ rtw_indicate_wx_disassoc_event(padapter);
+ rtw_reset_securitypriv(padapter);
_clr_fwstate_(pmlmepriv, _FW_LINKED);
rtw_led_control(padapter, LED_CTL_NO_LINK);
- rtw_clear_scan_deny(padapter);
}
p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
@@ -843,9 +932,9 @@ void rtw_indicate_disconnect(struct adapter *padapter)
}
-inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
+inline void rtw_indicate_scan_done(struct adapter *padapter)
{
- rtw_os_indicate_scan_done(padapter, aborted);
+ indicate_wx_scan_complete_event(padapter);
}
static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork)
@@ -1068,8 +1157,7 @@ void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
mlmeext_joinbss_event_callback(adapter, pnetwork->join_res);
- rtw_os_xmit_schedule(adapter);
-
+ rtw_xmit_schedule(adapter);
}
void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid)
@@ -1316,7 +1404,7 @@ void rtw_scan_timeout_handler (struct adapter *adapter)
spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
spin_unlock_bh(&pmlmepriv->lock);
- rtw_indicate_scan_done(adapter, true);
+ rtw_indicate_scan_done(adapter);
}
static void rtw_auto_scan_handler(struct adapter *padapter)
@@ -1442,10 +1530,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
pmlmepriv->pscanned = phead->next;
while (phead != pmlmepriv->pscanned) {
pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- if (!pnetwork) {
- ret = _FAIL;
- goto exit;
- }
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
}
@@ -1639,6 +1723,33 @@ static int rtw_append_pmkid(struct adapter *Adapter, int iEntry, u8 *ie, uint ie
return ie_len;
}
+static void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
+{
+ uint len;
+ u8 *buff, *p, i;
+ union iwreq_data wrqu;
+
+ buff = NULL;
+ if (authmode == _WPA_IE_ID_) {
+ buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
+ if (!buff)
+ return;
+ p = buff;
+ p += sprintf(p, "ASSOCINFO(ReqIEs =");
+ len = sec_ie[1] + 2;
+ len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
+ for (i = 0; i < len; i++)
+ p += sprintf(p, "%02x", sec_ie[i]);
+ p += sprintf(p, ")");
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = p - buff;
+ wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ?
+ wrqu.data.length : IW_CUSTOM_MAX;
+ wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
+ kfree(buff);
+ }
+}
+
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len)
{
u8 authmode = 0;
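The new static handlers added to rtw_mlme.c above rely on the timer_setup()/from_timer() pair, so each timer callback can recover its containing structure without global state. A small sketch of that pattern under invented names:

/* illustrative sketch only -- not part of the patch */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct timer_list poll_timer;
	int ticks;
};

static void demo_poll_timer_fn(struct timer_list *t)
{
	struct demo_priv *priv = from_timer(priv, t, poll_timer);

	priv->ticks++;
	/* re-arm, like the dynamic check timer above */
	mod_timer(&priv->poll_timer, jiffies + msecs_to_jiffies(2000));
}

static void demo_init_timer(struct demo_priv *priv)
{
	timer_setup(&priv->poll_timer, demo_poll_timer_fn, 0);
	mod_timer(&priv->poll_timer, jiffies + msecs_to_jiffies(2000));
}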
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index 32d0e101d0c2..07905e2ae8e0 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -9,8 +9,6 @@
#include "../include/wifi.h"
#include "../include/rtw_mlme_ext.h"
#include "../include/wlan_bssdef.h"
-#include "../include/mlme_osdep.h"
-#include "../include/recv_osdep.h"
#include "../include/rtl8188e_xmit.h"
#include "../include/rtl8188e_dm.h"
@@ -334,6 +332,28 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
return chanset_size;
}
+static void _survey_timer_hdl(struct timer_list *t)
+{
+ struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.survey_timer);
+
+ survey_timer_hdl(padapter);
+}
+
+static void _link_timer_hdl(struct timer_list *t)
+{
+ struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.link_timer);
+
+ link_timer_hdl(padapter);
+}
+
+static void init_mlme_ext_timer(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ timer_setup(&pmlmeext->survey_timer, _survey_timer_hdl, 0);
+ timer_setup(&pmlmeext->link_timer, _link_timer_hdl, 0);
+}
+
void init_mlme_ext_priv(struct adapter *padapter)
{
struct registry_priv *pregistrypriv = &padapter->registrypriv;
@@ -910,6 +930,46 @@ authclnt_fail:
return _FAIL;
}
+static void UpdateBrateTbl(u8 *mbrate)
+{
+ u8 i;
+ u8 rate;
+
+ /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ rate = mbrate[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ case IEEE80211_OFDM_RATE_6MB:
+ case IEEE80211_OFDM_RATE_12MB:
+ case IEEE80211_OFDM_RATE_24MB:
+ mbrate[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
+static void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
+{
+ u8 i;
+ u8 rate;
+
+ for (i = 0; i < bssratelen; i++) {
+ rate = bssrateset[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame)
{
u16 capab_info;
@@ -1320,9 +1380,9 @@ OnAssocReqFail:
unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame)
{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
uint i;
int res;
- unsigned short status;
struct ndis_802_11_var_ie *pIE;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -1331,7 +1391,7 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
uint pkt_len = precv_frame->len;
/* check A1 matches or not */
- if (memcmp(myid(&padapter->eeprompriv), get_da(pframe), ETH_ALEN))
+ if (memcmp(myid(&padapter->eeprompriv), mgmt->da, ETH_ALEN))
return _SUCCESS;
if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
@@ -1342,28 +1402,24 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
_cancel_timer_ex(&pmlmeext->link_timer);
- /* status */
- status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2));
- if (status > 0) {
+ if (le16_to_cpu(mgmt->u.assoc_resp.status_code) > 0) {
pmlmeinfo->state = WIFI_FW_NULL_STATE;
res = -4;
goto report_assoc_result;
}
- /* get capabilities */
- pmlmeinfo->capability = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+ pmlmeinfo->capability = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
/* set slot time */
pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;
- /* AID */
- pmlmeinfo->aid = (int)(le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 4)) & 0x3fff);
+ pmlmeinfo->aid = le16_to_cpu(mgmt->u.assoc_resp.aid) & 0x3fff;
res = pmlmeinfo->aid;
/* following are moved to join event callback function */
/* to handle HT, WMM, rate adaptive, update MAC reg */
/* for not to handle the synchronous IO in the tasklet */
- for (i = (6 + WLAN_HDR_A3_LEN); i < pkt_len;) {
+ for (i = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); i < pkt_len;) {
pIE = (struct ndis_802_11_var_ie *)(pframe + i);
switch (pIE->ElementID) {
@@ -1391,7 +1447,7 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
/* Update Basic Rate Table for spec, 2010-12-28 , by thomas */
- UpdateBrateTbl(padapter, pmlmeinfo->network.SupportedRates);
+ UpdateBrateTbl(pmlmeinfo->network.SupportedRates);
report_assoc_result:
report_join_res(padapter, res);
@@ -7858,7 +7914,7 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
spin_unlock_bh(&psta_bmc->sleep_q.lock);
if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta_bmc->sleep_q.lock);
}
spin_unlock_bh(&psta_bmc->sleep_q.lock);
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
index bd654d4ff8b4..dc159e58f428 100644
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ b/drivers/staging/r8188eu/core/rtw_p2p.c
@@ -1883,15 +1883,14 @@ void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
{
- int ret = _SUCCESS;
+ int ret;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
/* leave IPS/Autosuspend */
- if (rtw_pwr_wakeup(padapter)) {
- ret = _FAIL;
- goto exit;
- }
+ ret = rtw_pwr_wakeup(padapter);
+ if (ret)
+ return ret;
/* Added by Albert 2011/03/22 */
/* In the P2P mode, the driver should not support the b mode. */
@@ -1902,10 +1901,9 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
init_wifidirect_info(padapter, role);
} else if (role == P2P_ROLE_DISABLE) {
- if (rtw_pwr_wakeup(padapter)) {
- ret = _FAIL;
- goto exit;
- }
+ ret = rtw_pwr_wakeup(padapter);
+ if (ret)
+ return ret;
/* Disable P2P function */
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
@@ -1923,6 +1921,5 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
}
-exit:
- return ret;
+ return 0;
}
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index 10550bd2c16d..870d81735b8d 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -89,7 +89,7 @@ static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
struct wifidirect_info *pwdinfo = &adapter->wdinfo;
bool ret = false;
- if (adapter->pwrctrlpriv.ips_deny_time >= jiffies)
+ if (time_after_eq(adapter->pwrctrlpriv.ips_deny_time, jiffies))
goto exit;
if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE | WIFI_SITE_MONITOR) ||
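The rtw_pwrctrl.c change replaces a raw ">=" comparison on jiffies with time_after_eq(), which stays correct when the jiffies counter wraps. A short sketch of the idea (function name invented):

/* illustrative sketch only -- not part of the patch */
#include <linux/jiffies.h>

static bool deny_window_active(unsigned long deny_until)
{
	/* true while deny_until is now or still in the future, wrap-safe */
	return time_after_eq(deny_until, jiffies);
}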
diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
index e5a7b7dfc387..bb5c3b3888e0 100644
--- a/drivers/staging/r8188eu/core/rtw_recv.c
+++ b/drivers/staging/r8188eu/core/rtw_recv.c
@@ -6,8 +6,6 @@
#include <linux/ieee80211.h>
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/mlme_osdep.h"
#include "../include/usb_ops.h"
#include "../include/wifi.h"
#include "../include/rtl8188e_recv.h"
@@ -37,6 +35,69 @@ void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
}
+static int rtl8188eu_init_recv_priv(struct adapter *padapter)
+{
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ int i, res = _SUCCESS;
+ struct recv_buf *precvbuf;
+
+ tasklet_init(&precvpriv->recv_tasklet,
+ rtl8188eu_recv_tasklet,
+ (unsigned long)padapter);
+
+ /* init recv_buf */
+ rtw_init_queue(&precvpriv->free_recv_buf_queue);
+
+ precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
+ GFP_KERNEL);
+ if (!precvpriv->pallocated_recv_buf) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ precvpriv->precv_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_recv_buf), 4);
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ precvbuf->pskb = NULL;
+ precvbuf->reuse = false;
+ precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!precvbuf->purb) {
+ res = _FAIL;
+ break;
+ }
+ precvbuf->adapter = padapter;
+ precvbuf++;
+ }
+ precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
+ skb_queue_head_init(&precvpriv->rx_skb_queue);
+ {
+ int i;
+ size_t tmpaddr = 0;
+ size_t alignment = 0;
+ struct sk_buff *pskb = NULL;
+
+ skb_queue_head_init(&precvpriv->free_recv_skb_queue);
+
+ for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
+ pskb = __netdev_alloc_skb(padapter->pnetdev,
+ MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, GFP_KERNEL);
+ if (pskb) {
+ pskb->dev = padapter->pnetdev;
+ tmpaddr = (size_t)pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
+ skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
+
+ skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
+ }
+ pskb = NULL;
+ }
+ }
+exit:
+ return res;
+}
+
int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
{
int i;
@@ -91,6 +152,26 @@ exit:
return res;
}
+static void rtl8188eu_free_recv_priv(struct adapter *padapter)
+{
+ int i;
+ struct recv_buf *precvbuf;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ usb_free_urb(precvbuf->purb);
+ precvbuf++;
+ }
+
+ kfree(precvpriv->pallocated_recv_buf);
+
+ skb_queue_purge(&precvpriv->rx_skb_queue);
+
+ skb_queue_purge(&precvpriv->free_recv_skb_queue);
+}
+
void _rtw_free_recv_priv(struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
@@ -244,6 +325,42 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
return cnt;
}
+static void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
+{
+ union iwreq_data wrqu;
+ struct iw_michaelmicfailure ev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 cur_time = 0;
+
+ if (psecuritypriv->last_mic_err_time == 0) {
+ psecuritypriv->last_mic_err_time = jiffies;
+ } else {
+ cur_time = jiffies;
+
+ if (cur_time - psecuritypriv->last_mic_err_time < 60 * HZ) {
+ psecuritypriv->btkip_countermeasure = true;
+ psecuritypriv->last_mic_err_time = 0;
+ psecuritypriv->btkip_countermeasure_time = cur_time;
+ } else {
+ psecuritypriv->last_mic_err_time = jiffies;
+ }
+ }
+
+ memset(&ev, 0x00, sizeof(ev));
+ if (bgroup)
+ ev.flags |= IW_MICFAILURE_GROUP;
+ else
+ ev.flags |= IW_MICFAILURE_PAIRWISE;
+
+ ev.src_addr.sa_family = ARPHRD_ETHER;
+ memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
+ memset(&wrqu, 0x00, sizeof(wrqu));
+ wrqu.data.length = sizeof(ev);
+ wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE,
+ &wrqu, (char *)&ev);
+}
+
static int recvframe_chkmic(struct adapter *adapter, struct recv_frame *precvframe)
{
int i, res = _SUCCESS;
@@ -1294,7 +1411,6 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
u8 nr_subframes, i;
unsigned char *pdata;
struct rx_pkt_attrib *pattrib;
- unsigned char *data_ptr;
struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
@@ -1329,8 +1445,7 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
if (sub_skb) {
skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
- memcpy(data_ptr, pdata, nSubframe_Length);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
} else {
sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
if (sub_skb) {
@@ -1460,6 +1575,85 @@ static bool enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, s
return true;
}
+static int rtw_recv_indicatepkt(struct adapter *padapter, struct recv_frame *precv_frame)
+{
+ struct recv_priv *precvpriv;
+ struct __queue *pfree_recv_queue;
+ struct sk_buff *skb;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ precvpriv = &padapter->recvpriv;
+ pfree_recv_queue = &precvpriv->free_recv_queue;
+
+ skb = precv_frame->pkt;
+ if (!skb)
+ goto _recv_indicatepkt_drop;
+
+ skb->data = precv_frame->rx_data;
+
+ skb_set_tail_pointer(skb, precv_frame->len);
+
+ skb->len = precv_frame->len;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ struct sk_buff *pskb2 = NULL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ bool bmcast = is_multicast_ether_addr(pattrib->dst);
+
+ if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ pskb2 = skb_clone(skb, GFP_ATOMIC);
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->dst);
+ }
+
+ if (psta) {
+ struct net_device *pnetdev;
+
+ pnetdev = (struct net_device *)padapter->pnetdev;
+ skb->dev = pnetdev;
+ skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
+
+ rtw_xmit_entry(skb, pnetdev);
+
+ if (bmcast)
+ skb = pskb2;
+ else
+ goto _recv_indicatepkt_end;
+ }
+ }
+ }
+
+ rcu_read_lock();
+ rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->dev = padapter->pnetdev;
+ skb->protocol = eth_type_trans(skb, padapter->pnetdev);
+
+ netif_rx(skb);
+
+_recv_indicatepkt_end:
+
+ /* pointers to NULL before rtw_free_recvframe() */
+ precv_frame->pkt = NULL;
+
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+ return _SUCCESS;
+
+_recv_indicatepkt_drop:
+
+ /* enqueue back to free_recv_queue */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+ return _FAIL;
+}
+
static bool recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
{
struct list_head *phead, *plist;
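The amsdu_to_msdu() hunk above swaps an skb_put()+memcpy() pair for skb_put_data(), which extends the skb tail and copies the payload in one call. A tiny sketch with placeholder names:

/* illustrative sketch only -- not part of the patch */
#include <linux/skbuff.h>

static void copy_subframe(struct sk_buff *skb, const void *payload, unsigned int len)
{
	/* old style:
	 *	u8 *p = skb_put(skb, len);
	 *	memcpy(p, payload, len);
	 */
	skb_put_data(skb, payload, len);	/* same effect, one helper */
}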
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
index 357f98e22d8a..98eeb16cab6c 100644
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
@@ -5,9 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/xmit_osdep.h"
-#include "../include/mlme_osdep.h"
#include "../include/sta_info.h"
static void _rtw_init_stainfo(struct sta_info *psta)
@@ -141,6 +138,31 @@ void _rtw_free_sta_priv(struct sta_priv *pstapriv)
}
}
+static void _rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
+{
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ preorder_ctrl = from_timer(preorder_ctrl, t, reordering_ctrl_timer);
+ rtw_reordering_ctrl_timeout_handler(preorder_ctrl);
+}
+
+static void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
+{
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer, _rtw_reordering_ctrl_timeout_handler, 0);
+}
+
+static void _addba_timer_hdl(struct timer_list *t)
+{
+ struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
+
+ addba_timer_hdl(psta);
+}
+
+static void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
+{
+ timer_setup(&psta->addba_retry_timer, _addba_timer_hdl, 0);
+}
+
struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
s32 index;
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
index 3a002cb6834f..e50631848cab 100644
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c
@@ -222,46 +222,6 @@ void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *bssrat
memcpy(pbssrate, supportedrates, *bssrate_len);
}
-void UpdateBrateTbl(struct adapter *Adapter, u8 *mbrate)
-{
- u8 i;
- u8 rate;
-
- /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- rate = mbrate[i] & 0x7f;
- switch (rate) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- case IEEE80211_OFDM_RATE_6MB:
- case IEEE80211_OFDM_RATE_12MB:
- case IEEE80211_OFDM_RATE_24MB:
- mbrate[i] |= IEEE80211_BASIC_RATE_MASK;
- break;
- }
- }
-}
-
-void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
-{
- u8 i;
- u8 rate;
-
- for (i = 0; i < bssratelen; i++) {
- rate = bssrateset[i] & 0x7f;
- switch (rate) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
- break;
- }
- }
-}
-
void Save_DM_Func_Flag(struct adapter *padapter)
{
struct hal_data_8188e *haldata = &padapter->haldata;
@@ -1578,10 +1538,8 @@ void beacon_timing_control(struct adapter *padapter)
static struct adapter *pbuddy_padapter;
-int rtw_handle_dualmac(struct adapter *adapter, bool init)
+void rtw_handle_dualmac(struct adapter *adapter, bool init)
{
- int status = _SUCCESS;
-
if (init) {
if (!pbuddy_padapter) {
pbuddy_padapter = adapter;
@@ -1594,5 +1552,4 @@ int rtw_handle_dualmac(struct adapter *adapter, bool init)
} else {
pbuddy_padapter = NULL;
}
- return status;
}
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 24401f3ae2a0..873d2c5c3634 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -33,6 +33,32 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
INIT_LIST_HEAD(&psta_xmitpriv->apsd);
}
+static int rtw_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf,
+ u32 alloc_sz)
+{
+ pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
+ if (!pxmitbuf->pallocated_buf)
+ return _FAIL;
+
+ pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
+ pxmitbuf->dma_transfer_addr = 0;
+
+ pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pxmitbuf->pxmit_urb) {
+ kfree(pxmitbuf->pallocated_buf);
+ return _FAIL;
+ }
+
+ return _SUCCESS;
+}
+
+static void rtw_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitbuf,
+ u32 free_sz)
+{
+ usb_free_urb(pxmitbuf->pxmit_urb);
+ kfree(pxmitbuf->pallocated_buf);
+}
+
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
{
int i;
@@ -108,7 +134,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (!pxmitpriv->pallocated_xmitbuf) {
res = _FAIL;
- goto exit;
+ goto free_frame_buf;
}
pxmitpriv->pxmitbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmitbuf), 4);
@@ -125,12 +151,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->ext_tag = false;
/* Tx buf allocation may fail sometimes, so sleep and retry. */
- res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ res = rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
if (res == _FAIL) {
msleep(10);
- res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ res = rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
if (res == _FAIL)
- goto exit;
+ goto free_xmitbuf;
}
pxmitbuf->flags = XMIT_VO_QUEUE;
@@ -148,7 +174,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (!pxmitpriv->pallocated_xmit_extbuf) {
res = _FAIL;
- goto exit;
+ goto free_xmitbuf;
}
pxmitpriv->pxmit_extbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
@@ -162,10 +188,10 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->padapter = padapter;
pxmitbuf->ext_tag = true;
- res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
+ res = rtw_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
if (res == _FAIL) {
res = _FAIL;
- goto exit;
+ goto free_xmit_extbuf;
}
list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmit_extbuf_queue.queue);
@@ -176,7 +202,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (rtw_alloc_hwxmits(padapter)) {
res = _FAIL;
- goto exit;
+ goto free_xmit_extbuf;
}
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
@@ -200,11 +226,54 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
rtl8188eu_init_xmit_priv(padapter);
-exit:
+ return _SUCCESS;
+free_xmit_extbuf:
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+ while (i--) {
+ rtw_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+ pxmitbuf++;
+ }
+ vfree(pxmitpriv->pallocated_xmit_extbuf);
+ i = NR_XMITBUFF;
+free_xmitbuf:
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+ while (i--) {
+ rtw_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ pxmitbuf++;
+ }
+ vfree(pxmitpriv->pallocated_xmitbuf);
+free_frame_buf:
+ vfree(pxmitpriv->pallocated_frame_buf);
+exit:
return res;
}
+static void rtw_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
+{
+ u16 queue;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
+ (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ }
+
+ dev_kfree_skb_any(pkt);
+}
+
+void rtw_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
+{
+ if (pxframe->pkt)
+ rtw_pkt_complete(padapter, pxframe->pkt);
+ pxframe->pkt = NULL;
+}
+
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
{
int i;
@@ -218,13 +287,13 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
return;
for (i = 0; i < NR_XMITFRAME; i++) {
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
pxmitframe++;
}
for (i = 0; i < NR_XMITBUFF; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ rtw_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
pxmitbuf++;
}
@@ -234,7 +303,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
for (i = 0; i < num_xmit_extbuf; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+ rtw_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
pxmitbuf++;
}
@@ -378,18 +447,59 @@ u8 qos_acm(u8 acm_mask, u8 priority)
return change_priority;
}
+static void rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
+{
+ if (!pktptr) {
+ pr_err("8188eu: pktptr is NULL\n");
+ return;
+ }
+ if (!pfile) {
+ pr_err("8188eu: pfile is NULL\n");
+ return;
+ }
+ pfile->pkt = pktptr;
+ pfile->cur_addr = pktptr->data;
+ pfile->buf_start = pktptr->data;
+ pfile->pkt_len = pktptr->len;
+ pfile->buf_len = pktptr->len;
+
+ pfile->cur_buffer = pfile->buf_start;
+}
+
+static uint rtw_remainder_len(struct pkt_file *pfile)
+{
+ return pfile->buf_len - ((size_t)(pfile->cur_addr) -
+ (size_t)(pfile->buf_start));
+}
+
+static uint rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
+{
+ uint len;
+
+ len = rtw_remainder_len(pfile);
+ len = (rlen > len) ? len : rlen;
+
+ if (rmem)
+ skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len);
+
+ pfile->cur_addr += len;
+ pfile->pkt_len -= len;
+
+ return len;
+}
+
static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
{
struct ethhdr etherhdr;
struct iphdr ip_hdr;
s32 user_prio = 0;
- _rtw_open_pktfile(ppktfile->pkt, ppktfile);
- _rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
+ rtw_open_pktfile(ppktfile->pkt, ppktfile);
+ rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
/* get user_prio from IP hdr */
if (pattrib->ether_type == 0x0800) {
- _rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
+ rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
/* user_prio = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
user_prio = ip_hdr.tos >> 5;
} else if (pattrib->ether_type == 0x888e) {
@@ -418,8 +528,8 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
- _rtw_open_pktfile(pkt, &pktfile);
- _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
+ rtw_open_pktfile(pkt, &pktfile);
+ rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
pattrib->ether_type = ntohs(etherhdr.h_proto);
@@ -447,7 +557,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
/* to prevent DHCP protocol fail */
u8 tmp[24];
- _rtw_pktfile_read(&pktfile, &tmp[0], 24);
+ rtw_pktfile_read(&pktfile, &tmp[0], 24);
pattrib->dhcp_pkt = 0;
if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
if (((tmp[21] == 68) && (tmp[23] == 67)) ||
@@ -460,9 +570,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
}
}
- if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
- rtw_set_scan_deny(padapter, 3000);
-
/* If EAPOL , ARP , OR DHCP packet, driver must be in active mode. */
if ((pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
@@ -897,8 +1004,8 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
goto exit;
}
- _rtw_open_pktfile(pkt, &pktfile);
- _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
+ rtw_open_pktfile(pkt, &pktfile);
+ rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
frg_inx = 0;
frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
@@ -956,9 +1063,9 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
if (bmcst) {
/* don't do fragment to broadcast/multicast packets */
- mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
+ mem_sz = rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
} else {
- mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
+ mem_sz = rtw_pktfile_read(&pktfile, pframe, mpdu_len);
}
pframe += mem_sz;
@@ -970,7 +1077,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
frg_inx++;
- if (bmcst || rtw_endofpktfile(&pktfile)) {
+ if (bmcst || pktfile.pkt_len == 0) {
pattrib->nr_frags = frg_inx;
pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
@@ -1286,7 +1393,7 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
spin_unlock_bh(&pfree_xmit_queue->lock);
if (pndis_pkt)
- rtw_os_pkt_complete(padapter, pndis_pkt);
+ rtw_pkt_complete(padapter, pndis_pkt);
exit:
@@ -1945,7 +2052,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_unlock_bh(&psta->sleep_q.lock);
if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta->sleep_q.lock);
}
@@ -1995,7 +2102,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_unlock_bh(&psta_bmc->sleep_q.lock);
if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta_bmc->sleep_q.lock);
}
@@ -2069,7 +2176,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
pxmitframe->attrib.triggered = 1;
if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -2136,3 +2243,105 @@ void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
if (pxmitpriv->ack_tx)
rtw_sctx_done_err(&pack_tx_ops, status);
}
+
+static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u16 queue;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ /* No free space for Tx, tx_worker is too slow */
+ if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (pxmitpriv->free_xmitframe_cnt <= 4) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ }
+ }
+}
+
+static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct list_head *phead, *plist;
+ struct sk_buff *newskb;
+ struct sta_info *psta = NULL;
+ s32 res;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+ plist = phead->next;
+
+ /* free sta asoc_queue */
+ while (phead != plist) {
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ plist = plist->next;
+
+ /* avoid come from STA1 and send back STA1 */
+ if (!memcmp(psta->hwaddr, &skb->data[6], 6))
+ continue;
+
+ newskb = skb_copy(skb, GFP_ATOMIC);
+
+ if (newskb) {
+ memcpy(newskb->data, psta->hwaddr, 6);
+ res = rtw_xmit(padapter, &newskb);
+ if (res < 0) {
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(newskb);
+ } else {
+ pxmitpriv->tx_pkts++;
+ }
+ } else {
+ pxmitpriv->tx_drop++;
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+ return false; /* Caller shall tx this multicast frame via normal way. */
+ }
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+ dev_kfree_skb_any(skb);
+ return true;
+}
+
+netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ s32 res = 0;
+
+ if (!rtw_if_up(padapter))
+ goto drop_packet;
+
+ rtw_check_xmit_resource(padapter, pkt);
+
+ if (!rtw_mc2u_disable && check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
+ (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
+ (padapter->registrypriv.wifi_spec == 0)) {
+ if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
+ res = rtw_mlcst2unicst(padapter, pkt);
+ if (res)
+ goto exit;
+ }
+ }
+
+ res = rtw_xmit(padapter, &pkt);
+ if (res < 0)
+ goto drop_packet;
+
+ pxmitpriv->tx_pkts++;
+ goto exit;
+
+drop_packet:
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(pkt);
+
+exit:
+ return NETDEV_TX_OK;
+}
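_rtw_init_xmit_priv() now unwinds through dedicated labels instead of a single exit label that leaked earlier allocations. The same pattern, reduced to a hypothetical two-allocation example:

/* illustrative sketch only -- not part of the patch */
#include <linux/slab.h>

static int demo_init(void **a, void **b)
{
	*a = kzalloc(64, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;

	*b = kzalloc(64, GFP_KERNEL);
	if (!*b)
		goto free_a;		/* undo only what already succeeded */

	return 0;

free_a:
	kfree(*a);
	*a = NULL;
	return -ENOMEM;
}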
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
index 7901d0afa2e7..23b7205722b5 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
@@ -166,7 +166,14 @@ static u32 array_agc_tab_1t_8188e[] = {
0xC78, 0x407F0001,
};
-enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
+static void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
+{
+ rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+}
+
+int ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
@@ -176,7 +183,6 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
struct adapter *adapter = dm_odm->Adapter;
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
- enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
hex += ODM_ITRF_USB << 8;
hex += ODM_CE << 16;
@@ -187,7 +193,7 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
if (!pxmit_frame) {
pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return HAL_STATUS_FAILURE;
+ return -ENOMEM;
}
}
@@ -238,10 +244,10 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
if (biol) {
if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
printk("~~~ %s IOL_exec_cmds Failed !!!\n", __func__);
- rst = HAL_STATUS_FAILURE;
+ return -1;
}
}
- return rst;
+ return 0;
}
/******************************************************************************
@@ -442,7 +448,31 @@ static u32 array_phy_reg_1t_8188e[] = {
0xF00, 0x00000300,
};
-enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
+static void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
+{
+ if (Addr == 0xfe) {
+ msleep(50);
+ } else if (Addr == 0xfd) {
+ mdelay(5);
+ } else if (Addr == 0xfc) {
+ mdelay(1);
+ } else if (Addr == 0xfb) {
+ udelay(50);
+ } else if (Addr == 0xfa) {
+ udelay(5);
+ } else if (Addr == 0xf9) {
+ udelay(1);
+ } else {
+ if (Addr == 0xa24)
+ pDM_Odm->RFCalibrateInfo.RegA24 = Data;
+ rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
+
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+ }
+}
+
+int ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
@@ -452,7 +482,6 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
struct adapter *adapter = dm_odm->Adapter;
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
- enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
hex += ODM_ITRF_USB << 8;
hex += ODM_CE << 16;
hex += 0xFF000000;
@@ -462,7 +491,7 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
if (!pxmit_frame) {
pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return HAL_STATUS_FAILURE;
+ return -ENOMEM;
}
}
@@ -544,11 +573,11 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
}
if (biol) {
if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
- rst = HAL_STATUS_FAILURE;
pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
+ return -1;
}
}
- return rst;
+ return 0;
}
/******************************************************************************
@@ -647,6 +676,25 @@ static u32 array_phy_reg_pg_8188e[] = {
};
+static void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask,
+ u32 Data)
+{
+ if (Addr == 0xfe)
+ msleep(50);
+ else if (Addr == 0xfd)
+ mdelay(5);
+ else if (Addr == 0xfc)
+ mdelay(1);
+ else if (Addr == 0xfb)
+ udelay(50);
+ else if (Addr == 0xfa)
+ udelay(5);
+ else if (Addr == 0xf9)
+ udelay(1);
+ else
+ storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
+}
+
void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex;
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
index 77b25885c63b..da71867bcca3 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
@@ -126,7 +126,12 @@ static u32 array_MAC_REG_8188E[] = {
0x70B, 0x00000087,
};
-enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
+static void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
+{
+ rtw_write8(pDM_Odm->Adapter, Addr, Data);
+}
+
+int ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
{
#define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = array[i]; v2 = array[i + 1]; } while (0)
@@ -139,7 +144,6 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
struct adapter *adapt = dm_odm->Adapter;
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
- enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
hex += ODM_ITRF_USB << 8;
hex += ODM_CE << 16;
hex += 0xFF000000;
@@ -150,7 +154,7 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
pxmit_frame = rtw_IOL_accquire_xmit_frame(adapt);
if (!pxmit_frame) {
pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return HAL_STATUS_FAILURE;
+ return -ENOMEM;
}
}
@@ -201,8 +205,8 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
if (biol) {
if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
pr_info("~~~ MAC IOL_exec_cmds Failed !!!\n");
- rst = HAL_STATUS_FAILURE;
+ return -1;
}
}
- return rst;
+ return 0;
}
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
index 08cbfce3808d..a4c3d3d149f7 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
@@ -130,7 +130,37 @@ static u32 Array_RadioA_1T_8188E[] = {
0x000, 0x00033E60,
};
-enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
+static void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Data, u32 RegAddr)
+{
+ if (Addr == 0xffe) {
+ msleep(50);
+ } else if (Addr == 0xfd) {
+ mdelay(5);
+ } else if (Addr == 0xfc) {
+ mdelay(1);
+ } else if (Addr == 0xfb) {
+ udelay(50);
+ } else if (Addr == 0xfa) {
+ udelay(5);
+ } else if (Addr == 0xf9) {
+ udelay(1);
+ } else {
+ rtl8188e_PHY_SetRFReg(pDM_Odm->Adapter, RegAddr, bRFRegOffsetMask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+ }
+}
+
+static void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
+{
+ u32 content = 0x1000; /* RF_Content: radioa_txt */
+ u32 maskforPhySet = (u32)(content & 0xE000);
+
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, Addr | maskforPhySet);
+}
+
+int ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
{
#define READ_NEXT_PAIR(v1, v2, i) do \
{ i += 2; v1 = Array[i]; \
@@ -144,7 +174,6 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
struct adapter *Adapter = pDM_Odm->Adapter;
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
- enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
hex += ODM_ITRF_USB << 8;
hex += ODM_CE << 16;
@@ -155,7 +184,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
pxmit_frame = rtw_IOL_accquire_xmit_frame(Adapter);
if (!pxmit_frame) {
pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return HAL_STATUS_FAILURE;
+ return -ENOMEM;
}
}
@@ -232,9 +261,9 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
}
if (biol) {
if (!rtl8188e_IOL_exec_cmds_sync(pDM_Odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
- rst = HAL_STATUS_FAILURE;
pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
+ return -1;
}
}
- return rst;
+ return 0;
}
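As in the MAC case, the RF loader now returns 0 or a negative code, and it absorbs the per-entry helper from the deleted odm_RegConfig8188E.c: addresses 0xffe and 0xf9-0xfd are delay opcodes rather than RF registers. A standalone sketch of that table walk, with a made-up table and printf() standing in for the register write and the msleep()/udelay() calls:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: delays are printed instead of sleeping. */
static void apply_entry(uint32_t addr, uint32_t data)
{
	if (addr == 0xffe || (addr >= 0xf9 && addr <= 0xfd))
		printf("delay opcode 0x%03x (data ignored)\n", (unsigned)addr);
	else
		printf("write RF reg 0x%03x = 0x%08x\n", (unsigned)addr, (unsigned)data);
}

int main(void)
{
	/* Hypothetical table in the same addr/data pair layout as Array_RadioA_1T_8188E. */
	static const uint32_t table[] = {
		0x000, 0x00030000,
		0xffe, 0x00000000,	/* 50 ms settle time in the real driver */
		0x001, 0x00040029,
	};
	size_t i;

	for (i = 0; i + 1 < sizeof(table) / sizeof(table[0]); i += 2)
		apply_entry(table[i], table[i + 1]);
	return 0;
}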
diff --git a/drivers/staging/r8188eu/hal/hal_com.c b/drivers/staging/r8188eu/hal/hal_com.c
index 6a1cdc67335b..33967eb3c0d0 100644
--- a/drivers/staging/r8188eu/hal/hal_com.c
+++ b/drivers/staging/r8188eu/hal/hal_com.c
@@ -137,176 +137,3 @@ void HalSetBrateCfg(struct adapter *adapt, u8 *brates, u16 *rate_cfg)
}
}
}
-
-static void one_out_pipe(struct adapter *adapter)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-}
-
-static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- if (wifi_cfg) { /* WMM */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 0, 1, 0, 1, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-
- } else {/* typical setting */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 1, 1, 0, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
- }
-}
-
-static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- if (wifi_cfg) {/* for WMM */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 1, 2, 1, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:N, 2:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-
- } else {/* typical setting */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 2, 2, 1, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:N, 2:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[2];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
- }
-}
-
-bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
-{
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
- bool wifi_cfg = pregistrypriv->wifi_spec;
- bool result = true;
-
- switch (numoutpipe) {
- case 2:
- two_out_pipe(adapter, wifi_cfg);
- break;
- case 3:
- three_out_pipe(adapter, wifi_cfg);
- break;
- case 1:
- one_out_pipe(adapter);
- break;
- default:
- result = false;
- break;
- }
- return result;
-}
-
-/*
-* C2H event format:
-* Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
-* BITS [127:120] [119:16] [15:8] [7:4] [3:0]
-*/
-
-s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
-{
- s32 ret = _FAIL;
- struct c2h_evt_hdr *c2h_evt;
- int i;
- u8 trigger;
-
- if (!buf)
- goto exit;
-
- ret = rtw_read8(adapter, REG_C2HEVT_CLEAR, &trigger);
- if (ret)
- return _FAIL;
-
- if (trigger == C2H_EVT_HOST_CLOSE)
- goto exit; /* Not ready */
- else if (trigger != C2H_EVT_FW_CLOSE)
- goto clear_evt; /* Not a valid value */
-
- c2h_evt = (struct c2h_evt_hdr *)buf;
-
- memset(c2h_evt, 0, 16);
-
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL, buf);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
-
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1, buf + 1);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
- /* Read the content */
- for (i = 0; i < c2h_evt->plen; i++) {
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
- sizeof(*c2h_evt) + i, c2h_evt->payload + i);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
- }
-
- ret = _SUCCESS;
-
-clear_evt:
- /*
- * Clear event to notify FW we have read the command.
- * If this field isn't clear, the FW won't update the next
- * command message.
- */
- rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
-exit:
- return ret;
-}
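Hal_MappingOutPipe() and its helpers are not lost; the Queue2Pipe mapping reappears in usb_halinit.c further down. The idea is an 8-entry table mapping the VO/VI/BE/BK and management queues onto however many USB bulk-out pipes the device exposes. A toy sketch of the one-pipe case (the pipe addresses are invented):

#include <stdio.h>

/* Queue indexes as used by Queue2Pipe[]: VO, VI, BE, BK, BCN, MGT, HIGH, TXCMD. */
enum { VO, VI, BE, BK, BCN, MGT, HIGH, TXCMD, NQUEUES };

int main(void)
{
	int pipes[3] = { 0x01, 0x02, 0x03 };	/* hypothetical stand-ins for RtOutPipe[] */
	int queue2pipe[NQUEUES];
	int q;

	/* One-out-pipe case from the patch: every queue shares pipe 0. */
	for (q = 0; q < NQUEUES; q++)
		queue2pipe[q] = pipes[0];

	for (q = 0; q < NQUEUES; q++)
		printf("queue %d -> pipe %#x\n", q, (unsigned)queue2pipe[q]);
	return 0;
}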
diff --git a/drivers/staging/r8188eu/hal/odm_HWConfig.c b/drivers/staging/r8188eu/hal/odm_HWConfig.c
index 54cc3d7789cd..38f357e8aeda 100644
--- a/drivers/staging/r8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/r8188eu/hal/odm_HWConfig.c
@@ -3,38 +3,38 @@
#include "../include/drv_types.h"
-static u8 odm_QueryRxPwrPercentage(s8 AntPower)
+static u8 odm_query_rxpwrpercentage(s8 antpower)
{
- if ((AntPower <= -100) || (AntPower >= 20))
- return 0;
- else if (AntPower >= 0)
- return 100;
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
else
- return 100 + AntPower;
+ return 100 + antpower;
}
-static s32 odm_SignalScaleMapping(struct odm_dm_struct *dm_odm, s32 CurrSig)
+static s32 odm_signal_scale_mapping(struct odm_dm_struct *dm_odm, s32 currsig)
{
- s32 RetSig = 0;
-
- if (CurrSig >= 51 && CurrSig <= 100)
- RetSig = 100;
- else if (CurrSig >= 41 && CurrSig <= 50)
- RetSig = 80 + ((CurrSig - 40) * 2);
- else if (CurrSig >= 31 && CurrSig <= 40)
- RetSig = 66 + (CurrSig - 30);
- else if (CurrSig >= 21 && CurrSig <= 30)
- RetSig = 54 + (CurrSig - 20);
- else if (CurrSig >= 10 && CurrSig <= 20)
- RetSig = 42 + (((CurrSig - 10) * 2) / 3);
- else if (CurrSig >= 5 && CurrSig <= 9)
- RetSig = 22 + (((CurrSig - 5) * 3) / 2);
- else if (CurrSig >= 1 && CurrSig <= 4)
- RetSig = 6 + (((CurrSig - 1) * 3) / 2);
+ s32 retsig;
+
+ if (currsig >= 51 && currsig <= 100)
+ retsig = 100;
+ else if (currsig >= 41 && currsig <= 50)
+ retsig = 80 + ((currsig - 40) * 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 10 && currsig <= 20)
+ retsig = 42 + (((currsig - 10) * 2) / 3);
+ else if (currsig >= 5 && currsig <= 9)
+ retsig = 22 + (((currsig - 5) * 3) / 2);
+ else if (currsig >= 1 && currsig <= 4)
+ retsig = 6 + (((currsig - 1) * 3) / 2);
else
- RetSig = CurrSig;
+ retsig = currsig;
- return RetSig;
+ return retsig;
}
static u8 odm_evm_db_to_percentage(s8 value)
@@ -117,7 +117,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
break;
}
rx_pwr_all += 6;
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ PWDB_ALL = odm_query_rxpwrpercentage(rx_pwr_all);
if (!cck_highpwr) {
if (PWDB_ALL >= 80)
PWDB_ALL = ((PWDB_ALL - 80) << 1) + ((PWDB_ALL - 80) >> 1) + 80;
@@ -162,7 +162,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->RxPwr[i] = rx_pwr[i];
/* Translate DBM to percentage. */
- RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
+ RSSI = odm_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += RSSI;
pPhyInfo->RxMIMOSignalStrength[i] = (u8)RSSI;
@@ -173,7 +173,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ PWDB_ALL = odm_query_rxpwrpercentage(rx_pwr_all);
pPhyInfo->RxPWDBAll = PWDB_ALL;
pPhyInfo->RxPower = rx_pwr_all;
@@ -200,10 +200,10 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* UI BSS List signal strength(in percentage), make it good looking, from 0~100. */
/* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
if (isCCKrate) {
- pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, PWDB_ALL));/* PWDB_ALL; */
+ pPhyInfo->SignalStrength = (u8)(odm_signal_scale_mapping(dm_odm, PWDB_ALL));/* PWDB_ALL; */
} else {
if (rf_rx_num != 0)
- pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, total_rssi /= rf_rx_num));
+ pPhyInfo->SignalStrength = (u8)(odm_signal_scale_mapping(dm_odm, total_rssi /= rf_rx_num));
}
/* For 88E HW Antenna Diversity */
@@ -347,8 +347,3 @@ void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus, pPktinfo, adapt);
odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
}
-
-enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm)
-{
- return ODM_ReadAndConfig_RadioA_1T_8188E(dm_odm);
-}
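The renames keep the original arithmetic: antenna power in dBm is clamped to [-100, 0) and mapped onto a 0-100 signal percentage, with out-of-range values forced to 0. A small standalone sketch of that mapping (the sample values are illustrative):

#include <stdio.h>

/* Same clamping rule as odm_query_rxpwrpercentage(): <= -100 dBm or >= 20 -> 0 %,
 * >= 0 dBm -> 100 %, otherwise 100 + antpower. */
static unsigned char rx_power_percentage(signed char antpower)
{
	if (antpower <= -100 || antpower >= 20)
		return 0;
	if (antpower >= 0)
		return 100;
	return 100 + antpower;
}

int main(void)
{
	signed char samples[] = { -110, -90, -50, -1, 0, 19 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%4d dBm -> %u %%\n", samples[i],
		       (unsigned)rx_power_percentage(samples[i]));
	return 0;
}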
diff --git a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
deleted file mode 100644
index 0fa17a99f9e9..000000000000
--- a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/drv_types.h"
-
-static void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Data, u32 RegAddr)
-{
- if (Addr == 0xffe) {
- msleep(50);
- } else if (Addr == 0xfd) {
- mdelay(5);
- } else if (Addr == 0xfc) {
- mdelay(1);
- } else if (Addr == 0xfb) {
- udelay(50);
- } else if (Addr == 0xfa) {
- udelay(5);
- } else if (Addr == 0xf9) {
- udelay(1);
- } else {
- rtl8188e_PHY_SetRFReg(pDM_Odm->Adapter, RegAddr, bRFRegOffsetMask, Data);
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
-}
-
-void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
-{
- u32 content = 0x1000; /* RF_Content: radioa_txt */
- u32 maskforPhySet = (u32)(content & 0xE000);
-
- odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, Addr | maskforPhySet);
-}
-
-void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
-{
- rtw_write8(pDM_Odm->Adapter, Addr, Data);
-}
-
-void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
-{
- rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
-}
-
-void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Bitmask, u32 Data)
-{
- if (Addr == 0xfe)
- msleep(50);
- else if (Addr == 0xfd)
- mdelay(5);
- else if (Addr == 0xfc)
- mdelay(1);
- else if (Addr == 0xfb)
- udelay(50);
- else if (Addr == 0xfa)
- udelay(5);
- else if (Addr == 0xf9)
- udelay(1);
- else
- storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
-}
-
-void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
-{
- if (Addr == 0xfe) {
- msleep(50);
- } else if (Addr == 0xfd) {
- mdelay(5);
- } else if (Addr == 0xfc) {
- mdelay(1);
- } else if (Addr == 0xfb) {
- udelay(50);
- } else if (Addr == 0xfa) {
- udelay(5);
- } else if (Addr == 0xf9) {
- udelay(1);
- } else {
- if (Addr == 0xa24)
- pDM_Odm->RFCalibrateInfo.RegA24 = Data;
- rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
-
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index b01ee1695fee..8310d7f53982 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -5,8 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/mlme_osdep.h"
#include "../include/rtw_ioctl_set.h"
#include "../include/rtl8188e_hal.h"
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
index 5b8f1a912bbb..158260547f2b 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
@@ -526,43 +526,38 @@ void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _size_byte, u8 *pbuf)
Hal_EfuseReadEFuse88E(Adapter, 0, _size_byte, pbuf);
}
-static void dump_chip_info(struct HAL_VERSION chip_vers)
+static void dump_chip_info(struct adapter *adapter, struct HAL_VERSION chip_vers)
{
- uint cnt = 0;
- char buf[128];
-
- cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_");
- cnt += sprintf((buf + cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
- "Normal_Chip" : "Test_Chip");
- cnt += sprintf((buf + cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
- "TSMC" : "UMC");
+ struct net_device *netdev = adapter->pnetdev;
+ char *cut = NULL;
+ char buf[25];
switch (chip_vers.CUTVersion) {
case A_CUT_VERSION:
- cnt += sprintf((buf + cnt), "A_CUT_");
+ cut = "A_CUT";
break;
case B_CUT_VERSION:
- cnt += sprintf((buf + cnt), "B_CUT_");
+ cut = "B_CUT";
break;
case C_CUT_VERSION:
- cnt += sprintf((buf + cnt), "C_CUT_");
+ cut = "C_CUT";
break;
case D_CUT_VERSION:
- cnt += sprintf((buf + cnt), "D_CUT_");
+ cut = "D_CUT";
break;
case E_CUT_VERSION:
- cnt += sprintf((buf + cnt), "E_CUT_");
+ cut = "E_CUT";
break;
default:
- cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_", chip_vers.CUTVersion);
+ snprintf(buf, sizeof(buf), "UNKNOWN_CUT(%d)", chip_vers.CUTVersion);
+ cut = buf;
break;
}
- cnt += sprintf((buf + cnt), "1T1R_");
-
- cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0);
-
- pr_info("%s", buf);
+ netdev_dbg(netdev, "Chip Version Info: CHIP_8188E_%s_%s_%s_1T1R_RomVer(%d)\n",
+ IS_NORMAL_CHIP(chip_vers) ? "Normal_Chip" : "Test_Chip",
+ IS_CHIP_VENDOR_TSMC(chip_vers) ? "TSMC" : "UMC",
+ cut, 0);
}
void rtl8188e_read_chip_version(struct adapter *padapter)
@@ -581,7 +576,7 @@ void rtl8188e_read_chip_version(struct adapter *padapter)
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
- dump_chip_info(ChipVersion);
+ dump_chip_info(padapter, ChipVersion);
pHalData->VersionID = ChipVersion;
}
@@ -688,6 +683,7 @@ Hal_EfuseParseIDCode88E(
)
{
struct eeprom_priv *pEEPROM = &padapter->eeprompriv;
+ struct net_device *netdev = padapter->pnetdev;
u16 EEPROMId;
/* Check 0x8129 again for making sure autoload status!! */
@@ -699,7 +695,7 @@ Hal_EfuseParseIDCode88E(
pEEPROM->bautoload_fail_flag = false;
}
- pr_info("EEPROM ID = 0x%04x\n", EEPROMId);
+ netdev_dbg(netdev, "EEPROM ID = 0x%04x\n", EEPROMId);
}
static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, u8 *PROMContent, bool AutoLoadFail)
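dump_chip_info() no longer accumulates the message piecewise with sprintf(); it resolves the cut name first and emits a single netdev_dbg() line. A userspace sketch of the same shape, assuming for illustration that the cut constants index a small table (the real enum values may differ):

#include <stdio.h>

/* Hypothetical cut-version lookup mirroring the switch in dump_chip_info();
 * unknown values fall back to a formatted string in the caller's buffer. */
static const char *cut_name(int cut, char *buf, size_t buflen)
{
	static const char *names[] = { "A_CUT", "B_CUT", "C_CUT", "D_CUT", "E_CUT" };

	if (cut >= 0 && cut < (int)(sizeof(names) / sizeof(names[0])))
		return names[cut];
	snprintf(buf, buflen, "UNKNOWN_CUT(%d)", cut);
	return buf;
}

int main(void)
{
	char buf[25];

	/* One formatted line instead of piecewise sprintf() into a buffer. */
	printf("Chip Version Info: CHIP_8188E_%s_%s_%s_1T1R_RomVer(%d)\n",
	       "Normal_Chip", "TSMC", cut_name(7, buf, sizeof(buf)), 0);
	return 0;
}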
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index dea6d915a1f4..532c63bce0bf 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -12,26 +12,12 @@
/* 1. BB register R/W API */
/* */
-/**
-* Function: phy_CalculateBitShift
-*
-* OverView: Get shifted position of the BitMask
-*
-* Input:
-* u32 BitMask,
-*
-* Output: none
-* Return: u32 Return the shift bit bit position of the mask
-*/
-static u32 phy_CalculateBitShift(u32 BitMask)
+/* Get shifted position of the bit mask */
+static u32 phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((BitMask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ return i ? i - 1 : 32;
}
/**
@@ -62,7 +48,7 @@ rtl8188e_PHY_QueryBBReg(
if (res)
return 0;
- BitShift = phy_CalculateBitShift(BitMask);
+ BitShift = phy_calculate_bit_shift(BitMask);
ReturnValue = (OriginalValue & BitMask) >> BitShift;
return ReturnValue;
}
@@ -95,7 +81,7 @@ void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u3
if (res)
return;
- BitShift = phy_CalculateBitShift(BitMask);
+ BitShift = phy_calculate_bit_shift(BitMask);
Data = ((OriginalValue & (~BitMask)) | (Data << BitShift));
}
@@ -267,7 +253,7 @@ u32 rtl8188e_PHY_QueryRFReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
Original_Value = phy_RFSerialRead(Adapter, RegAddr);
- BitShift = phy_CalculateBitShift(BitMask);
+ BitShift = phy_calculate_bit_shift(BitMask);
Readback_Value = (Original_Value & BitMask) >> BitShift;
return Readback_Value;
}
@@ -302,7 +288,7 @@ rtl8188e_PHY_SetRFReg(
/* RF data is 12 bits only */
if (BitMask != bRFRegOffsetMask) {
Original_Value = phy_RFSerialRead(Adapter, RegAddr);
- BitShift = phy_CalculateBitShift(BitMask);
+ BitShift = phy_calculate_bit_shift(BitMask);
Data = ((Original_Value & (~BitMask)) | (Data << BitShift));
}
@@ -337,7 +323,7 @@ s32 PHY_MACConfig8188E(struct adapter *Adapter)
/* */
/* Config MAC */
/* */
- if (HAL_STATUS_FAILURE == ODM_ReadAndConfig_MAC_REG_8188E(&pHalData->odmpriv))
+ if (ODM_ReadAndConfig_MAC_REG_8188E(&pHalData->odmpriv))
rtStatus = _FAIL;
/* 2010.07.13 AMPDU aggregation number B */
@@ -469,7 +455,7 @@ static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
/* 1. Read PHY_REG.TXT BB INIT!! */
/* We will separate as 88C / 92C according to chip version */
/* */
- if (HAL_STATUS_FAILURE == ODM_ReadAndConfig_PHY_REG_1T_8188E(&pHalData->odmpriv))
+ if (ODM_ReadAndConfig_PHY_REG_1T_8188E(&pHalData->odmpriv))
return _FAIL;
/* 2. If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
@@ -479,7 +465,7 @@ static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
}
/* 3. BB AGC table Initialization */
- if (HAL_STATUS_FAILURE == ODM_ReadAndConfig_AGC_TAB_1T_8188E(&pHalData->odmpriv))
+ if (ODM_ReadAndConfig_AGC_TAB_1T_8188E(&pHalData->odmpriv))
return _FAIL;
return _SUCCESS;
@@ -521,15 +507,6 @@ PHY_BBConfig8188E(
return rtStatus;
}
-int PHY_RFConfig8188E(struct adapter *Adapter)
-{
- int rtStatus = _SUCCESS;
-
- /* RF config */
- rtStatus = PHY_RF6052_Config8188E(Adapter);
- return rtStatus;
-}
-
static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
u8 *ofdmPowerLevel, u8 *BW20PowerLevel,
u8 *BW40PowerLevel)
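phy_calculate_bit_shift() is now just ffs() minus one, with 32 returned for an empty mask so the old loop's behaviour is preserved; the resulting shift feeds the usual read-modify-write on the masked register field. A sketch of that pattern with an invented register value and mask:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Position of the lowest set bit; 32 for an empty mask, matching the old open-coded loop. */
static uint32_t bit_shift(uint32_t mask)
{
	int i = ffs(mask);

	return i ? i - 1 : 32;
}

int main(void)
{
	uint32_t reg = 0x12345678;
	uint32_t mask = 0x0000ff00;		/* field occupying bits 8..15 */
	uint32_t shift = bit_shift(mask);
	uint32_t field = (reg & mask) >> shift;	/* read: extract the field */

	reg = (reg & ~mask) | (0xab << shift);	/* write: replace the field */
	printf("field = 0x%02x, new reg = 0x%08x\n", (unsigned)field, (unsigned)reg);
	return 0;
}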
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
index d043b7bc4142..e5ec6e563fbd 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
@@ -366,7 +366,7 @@ rtl8188e_PHY_RF6052SetOFDMTxPower(
}
}
-static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
+int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
{
struct bb_reg_def *pPhyReg;
struct hal_data_8188e *pHalData = &Adapter->haldata;
@@ -396,7 +396,7 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
udelay(1);/* PlatformStallExecution(1); */
/*----Initialize RF fom connfiguration file----*/
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv))
+ if (ODM_ReadAndConfig_RadioA_1T_8188E(&pHalData->odmpriv))
rtStatus = _FAIL;
/*----Restore RFENV control type----*/;
@@ -404,14 +404,3 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
return rtStatus;
}
-
-int PHY_RF6052_Config8188E(struct adapter *Adapter)
-{
- int rtStatus = _SUCCESS;
-
- /* */
- /* Config BB and RF */
- /* */
- rtStatus = phy_RF6052_Config_ParaFile(Adapter);
- return rtStatus;
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_xmit.c b/drivers/staging/r8188eu/hal/rtl8188e_xmit.c
deleted file mode 100644
index 46b871f3f631..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_xmit.c
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_XMIT_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtl8188e_hal.h"
-
-void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
-{
- struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
-
- if (txrpt_ccx->int_ccx) {
- if (txrpt_ccx->pkt_ok)
- rtw_ack_tx_done(&adapter->xmitpriv,
- RTW_SCTX_DONE_SUCCESS);
- else
- rtw_ack_tx_done(&adapter->xmitpriv,
- RTW_SCTX_DONE_CCX_PKT_FAIL);
- }
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c b/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
deleted file mode 100644
index def6d0d6e402..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
+++ /dev/null
@@ -1,91 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188EU_RECV_C_
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/mlme_osdep.h"
-
-#include "../include/usb_ops.h"
-#include "../include/wifi.h"
-
-#include "../include/rtl8188e_hal.h"
-
-int rtl8188eu_init_recv_priv(struct adapter *padapter)
-{
- struct recv_priv *precvpriv = &padapter->recvpriv;
- int i, res = _SUCCESS;
- struct recv_buf *precvbuf;
-
- tasklet_init(&precvpriv->recv_tasklet,
- rtl8188eu_recv_tasklet,
- (unsigned long)padapter);
-
- /* init recv_buf */
- rtw_init_queue(&precvpriv->free_recv_buf_queue);
-
- precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
- GFP_KERNEL);
- if (!precvpriv->pallocated_recv_buf) {
- res = _FAIL;
- goto exit;
- }
-
- precvpriv->precv_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_recv_buf), 4);
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
- if (res == _FAIL)
- break;
- precvbuf->adapter = padapter;
- precvbuf++;
- }
- precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
- skb_queue_head_init(&precvpriv->rx_skb_queue);
- {
- int i;
- size_t tmpaddr = 0;
- size_t alignment = 0;
- struct sk_buff *pskb = NULL;
-
- skb_queue_head_init(&precvpriv->free_recv_skb_queue);
-
- for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
- pskb = __netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, GFP_KERNEL);
- if (pskb) {
- pskb->dev = padapter->pnetdev;
- tmpaddr = (size_t)pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
- skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
-
- skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
- }
- pskb = NULL;
- }
- }
-exit:
- return res;
-}
-
-void rtl8188eu_free_recv_priv(struct adapter *padapter)
-{
- int i;
- struct recv_buf *precvbuf;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- rtw_os_recvbuf_resource_free(padapter, precvbuf);
- precvbuf++;
- }
-
- kfree(precvpriv->pallocated_recv_buf);
-
- skb_queue_purge(&precvpriv->rx_skb_queue);
-
- skb_queue_purge(&precvpriv->free_recv_skb_queue);
-}
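The deleted rtl8188eu_init_recv_priv() pre-allocated receive skbs and aligned their data pointers by reserving up to RECVBUFF_ALIGN_SZ bytes. The alignment arithmetic itself is ordinary power-of-two rounding; a sketch, assuming an 8-byte alignment purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define RECVBUFF_ALIGN_SZ 8	/* assumed power-of-two alignment for this sketch */

int main(void)
{
	uint8_t raw[64];
	uintptr_t addr = (uintptr_t)raw + 3;	/* deliberately misaligned start */
	uintptr_t misalign = addr & (RECVBUFF_ALIGN_SZ - 1);
	uintptr_t aligned = addr + (RECVBUFF_ALIGN_SZ - misalign);

	/* Same arithmetic the removed code fed to skb_reserve(): an already aligned
	 * pointer still skips a full RECVBUFF_ALIGN_SZ bytes. */
	printf("start %p -> aligned %p\n", (void *)addr, (void *)aligned);
	return 0;
}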
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
index bdfa51949289..8e4a5acc0b18 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
@@ -431,7 +431,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
/* always return ndis_packet after rtw_xmitframe_coalesce */
- rtw_os_xmit_complete(adapt, pxmitframe);
+ rtw_xmit_complete(adapt, pxmitframe);
/* 3 2. aggregate same priority and same DA(AP or STA) frames */
pfirstframe = pxmitframe;
@@ -501,7 +501,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
/* always return ndis_packet after rtw_xmitframe_coalesce */
- rtw_os_xmit_complete(adapt, pxmitframe);
+ rtw_xmit_complete(adapt, pxmitframe);
/* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index ff074d246dab..d28b4dc2a767 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -13,40 +13,77 @@
#include "../include/usb_osintf.h"
#include "../include/HalPwrSeqCmd.h"
-static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
+static void one_out_pipe(struct adapter *adapter)
{
- struct hal_data_8188e *haldata = &adapt->haldata;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
- switch (NumOutPipe) {
- case 3:
- haldata->OutEpQueueSel = TX_SELE_HQ | TX_SELE_LQ | TX_SELE_NQ;
- haldata->OutEpNumber = 3;
- break;
- case 2:
- haldata->OutEpQueueSel = TX_SELE_HQ | TX_SELE_NQ;
- haldata->OutEpNumber = 2;
- break;
- case 1:
- haldata->OutEpQueueSel = TX_SELE_HQ;
- haldata->OutEpNumber = 1;
- break;
- default:
- break;
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+}
+
+static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ /* 0:H, 1:L */
+
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
+
+ if (wifi_cfg) {
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+ } else {
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
}
}
-static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumOutPipe)
+static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ /* 0:H, 1:N, 2:L */
- _ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
- return Hal_MappingOutPipe(adapt, NumOutPipe);
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
+
+ pdvobjpriv->Queue2Pipe[3] = wifi_cfg ?
+ pdvobjpriv->RtOutPipe[1] : pdvobjpriv->RtOutPipe[2];/* BK */
}
-void rtl8188eu_interface_configure(struct adapter *adapt)
+int rtl8188eu_interface_configure(struct adapter *adapt)
{
+ struct registry_priv *pregistrypriv = &adapt->registrypriv;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
+ bool wifi_cfg = pregistrypriv->wifi_spec;
- HalUsbSetQueuePipeMapping8188EUsb(adapt, pdvobjpriv->RtNumOutPipes);
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ switch (pdvobjpriv->RtNumOutPipes) {
+ case 3:
+ haldata->out_ep_extra_queues = TX_SELE_LQ | TX_SELE_NQ;
+ three_out_pipe(adapt, wifi_cfg);
+ break;
+ case 2:
+ haldata->out_ep_extra_queues = TX_SELE_NQ;
+ two_out_pipe(adapt, wifi_cfg);
+ break;
+ case 1:
+ one_out_pipe(adapt);
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
}
u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
@@ -116,32 +153,24 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = &Adapter->haldata;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u32 numHQ = 0;
- u32 numLQ = 0;
- u32 numNQ = 0;
- u32 numPubQ;
- u32 value32;
- u8 value8;
- bool bWiFiConfig = pregistrypriv->wifi_spec;
-
- if (bWiFiConfig) {
- if (haldata->OutEpQueueSel & TX_SELE_HQ)
- numHQ = 0x29;
+ u8 numLQ = 0;
+ u8 numNQ = 0;
+ u8 numPubQ;
- if (haldata->OutEpQueueSel & TX_SELE_LQ)
+ if (pregistrypriv->wifi_spec) {
+ if (haldata->out_ep_extra_queues & TX_SELE_LQ)
numLQ = 0x1C;
/* NOTE: This step shall be proceed before writing REG_RQPN. */
- if (haldata->OutEpQueueSel & TX_SELE_NQ)
+ if (haldata->out_ep_extra_queues & TX_SELE_NQ)
numNQ = 0x1C;
- value8 = (u8)_NPQ(numNQ);
- rtw_write8(Adapter, REG_RQPN_NPQ, value8);
- numPubQ = 0xA8 - numHQ - numLQ - numNQ;
+ rtw_write8(Adapter, REG_RQPN_NPQ, numNQ);
+
+ numPubQ = 0xA8 - NUM_HQ - numLQ - numNQ;
/* TX DMA */
- value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
- rtw_write32(Adapter, REG_RQPN, value32);
+ rtw_write32(Adapter, REG_RQPN, LD_RQPN | numPubQ << 16 | numLQ << 8 | NUM_HQ);
} else {
rtw_write16(Adapter, REG_RQPN_NPQ, 0x0000);/* Just follow MP Team,??? Georgia 03/28 */
rtw_write16(Adapter, REG_RQPN_NPQ, 0x0d);
@@ -187,69 +216,20 @@ static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
rtw_write16(Adapter, REG_TRXDMA_CTRL, value16);
}
-static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
-
- u16 value = 0;
- switch (haldata->OutEpQueueSel) {
- case TX_SELE_HQ:
- value = QUEUE_HIGH;
- break;
- case TX_SELE_LQ:
- value = QUEUE_LOW;
- break;
- case TX_SELE_NQ:
- value = QUEUE_NORMAL;
- break;
- default:
- break;
- }
- _InitNormalChipRegPriority(Adapter, value, value, value, value,
- value, value);
-}
-
static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = &Adapter->haldata;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
- u16 valueHi = 0;
- u16 valueLow = 0;
-
- switch (haldata->OutEpQueueSel) {
- case (TX_SELE_HQ | TX_SELE_LQ):
- valueHi = QUEUE_HIGH;
- valueLow = QUEUE_LOW;
- break;
- case (TX_SELE_NQ | TX_SELE_LQ):
- valueHi = QUEUE_NORMAL;
- valueLow = QUEUE_LOW;
- break;
- case (TX_SELE_HQ | TX_SELE_NQ):
- valueHi = QUEUE_HIGH;
- valueLow = QUEUE_NORMAL;
- break;
- default:
- break;
- }
+ u16 bkQ, voQ;
if (!pregistrypriv->wifi_spec) {
- beQ = valueLow;
- bkQ = valueLow;
- viQ = valueHi;
- voQ = valueHi;
- mgtQ = valueHi;
- hiQ = valueHi;
+ bkQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
} else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
- beQ = valueLow;
- bkQ = valueHi;
- viQ = valueHi;
- voQ = valueLow;
- mgtQ = valueHi;
- hiQ = valueHi;
+ bkQ = QUEUE_HIGH;
+ voQ = QUEUE_NORMAL;
}
- _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+ _InitNormalChipRegPriority(Adapter, QUEUE_NORMAL, bkQ, QUEUE_HIGH,
+ voQ, QUEUE_HIGH, QUEUE_HIGH);
}
static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
@@ -277,11 +257,12 @@ static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
static void _InitQueuePriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = &Adapter->haldata;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(Adapter);
- switch (haldata->OutEpNumber) {
+ switch (pdvobjpriv->RtNumOutPipes) {
case 1:
- _InitNormalChipOneOutEpPriority(Adapter);
+ _InitNormalChipRegPriority(Adapter, QUEUE_HIGH, QUEUE_HIGH, QUEUE_HIGH,
+ QUEUE_HIGH, QUEUE_HIGH, QUEUE_HIGH);
break;
case 2:
_InitNormalChipTwoOutEpPriority(Adapter);
@@ -515,8 +496,7 @@ static int _InitBeaconParameters(struct adapter *Adapter)
return 0;
}
-static void _BeaconFunctionEnable(struct adapter *Adapter,
- bool Enable, bool Linked)
+static void _BeaconFunctionEnable(struct adapter *Adapter)
{
rtw_write8(Adapter, REG_BCN_CTRL, (BIT(4) | BIT(3) | BIT(1)));
@@ -567,7 +547,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
{
u8 value8 = 0;
u16 value16;
- u8 txpktbuf_bndy;
u32 status = _SUCCESS;
int res;
struct hal_data_8188e *haldata = &Adapter->haldata;
@@ -600,13 +579,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* HW GPIO pin. Before PHY_RFConfig8192C. */
/* 2010/08/26 MH If Efuse does not support sective suspend then disable the function. */
- if (!pregistrypriv->wifi_spec) {
- txpktbuf_bndy = TX_PAGE_BOUNDARY_88E;
- } else {
- /* for WMM */
- txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY_88E;
- }
-
_InitQueueReservedPage(Adapter);
_InitQueuePriority(Adapter);
_InitPageBoundary(Adapter);
@@ -639,7 +611,7 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
if (status == _FAIL)
goto exit;
- status = PHY_RFConfig8188E(Adapter);
+ status = phy_RF6052_Config_ParaFile(Adapter);
if (status == _FAIL)
goto exit;
@@ -647,9 +619,9 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
if (status == _FAIL)
goto exit;
- _InitTxBufferBoundary(Adapter, txpktbuf_bndy);
+ _InitTxBufferBoundary(Adapter, TX_PAGE_BOUNDARY_88E);
- status = InitLLTTable(Adapter, txpktbuf_bndy);
+ status = InitLLTTable(Adapter, TX_PAGE_BOUNDARY_88E);
if (status == _FAIL)
goto exit;
@@ -922,7 +894,7 @@ static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool
}
}
-void ReadAdapterInfo8188EU(struct adapter *Adapter)
+int ReadAdapterInfo8188EU(struct adapter *Adapter)
{
struct eeprom_priv *eeprom = &Adapter->eeprompriv;
struct led_priv *ledpriv = &Adapter->ledpriv;
@@ -933,13 +905,13 @@ void ReadAdapterInfo8188EU(struct adapter *Adapter)
/* check system boot selection */
res = rtw_read8(Adapter, REG_9346CR, &eeValue);
if (res)
- return;
+ return res;
eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN);
efuse_buf = kmalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
if (!efuse_buf)
- return;
+ return -ENOMEM;
memset(efuse_buf, 0xFF, EFUSE_MAP_LEN_88E);
if (!(eeValue & BOOT_FROM_EEPROM) && !eeprom->bautoload_fail_flag) {
@@ -961,6 +933,7 @@ void ReadAdapterInfo8188EU(struct adapter *Adapter)
ledpriv->bRegUseLed = true;
kfree(efuse_buf);
+ return 0;
}
void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
@@ -1069,7 +1042,7 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
rtw_write8(adapt, REG_RXTSF_OFFSET_CCK, 0x50);
rtw_write8(adapt, REG_RXTSF_OFFSET_OFDM, 0x50);
- _BeaconFunctionEnable(adapt, true, true);
+ _BeaconFunctionEnable(adapt);
rtw_resume_tx_beacon(adapt);
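With TX_SELE_HQ gone, the high-queue page count is the fixed NUM_HQ (0x29) and _InitQueueReservedPage() packs REG_RQPN directly instead of going through the removed _HPQ/_LPQ/_PUBQ macros. A sketch of that packing, using the WMM page counts from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define LD_RQPN		(1u << 31)	/* load bit, as in rtl8188e_spec.h */
#define NUM_HQ		0x29		/* fixed high-queue page count from the patch */

int main(void)
{
	uint8_t num_lq = 0x1c;		/* low-queue pages (wifi_spec case) */
	uint8_t num_nq = 0x1c;		/* normal-queue pages, written to REG_RQPN_NPQ */
	uint8_t num_pubq = 0xa8 - NUM_HQ - num_lq - num_nq;

	/* Byte layout of REG_RQPN: [31] load, [23:16] public, [15:8] low, [7:0] high. */
	uint32_t rqpn = LD_RQPN | (uint32_t)num_pubq << 16 | (uint32_t)num_lq << 8 | NUM_HQ;

	printf("NPQ = 0x%02x, RQPN = 0x%08x\n", num_nq, (unsigned)rqpn);
	return 0;
}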
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index c1a4d023f627..7c72f5e04d9b 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -5,7 +5,6 @@
#include "../include/drv_types.h"
#include "../include/osdep_intf.h"
#include "../include/usb_ops.h"
-#include "../include/recv_osdep.h"
#include "../include/rtl8188e_hal.h"
static int usb_read(struct intf_hdl *intf, u16 value, void *data, u8 size)
@@ -190,6 +189,20 @@ int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *data)
return RTW_STATUS_CODE(ret);
}
+static void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
+{
+ struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
+
+ if (txrpt_ccx->int_ccx) {
+ if (txrpt_ccx->pkt_ok)
+ rtw_ack_tx_done(&adapter->xmitpriv,
+ RTW_SCTX_DONE_SUCCESS);
+ else
+ rtw_ack_tx_done(&adapter->xmitpriv,
+ RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+}
+
static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
{
u8 *pbuf;
diff --git a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
index 9e6f2361b090..4a0b782c33be 100644
--- a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
@@ -80,7 +80,6 @@ void rtl8188e_PHY_SetRFReg(struct adapter *adapter, u32 regaddr, u32 mask, u32 d
/* MAC/BB/RF HAL config */
int PHY_MACConfig8188E(struct adapter *adapter);
int PHY_BBConfig8188E(struct adapter *adapter);
-int PHY_RFConfig8188E(struct adapter *adapter);
/* BB TX Power R/W */
void PHY_SetTxPowerLevel8188E(struct adapter *adapter, u8 channel);
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h b/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h
index 8270fdbc2844..0a290bc31c4d 100644
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h
+++ b/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h
@@ -10,13 +10,13 @@
* AGC_TAB_1T.TXT
******************************************************************************/
-enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *odm);
+int ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *odm);
/******************************************************************************
* PHY_REG_1T.TXT
******************************************************************************/
-enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *odm);
+int ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *odm);
/******************************************************************************
* PHY_REG_PG.TXT
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h b/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h
index 391c1754b0b6..b3d67c1a8050 100644
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h
+++ b/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h
@@ -7,7 +7,6 @@
/******************************************************************************
* MAC_REG.TXT
******************************************************************************/
-
-enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *pDM_Odm);
+int ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *pDM_Odm);
#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h b/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h
index 0c67c3df20b9..880feadb4340 100644
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h
+++ b/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h
@@ -8,6 +8,6 @@
* RadioA_1T.TXT
******************************************************************************/
-enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *odm);
+int ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *odm);
#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
index bba88a0ede61..1bd0c8f3a358 100644
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ b/drivers/staging/r8188eu/include/drv_types.h
@@ -10,8 +10,6 @@
#ifndef __DRV_TYPES_H__
#define __DRV_TYPES_H__
-#define DRV_NAME "r8188eu"
-
#include "osdep_service.h"
#include "wlan_bssdef.h"
#include "rtw_ht.h"
@@ -36,10 +34,9 @@
#include "rtl8188e_hal.h"
#include "rtw_fw.h"
-#define DRIVERVERSION "v4.1.4_6773.20130222"
+#define FW_RTL8188EU "rtlwifi/rtl8188eufw.bin"
struct registry_priv {
- u8 chip_version;
u8 rfintfs;
u8 lbkmode;
u8 hci;
@@ -222,7 +219,7 @@ struct adapter {
#define adapter_to_dvobj(adapter) (adapter->dvobj)
-int rtw_handle_dualmac(struct adapter *adapter, bool init);
+void rtw_handle_dualmac(struct adapter *adapter, bool init);
static inline u8 *myid(struct eeprom_priv *peepriv)
{
diff --git a/drivers/staging/r8188eu/include/hal_com.h b/drivers/staging/r8188eu/include/hal_com.h
index d7e333f6ce39..cd3f845e146a 100644
--- a/drivers/staging/r8188eu/include/hal_com.h
+++ b/drivers/staging/r8188eu/include/hal_com.h
@@ -143,8 +143,4 @@ u8 MRateToHwRate(u8 rate);
void HalSetBrateCfg(struct adapter *Adapter, u8 *mBratesOS, u16 *pBrateCfg);
-bool Hal_MappingOutPipe(struct adapter *pAdapter, u8 NumOutPipe);
-
-s32 c2h_evt_read(struct adapter *adapter, u8 *buf);
-
#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index ab6856d8a090..ac6e3f95c5b7 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -10,8 +10,8 @@
typedef s32 (*c2h_id_filter)(u8 id);
-void rtl8188eu_interface_configure(struct adapter *adapt);
-void ReadAdapterInfo8188EU(struct adapter *Adapter);
+int rtl8188eu_interface_configure(struct adapter *adapt);
+int ReadAdapterInfo8188EU(struct adapter *Adapter);
void rtl8188eu_init_default_value(struct adapter *adapt);
void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet);
u32 rtl8188eu_InitPowerOn(struct adapter *adapt);
@@ -39,7 +39,6 @@ void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level);
void rtw_hal_clone_data(struct adapter *dst_adapt,
struct adapter *src_adapt);
-void indicate_wx_scan_complete_event(struct adapter *padapter);
u8 rtw_do_join(struct adapter *padapter);
#endif /* __HAL_INTF_H__ */
diff --git a/drivers/staging/r8188eu/include/ioctl_cfg80211.h b/drivers/staging/r8188eu/include/ioctl_cfg80211.h
deleted file mode 100644
index 738f645f9bbc..000000000000
--- a/drivers/staging/r8188eu/include/ioctl_cfg80211.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. i*/
-
-#ifndef __IOCTL_CFG80211_H__
-#define __IOCTL_CFG80211_H__
-
-struct rtw_wdev_invit_info {
- u8 token;
- u8 flags;
- u8 status;
- u8 req_op_ch;
- u8 rsp_op_ch;
-};
-
-#define rtw_wdev_invit_info_init(invit_info) \
- do { \
- (invit_info)->token = 0; \
- (invit_info)->flags = 0x00; \
- (invit_info)->status = 0xff; \
- (invit_info)->req_op_ch = 0; \
- (invit_info)->rsp_op_ch = 0; \
- } while (0)
-
-struct rtw_wdev_priv {
- struct wireless_dev *rtw_wdev;
-
- struct adapter *padapter;
-
- struct cfg80211_scan_request *scan_request;
- spinlock_t scan_req_lock;
-
- struct net_device *pmon_ndev;/* for monitor interface */
- char ifname_mon[IFNAMSIZ + 1]; /* name of monitor interface */
-
- u8 p2p_enabled;
-
- u8 provdisc_req_issued;
-
- struct rtw_wdev_invit_info invit_info;
-
- u8 bandroid_scan;
- bool block;
- bool power_mgmt;
-};
-
-#define wdev_to_priv(w) ((struct rtw_wdev_priv *)(wdev_priv(w)))
-
-#define wiphy_to_wdev(x) \
-((struct wireless_dev *)(((struct rtw_wdev_priv *)wiphy_priv(x))->rtw_wdev))
-
-int rtw_wdev_alloc(struct adapter *padapter, struct device *dev);
-void rtw_wdev_free(struct wireless_dev *wdev);
-void rtw_wdev_unregister(struct wireless_dev *wdev);
-
-void rtw_cfg80211_init_wiphy(struct adapter *padapter);
-
-void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter);
-
-void rtw_cfg80211_indicate_connect(struct adapter *padapter);
-void rtw_cfg80211_indicate_disconnect(struct adapter *padapter);
-void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
- bool aborted);
-
-void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter,
- unsigned char *da,
- unsigned short reason);
-
-void rtw_cfg80211_issue_p2p_provision_request(struct adapter *padapter,
- const u8 *buf, size_t len);
-void rtw_cfg80211_rx_p2p_action_public(struct adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_rx_action_p2p(struct adapter *padapter, u8 *pmgmt_frame,
- uint frame_len);
-void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame,
- uint frame_len, const char *msg);
-
-int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net,
- char *buf, int len, int type);
-
-bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter);
-
-#define rtw_cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp) \
- cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp)
-#define rtw_cfg80211_send_rx_assoc(dev, bss, buf, len) \
- cfg80211_send_rx_assoc(dev, bss, buf, len)
-
-#endif /* __IOCTL_CFG80211_H__ */
diff --git a/drivers/staging/r8188eu/include/mlme_osdep.h b/drivers/staging/r8188eu/include/mlme_osdep.h
deleted file mode 100644
index 5b9f688f9424..000000000000
--- a/drivers/staging/r8188eu/include/mlme_osdep.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __MLME_OSDEP_H_
-#define __MLME_OSDEP_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-void rtw_init_mlme_timer(struct adapter *padapter);
-void rtw_os_indicate_disconnect(struct adapter *adapter);
-void rtw_os_indicate_connect(struct adapter *adapter);
-void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted);
-void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie);
-
-void rtw_reset_securitypriv(struct adapter *adapter);
-void indicate_wx_scan_complete_event(struct adapter *padapter);
-
-#endif /* _MLME_OSDEP_H_ */
diff --git a/drivers/staging/r8188eu/include/odm_HWConfig.h b/drivers/staging/r8188eu/include/odm_HWConfig.h
index b37962edb2ed..3f7185780e87 100644
--- a/drivers/staging/r8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/r8188eu/include/odm_HWConfig.h
@@ -66,5 +66,4 @@ void ODM_PhyStatusQuery(struct odm_dm_struct *pDM_Odm,
struct odm_per_pkt_info *pPktinfo,
struct adapter *adapt);
-enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *pDM_Odm);
#endif
diff --git a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h b/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
deleted file mode 100644
index 683fa4a07956..000000000000
--- a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __INC_ODM_REGCONFIG_H_8188E
-#define __INC_ODM_REGCONFIG_H_8188E
-
-void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
- u32 Addr, u32 Data);
-
-void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data);
-
-void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Bitmask, u32 Data);
-
-void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Bitmask, u32 Data);
-
-void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Bitmask, u32 Data);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/odm_types.h b/drivers/staging/r8188eu/include/odm_types.h
index 08ba7a418ba8..76302df4b330 100644
--- a/drivers/staging/r8188eu/include/odm_types.h
+++ b/drivers/staging/r8188eu/include/odm_types.h
@@ -6,11 +6,6 @@
#define ODM_CE 0x04 /* BIT(2) */
-enum HAL_STATUS {
- HAL_STATUS_SUCCESS,
- HAL_STATUS_FAILURE,
-};
-
#define SET_TX_DESC_ANTSEL_A_88E(__ptxdesc, __value) \
le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(24))
#define SET_TX_DESC_ANTSEL_B_88E(__ptxdesc, __value) \
diff --git a/drivers/staging/r8188eu/include/osdep_intf.h b/drivers/staging/r8188eu/include/osdep_intf.h
index 0d7009269aab..36511c469546 100644
--- a/drivers/staging/r8188eu/include/osdep_intf.h
+++ b/drivers/staging/r8188eu/include/osdep_intf.h
@@ -39,6 +39,9 @@ The protection mechanism is through the pending queue.
u8 bio_timer_cancel;
};
+int netdev_open(struct net_device *pnetdev);
+int netdev_close(struct net_device *pnetdev);
+
u8 rtw_init_drv_sw(struct adapter *padapter);
u8 rtw_free_drv_sw(struct adapter *padapter);
u8 rtw_reset_drv_sw(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/recv_osdep.h b/drivers/staging/r8188eu/include/recv_osdep.h
deleted file mode 100644
index ca8a613508fd..000000000000
--- a/drivers/staging/r8188eu/include/recv_osdep.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RECV_OSDEP_H_
-#define __RECV_OSDEP_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
-void _rtw_free_recv_priv(struct recv_priv *precvpriv);
-
-s32 rtw_recv_entry(struct recv_frame *precv_frame);
-int rtw_recv_indicatepkt(struct adapter *adapter, struct recv_frame *recv_frame);
-void rtw_recv_returnpacket(struct net_device *cnxt, struct sk_buff *retpkt);
-
-void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
-
-int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
-void rtw_free_recv_priv(struct recv_priv *precvpriv);
-
-int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
-int rtw_os_recvbuf_resource_free(struct adapter *adapt, struct recv_buf *buf);
-
-void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
-int _netdev_open(struct net_device *pnetdev);
-int netdev_open(struct net_device *pnetdev);
-int netdev_close(struct net_device *pnetdev);
-
-#endif /* */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
index 5cd62b216720..ed4091e7cc7e 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h
@@ -23,7 +23,6 @@
#include "HalHWImg8188E_MAC.h"
#include "HalHWImg8188E_RF.h"
#include "HalHWImg8188E_BB.h"
-#include "odm_RegConfig8188E.h"
#include "odm_RTL8188E.h"
#define DRVINFO_SZ 4 /* unit is 8bytes */
@@ -36,7 +35,6 @@
0x2400 /* 9k for 88E nornal chip , MaxRxBuff=10k-max(TxReportSize(64*8),
* WOLPattern(16*24)) */
-#define TX_SELE_HQ BIT(0) /* High Queue */
#define TX_SELE_LQ BIT(1) /* Low Queue */
#define TX_SELE_NQ BIT(2) /* Normal Queue */
@@ -51,12 +49,6 @@
#define TX_PAGE_BOUNDARY_88E (TX_TOTAL_PAGE_NUMBER_88E + 1)
-/* Note: For Normal Chip Setting ,modify later */
-#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER \
- TX_TOTAL_PAGE_NUMBER_88E /* 0xA9 , 0xb0=>176=>22k */
-#define WMM_NORMAL_TX_PAGE_BOUNDARY_88E \
- (WMM_NORMAL_TX_TOTAL_PAGE_NUMBER + 1) /* 0xA9 */
-
#include "HalVerDef.h"
#include "hal_com.h"
@@ -155,8 +147,7 @@ struct hal_data_8188e {
u8 AntDivCfg;
u8 TRxAntDivType;
- u8 OutEpQueueSel;
- u8 OutEpNumber;
+ u8 out_ep_extra_queues;
struct P2P_PS_Offload_t p2p_ps_offload;
diff --git a/drivers/staging/r8188eu/include/rtl8188e_recv.h b/drivers/staging/r8188eu/include/rtl8188e_recv.h
index b752c5c06309..dc4f358f646d 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_recv.h
@@ -33,8 +33,6 @@ enum rx_packet_type {
HIS_REPORT,/* USB HISR RPT */
};
-s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
-void rtl8188eu_free_recv_priv(struct adapter * padapter);
void rtl8188eu_recv_tasklet(unsigned long priv);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
void update_recvframe_attrib_88e(struct recv_frame *fra, struct recv_stat *stat);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_rf.h b/drivers/staging/r8188eu/include/rtl8188e_rf.h
index 04556496baad..63ac0acc68fd 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_rf.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_rf.h
@@ -8,7 +8,7 @@
#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-int PHY_RF6052_Config8188E(struct adapter *Adapter);
+int phy_RF6052_Config_ParaFile(struct adapter *Adapter);
void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
enum ht_channel_width Bandwidth);
void rtl8188e_PHY_RF6052SetCckTxPower(struct adapter *Adapter, u8 *level);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index 9e7b1f89037c..e34619140e33 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -924,15 +924,9 @@ Current IOREG MAP
#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
/* 0x0200h ~ 0x027Fh TXDMA Configuration */
-/* 2RQPN */
-#define _HPQ(x) ((x) & 0xFF)
-#define _LPQ(x) (((x) & 0xFF) << 8)
-#define _PUBQ(x) (((x) & 0xFF) << 16)
-/* NOTE: in RQPN_NPQ register */
-#define _NPQ(x) ((x) & 0xFF)
-
-#define HPQ_PUBLIC_DIS BIT(24)
-#define LPQ_PUBLIC_DIS BIT(25)
+
+#define NUM_HQ 0x29
+
#define LD_RQPN BIT(31)
/* 2TDECTRL */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_xmit.h b/drivers/staging/r8188eu/include/rtl8188e_xmit.h
index 8adb672f7a07..6db7fabebea9 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_xmit.h
@@ -83,12 +83,6 @@
/* OFFSET 20 */
#define RTY_LMT_EN BIT(17)
-enum TXDESC_SC {
- SC_DONT_CARE = 0x00,
- SC_UPPER = 0x01,
- SC_LOWER = 0x02,
- SC_DUPLICATE = 0x03
-};
/* OFFSET 20 */
#define SGI BIT(6)
#define USB_TXAGG_NUM_SHT 24
@@ -147,6 +141,4 @@ bool rtl8188eu_xmitframe_complete(struct adapter *padapter,
struct xmit_priv *pxmitpriv,
struct xmit_buf *pxmitbuf);
-void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf);
-
#endif /* __RTL8188E_XMIT_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_ap.h b/drivers/staging/r8188eu/include/rtw_ap.h
index 724229fe84aa..8b4134eb3095 100644
--- a/drivers/staging/r8188eu/include/rtw_ap.h
+++ b/drivers/staging/r8188eu/include/rtw_ap.h
@@ -10,8 +10,6 @@
/* external function */
void rtw_indicate_sta_assoc_event(struct adapter *padapter,
struct sta_info *psta);
-void rtw_indicate_sta_disassoc_event(struct adapter *padapter,
- struct sta_info *psta);
void init_mlme_ap_info(struct adapter *padapter);
void free_mlme_ap_info(struct adapter *padapter);
void update_beacon(struct adapter *padapter, u8 ie_id,
diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h
index 6b6d560d7143..9a76aa85de94 100644
--- a/drivers/staging/r8188eu/include/rtw_cmd.h
+++ b/drivers/staging/r8188eu/include/rtw_cmd.h
@@ -730,9 +730,7 @@ Result:
#define H2C_CMD_OVERFLOW 0x06
#define H2C_RESERVED 0x07
-u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
- int ssid_num, struct rtw_ieee80211_channel *ch,
- int ch_num);
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num);
u8 rtw_createbss_cmd(struct adapter *padapter);
u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key);
u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h
index d6b0c1c2f9a2..8520f022a67f 100644
--- a/drivers/staging/r8188eu/include/rtw_led.h
+++ b/drivers/staging/r8188eu/include/rtw_led.h
@@ -21,20 +21,15 @@ enum LED_CTL_MODE {
};
enum LED_STATE_871x {
- LED_UNKNOWN = 0,
- RTW_LED_ON = 1,
RTW_LED_OFF = 2,
LED_BLINK_NORMAL = 3,
LED_BLINK_SLOWLY = 4,
LED_BLINK_SCAN = 6, /* LED is blinking during scanning period,
* the # of times to blink is depend on time
* for scanning. */
- LED_BLINK_StartToBlink = 8,/* Customzied for Sercomm Printer
- * Server case */
LED_BLINK_TXRX = 9,
LED_BLINK_WPS = 10, /* LED is blinkg during WPS communication */
LED_BLINK_WPS_STOP = 11,
- LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
};
struct led_priv {
@@ -43,8 +38,6 @@ struct led_priv {
bool bRegUseLed;
enum LED_STATE_871x CurrLedState; /* Current LED state. */
- enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
- * either RTW_LED_ON or RTW_LED_OFF are. */
bool bLedOn; /* true if LED is ON, false if LED is OFF. */
@@ -54,7 +47,6 @@ struct led_priv {
u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
- bool bLedNoLinkBlinkInProgress;
bool bLedLinkBlinkInProgress;
bool bLedScanBlinkInProgress;
struct delayed_work blink_work;
diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h
index d81668498e46..b69989cbab21 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme.h
@@ -5,7 +5,6 @@
#define __RTW_MLME_H_
#include "osdep_service.h"
-#include "mlme_osdep.h"
#include "drv_types.h"
#include "wlan_bssdef.h"
@@ -64,17 +63,6 @@ enum rt_scan_type {
SCAN_MIX,
};
-enum SCAN_RESULT_TYPE {
- SCAN_RESULT_P2P_ONLY = 0, /* Will return all the P2P devices. */
- SCAN_RESULT_ALL = 1, /* Will return all the scanned device,
- * include AP. */
- SCAN_RESULT_WFD_TYPE = 2 /* Will just return the correct WFD
- * device. */
- /* If this device is Miracast sink
- * device, it will just return all the
- * Miracast source devices. */
-};
-
/*
there are several "locks" in mlme_priv,
since mlme_priv is a shared resource between many threads,
@@ -433,8 +421,6 @@ void indicate_wx_scan_complete_event(struct adapter *padapter);
void rtw_indicate_wx_assoc_event(struct adapter *padapter);
void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
int event_thread(void *context);
-void rtw_join_timeout_handler (struct timer_list *t);
-void _rtw_scan_timeout_handler (struct timer_list *t);
void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
int rtw_init_mlme_priv(struct adapter *adapter);
void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv);
@@ -537,7 +523,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
void rtw_indicate_disconnect(struct adapter *adapter);
void rtw_indicate_connect(struct adapter *adapter);
-void rtw_indicate_scan_done( struct adapter *padapter, bool aborted);
+void rtw_indicate_scan_done(struct adapter *padapter);
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
uint in_len);
@@ -551,10 +537,6 @@ void _rtw_join_timeout_handler(struct adapter *adapter);
void rtw_scan_timeout_handler(struct adapter *adapter);
void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
-#define rtw_is_scan_deny(adapter) false
-#define rtw_clear_scan_deny(adapter) do {} while (0)
-#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
-#define rtw_set_scan_deny(adapter, ms) do {} while (0)
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
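
The no-op scan-deny stubs deleted above follow the usual kernel convention for empty macros: a do { } while (0) body keeps the expansion usable as a single statement, so it composes safely inside if/else chains. A minimal sketch with a hypothetical macro name:

    /* hypothetical stub: expands to a statement that does nothing */
    #define example_scan_deny(adapter, ms) do { } while (0)

    void example(struct adapter *adapter, bool busy)
    {
            if (busy)
                    example_scan_deny(adapter, 1000);   /* still exactly one statement */
            else
                    example_scan_deny(adapter, 0);
    }
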
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
index 343ce1ce4b3d..b322d0848db9 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
@@ -388,15 +388,11 @@ struct mlme_ext_priv {
void init_mlme_ext_priv(struct adapter *adapter);
int init_hw_mlme_ext(struct adapter *padapter);
void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
-extern void init_mlme_ext_timer(struct adapter *padapter);
-extern void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
unsigned char networktype_to_raid(unsigned char network_type);
u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len);
void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *len);
-void UpdateBrateTbl(struct adapter *padapter, u8 *mBratesOS);
-void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
void Save_DM_Func_Flag(struct adapter *padapter);
void Restore_DM_Func_Flag(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h
index 66d240a7123d..7768b0c5988c 100644
--- a/drivers/staging/r8188eu/include/rtw_recv.h
+++ b/drivers/staging/r8188eu/include/rtw_recv.h
@@ -243,6 +243,9 @@ struct recv_frame {
struct recv_reorder_ctrl *preorder_ctrl;
};
+int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+void _rtw_free_recv_priv(struct recv_priv *precvpriv);
+s32 rtw_recv_entry(struct recv_frame *precv_frame);
struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
int rtw_free_recvframe(struct recv_frame *precvframe,
diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h
index 034a9f8f51c9..82efcd54af3f 100644
--- a/drivers/staging/r8188eu/include/rtw_xmit.h
+++ b/drivers/staging/r8188eu/include/rtw_xmit.h
@@ -7,6 +7,9 @@
#include "osdep_service.h"
#include "drv_types.h"
+#define NR_XMITFRAME 256
+#define WMM_XMIT_THRESHOLD (NR_XMITFRAME * 2 / 5)
+
#define MAX_XMITBUF_SZ (20480) /* 20k */
#define NR_XMITBUFF (4)
@@ -304,6 +307,15 @@ struct xmit_priv {
struct submit_ctx ack_tx_ops;
};
+struct pkt_file {
+ struct sk_buff *pkt;
+ size_t pkt_len; /* the remainder length of the open_file */
+ unsigned char *cur_buffer;
+ u8 *buf_start;
+ u8 *cur_addr;
+ size_t buf_len;
+};
+
struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv,
struct xmit_buf *pxmitbuf);
@@ -355,7 +367,7 @@ u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);
int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);
-/* include after declaring struct xmit_buf, in order to avoid warning */
-#include "xmit_osdep.h"
+void rtw_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe);
+netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
#endif /* _RTL871X_XMIT_H_ */
diff --git a/drivers/staging/r8188eu/include/wlan_bssdef.h b/drivers/staging/r8188eu/include/wlan_bssdef.h
index 9d1c9e763287..81bda91a4136 100644
--- a/drivers/staging/r8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/r8188eu/include/wlan_bssdef.h
@@ -133,10 +133,6 @@ struct ndis_802_11_assoc_info {
u32 OffsetResponseIEs;
};
-enum ndis_802_11_reload_def {
- Ndis802_11ReloadWEPKeys
-};
-
/* Key mapping keys require a BSSID */
struct ndis_802_11_key {
u32 Length; /* Length of this structure */
diff --git a/drivers/staging/r8188eu/include/xmit_osdep.h b/drivers/staging/r8188eu/include/xmit_osdep.h
deleted file mode 100644
index 00658681fef9..000000000000
--- a/drivers/staging/r8188eu/include/xmit_osdep.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __XMIT_OSDEP_H_
-#define __XMIT_OSDEP_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-struct pkt_file {
- struct sk_buff *pkt;
- size_t pkt_len; /* the remainder length of the open_file */
- unsigned char *cur_buffer;
- u8 *buf_start;
- u8 *cur_addr;
- size_t buf_len;
-};
-
-extern int rtw_ht_enable;
-extern int rtw_cbw40_enable;
-extern int rtw_ampdu_enable;/* for enable tx_ampdu */
-
-#define NR_XMITFRAME 256
-
-struct xmit_priv;
-struct pkt_attrib;
-struct sta_xmit_priv;
-struct xmit_frame;
-struct xmit_buf;
-
-int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
-
-void rtw_os_xmit_schedule(struct adapter *padapter);
-
-int rtw_os_xmit_resource_alloc(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 alloc_sz);
-void rtw_os_xmit_resource_free(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 free_sz);
-
-uint rtw_remainder_len(struct pkt_file *pfile);
-void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
-uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
-bool rtw_endofpktfile(struct pkt_file *pfile);
-
-void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
-void rtw_os_xmit_complete(struct adapter *padapter,
- struct xmit_frame *pxframe);
-
-#endif /* __XMIT_OSDEP_H_ */
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 7f91dac2e41b..2de2e1e32738 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -1099,7 +1099,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
spin_lock_bh(&pmlmepriv->lock);
- _status = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
+ _status = rtw_sitesurvey_cmd(padapter, ssid, 1);
spin_unlock_bh(&pmlmepriv->lock);
}
@@ -1836,7 +1836,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
goto out;
}
- strlcpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+ strscpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
param->u.crypt.set_tx = 1;
@@ -2079,7 +2079,7 @@ static int rtw_wext_p2p_enable(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- int ret = 0;
+ int ret;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2094,10 +2094,9 @@ static int rtw_wext_p2p_enable(struct net_device *dev,
else if (*extra == '3')
init_role = P2P_ROLE_GO;
- if (_FAIL == rtw_p2p_enable(padapter, init_role)) {
- ret = -EFAULT;
- goto exit;
- }
+ ret = rtw_p2p_enable(padapter, init_role);
+ if (ret)
+ return ret;
/* set channel/bandwidth */
if (init_role != P2P_ROLE_DISABLE) {
@@ -2121,8 +2120,7 @@ static int rtw_wext_p2p_enable(struct net_device *dev,
set_channel_bwmode(padapter, channel, ch_offset, bwmode);
}
-exit:
- return ret;
+ return 0;
}
static void rtw_p2p_set_go_nego_ssid(struct net_device *dev,
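
The strlcpy -> strscpy change above is the standard conversion: strscpy() always NUL-terminates the destination and returns the number of characters copied, or -E2BIG when the source did not fit, so truncation can be checked directly. An illustrative calling pattern (variable names are made up):

    char alg[IEEE_CRYPT_ALG_NAME_LEN];
    ssize_t copied;

    copied = strscpy(alg, alg_name, sizeof(alg));
    if (copied < 0)         /* -E2BIG: alg_name was truncated, alg is still terminated */
            return -EINVAL;
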
diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c
deleted file mode 100644
index 899d8e9c3834..000000000000
--- a/drivers/staging/r8188eu/os_dep/mlme_linux.c
+++ /dev/null
@@ -1,205 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. i*/
-
-#define _MLME_OSDEP_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/mlme_osdep.h"
-
-void rtw_join_timeout_handler (struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.assoc_timer);
-
- _rtw_join_timeout_handler(adapter);
-}
-
-void _rtw_scan_timeout_handler (struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.scan_to_timer);
-
- rtw_scan_timeout_handler(adapter);
-}
-
-static void _dynamic_check_timer_handlder(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
-
- rtw_dynamic_check_timer_handlder(adapter);
- _set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
-}
-
-void rtw_init_mlme_timer(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- timer_setup(&pmlmepriv->assoc_timer, rtw_join_timeout_handler, 0);
- timer_setup(&pmlmepriv->scan_to_timer, _rtw_scan_timeout_handler, 0);
- timer_setup(&pmlmepriv->dynamic_chk_timer, _dynamic_check_timer_handlder, 0);
-}
-
-void rtw_os_indicate_connect(struct adapter *adapter)
-{
-
- rtw_indicate_wx_assoc_event(adapter);
- netif_carrier_on(adapter->pnetdev);
- if (adapter->pid[2] != 0)
- rtw_signal_process(adapter->pid[2], SIGALRM);
-
-}
-
-void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
-{
- indicate_wx_scan_complete_event(padapter);
-}
-
-static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
-
-void rtw_reset_securitypriv(struct adapter *adapter)
-{
- u8 backup_index = 0;
- u8 backup_counter = 0x00;
- u32 backup_time = 0;
-
- if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
- /* 802.1x */
- /* We have to backup the PMK information for WiFi PMK Caching test item. */
- /* Backup the btkip_countermeasure information. */
- /* When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */
- memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- backup_index = adapter->securitypriv.PMKIDIndex;
- backup_counter = adapter->securitypriv.btkip_countermeasure;
- backup_time = adapter->securitypriv.btkip_countermeasure_time;
- memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
-
- /* Restore the PMK information to securitypriv structure for the following connection. */
- memcpy(&adapter->securitypriv.PMKIDList[0],
- &backup_pmkid[0],
- sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- adapter->securitypriv.PMKIDIndex = backup_index;
- adapter->securitypriv.btkip_countermeasure = backup_counter;
- adapter->securitypriv.btkip_countermeasure_time = backup_time;
- adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
- } else {
- /* reset values in securitypriv */
- struct security_priv *psec_priv = &adapter->securitypriv;
-
- psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
- psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
- psec_priv->dot11PrivacyKeyIndex = 0;
- psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
- psec_priv->dot118021XGrpKeyid = 1;
- psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
- psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
- }
-}
-
-void rtw_os_indicate_disconnect(struct adapter *adapter)
-{
-
- netif_carrier_off(adapter->pnetdev); /* Do it first for tx broadcast pkt after disconnection issue! */
- rtw_indicate_wx_disassoc_event(adapter);
- rtw_reset_securitypriv(adapter);
-}
-
-void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
-{
- uint len;
- u8 *buff, *p, i;
- union iwreq_data wrqu;
-
- buff = NULL;
- if (authmode == _WPA_IE_ID_) {
- buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
- if (!buff)
- return;
- p = buff;
- p += sprintf(p, "ASSOCINFO(ReqIEs =");
- len = sec_ie[1] + 2;
- len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
- for (i = 0; i < len; i++)
- p += sprintf(p, "%02x", sec_ie[i]);
- p += sprintf(p, ")");
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = p - buff;
- wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ?
- wrqu.data.length : IW_CUSTOM_MAX;
- wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
- kfree(buff);
- }
-}
-
-static void _survey_timer_hdl(struct timer_list *t)
-{
- struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.survey_timer);
-
- survey_timer_hdl(padapter);
-}
-
-static void _link_timer_hdl(struct timer_list *t)
-{
- struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.link_timer);
- link_timer_hdl(padapter);
-}
-
-static void _addba_timer_hdl(struct timer_list *t)
-{
- struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
- addba_timer_hdl(psta);
-}
-
-void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
-{
- timer_setup(&psta->addba_retry_timer, _addba_timer_hdl, 0);
-}
-
-void init_mlme_ext_timer(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- timer_setup(&pmlmeext->survey_timer, _survey_timer_hdl, 0);
- timer_setup(&pmlmeext->link_timer, _link_timer_hdl, 0);
-}
-
-void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *psta)
-{
- union iwreq_data wrqu;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (!psta)
- return;
-
- if (psta->aid > NUM_STA)
- return;
-
- if (pstapriv->sta_aid[psta->aid - 1] != psta)
- return;
-
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
-
- wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
-}
-
-void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *psta)
-{
- union iwreq_data wrqu;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (!psta)
- return;
-
- if (psta->aid > NUM_STA)
- return;
-
- if (pstapriv->sta_aid[psta->aid - 1] != psta)
- return;
-
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
-
- wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
-}
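
The wrappers deleted with mlme_linux.c are instances of the standard kernel timer pattern: timer_setup() registers a handler that receives the struct timer_list pointer, and from_timer() (a container_of() helper) recovers the structure that embeds the timer. A minimal self-contained sketch with hypothetical names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct example_priv {
            struct timer_list watchdog;
    };

    static void example_watchdog_fn(struct timer_list *t)
    {
            struct example_priv *priv = from_timer(priv, t, watchdog);

            /* handle the expiry, then re-arm for two seconds from now */
            mod_timer(&priv->watchdog, jiffies + 2 * HZ);
    }

    static void example_init(struct example_priv *priv)
    {
            timer_setup(&priv->watchdog, example_watchdog_fn, 0);
    }
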
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index aa100b5141e1..6a45315d01a2 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -5,8 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/xmit_osdep.h"
-#include "../include/recv_osdep.h"
#include "../include/hal_intf.h"
#include "../include/rtw_ioctl.h"
#include "../include/usb_osintf.h"
@@ -17,14 +15,12 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
MODULE_AUTHOR("Realtek Semiconductor Corp.");
-MODULE_VERSION(DRIVERVERSION);
-MODULE_FIRMWARE("rtlwifi/rtl8188eufw.bin");
+MODULE_FIRMWARE(FW_RTL8188EU);
#define CONFIG_BR_EXT_BRNAME "br0"
#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
/* module param defaults */
-static int rtw_chip_version = 0x00;
static int rtw_rfintfs = HWPI;
static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure; infra, ad-hoc, auto */
@@ -67,9 +63,9 @@ static int rtw_uapsd_acvo_en;
static int rtw_led_enable = 1;
-int rtw_ht_enable = 1;
-int rtw_cbw40_enable = 3; /* 0 :disable, bit(0): enable 2.4g, bit(1): enable 5g */
-int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
+static int rtw_ht_enable = 1;
+static int rtw_cbw40_enable = 3; /* 0 :disable, bit(0): enable 2.4g, bit(1): enable 5g */
+static int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
static int rtw_rx_stbc = 1;/* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, default is set to enable 2.4GHZ for IOT issue with Buffalo's AP at 5GHZ */
static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
@@ -106,7 +102,6 @@ char *rtw_initmac; /* temp mac address if users want to use instead of the mac
module_param(rtw_initmac, charp, 0644);
module_param(rtw_channel_plan, int, 0644);
-module_param(rtw_chip_version, int, 0644);
module_param(rtw_rfintfs, int, 0644);
module_param(rtw_lbkmode, int, 0644);
module_param(rtw_network_mode, int, 0644);
@@ -153,7 +148,6 @@ static uint loadparam(struct adapter *padapter)
{
struct registry_priv *registry_par = &padapter->registrypriv;
- registry_par->chip_version = (u8)rtw_chip_version;
registry_par->rfintfs = (u8)rtw_rfintfs;
registry_par->lbkmode = (u8)rtw_lbkmode;
registry_par->network_mode = (u8)rtw_network_mode;
@@ -622,7 +616,7 @@ void netdev_br_init(struct net_device *netdev)
rcu_read_unlock();
}
-int _netdev_open(struct net_device *pnetdev)
+static int _netdev_open(struct net_device *pnetdev)
{
uint status;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -636,7 +630,7 @@ int _netdev_open(struct net_device *pnetdev)
if (status == _FAIL)
goto netdev_open_error;
- pr_info("MAC Address = %pM\n", pnetdev->dev_addr);
+ netdev_dbg(pnetdev, "MAC Address = %pM\n", pnetdev->dev_addr);
status = rtw_start_drv_threads(padapter);
if (status == _FAIL) {
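
Replacing pr_info() with netdev_dbg() above is the usual cleanup for per-interface messages: the netdev_* helpers prefix the driver and device name automatically, and the _dbg variant is emitted only with DEBUG or dynamic debug enabled, so routine open-path chatter stays out of normal logs. For example:

    netdev_dbg(pnetdev, "MAC Address = %pM\n", pnetdev->dev_addr);
    netdev_err(pnetdev, "failed to start driver threads\n");
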
diff --git a/drivers/staging/r8188eu/os_dep/osdep_service.c b/drivers/staging/r8188eu/os_dep/osdep_service.c
index 3504a0a9ba87..88271f956b52 100644
--- a/drivers/staging/r8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/r8188eu/os_dep/osdep_service.c
@@ -5,7 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
#include "../include/rtw_ioctl_set.h"
/*
@@ -54,14 +53,13 @@ struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
if (!pnetdev)
- goto RETURN;
+ return NULL;
pnetdev->dev.type = &wlan_type;
pnpi = netdev_priv(pnetdev);
pnpi->priv = old_priv;
pnpi->sizeof_priv = sizeof_priv;
-RETURN:
return pnetdev;
}
@@ -72,19 +70,18 @@ struct net_device *rtw_alloc_etherdev(int sizeof_priv)
pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
if (!pnetdev)
- goto RETURN;
+ return NULL;
pnpi = netdev_priv(pnetdev);
pnpi->priv = vzalloc(sizeof_priv);
if (!pnpi->priv) {
free_netdev(pnetdev);
- pnetdev = NULL;
- goto RETURN;
+ return NULL;
}
pnpi->sizeof_priv = sizeof_priv;
-RETURN:
+
return pnetdev;
}
diff --git a/drivers/staging/r8188eu/os_dep/recv_linux.c b/drivers/staging/r8188eu/os_dep/recv_linux.c
deleted file mode 100644
index 1e14b6d49795..000000000000
--- a/drivers/staging/r8188eu/os_dep/recv_linux.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RECV_OSDEP_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-
-#include "../include/wifi.h"
-#include "../include/recv_osdep.h"
-
-#include "../include/osdep_intf.h"
-#include "../include/usb_ops.h"
-
-/* alloc os related resource in struct recv_buf */
-int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
- struct recv_buf *precvbuf)
-{
- int res = _SUCCESS;
-
- precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
- if (!precvbuf->purb)
- res = _FAIL;
- precvbuf->pskb = NULL;
- precvbuf->reuse = false;
- return res;
-}
-
-/* free os related resource in struct recv_buf */
-int rtw_os_recvbuf_resource_free(struct adapter *padapter,
- struct recv_buf *precvbuf)
-{
- usb_free_urb(precvbuf->purb);
- return _SUCCESS;
-}
-
-void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
-{
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- u32 cur_time = 0;
-
- if (psecuritypriv->last_mic_err_time == 0) {
- psecuritypriv->last_mic_err_time = jiffies;
- } else {
- cur_time = jiffies;
-
- if (cur_time - psecuritypriv->last_mic_err_time < 60 * HZ) {
- psecuritypriv->btkip_countermeasure = true;
- psecuritypriv->last_mic_err_time = 0;
- psecuritypriv->btkip_countermeasure_time = cur_time;
- } else {
- psecuritypriv->last_mic_err_time = jiffies;
- }
- }
-
- memset(&ev, 0x00, sizeof(ev));
- if (bgroup)
- ev.flags |= IW_MICFAILURE_GROUP;
- else
- ev.flags |= IW_MICFAILURE_PAIRWISE;
-
- ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
- memset(&wrqu, 0x00, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
- wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE,
- &wrqu, (char *)&ev);
-}
-
-int rtw_recv_indicatepkt(struct adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct recv_priv *precvpriv;
- struct __queue *pfree_recv_queue;
- struct sk_buff *skb;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- precvpriv = &padapter->recvpriv;
- pfree_recv_queue = &precvpriv->free_recv_queue;
-
- skb = precv_frame->pkt;
- if (!skb)
- goto _recv_indicatepkt_drop;
-
- skb->data = precv_frame->rx_data;
-
- skb_set_tail_pointer(skb, precv_frame->len);
-
- skb->len = precv_frame->len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- struct sk_buff *pskb2 = NULL;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- bool bmcast = is_multicast_ether_addr(pattrib->dst);
-
- if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
- ETH_ALEN)) {
- if (bmcast) {
- psta = rtw_get_bcmc_stainfo(padapter);
- pskb2 = skb_clone(skb, GFP_ATOMIC);
- } else {
- psta = rtw_get_stainfo(pstapriv, pattrib->dst);
- }
-
- if (psta) {
- struct net_device *pnetdev;
-
- pnetdev = (struct net_device *)padapter->pnetdev;
- skb->dev = pnetdev;
- skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
-
- rtw_xmit_entry(skb, pnetdev);
-
- if (bmcast)
- skb = pskb2;
- else
- goto _recv_indicatepkt_end;
- }
- }
- }
-
- rcu_read_lock();
- rcu_dereference(padapter->pnetdev->rx_handler_data);
- rcu_read_unlock();
-
- skb->ip_summed = CHECKSUM_NONE;
- skb->dev = padapter->pnetdev;
- skb->protocol = eth_type_trans(skb, padapter->pnetdev);
-
- netif_rx(skb);
-
-_recv_indicatepkt_end:
-
- /* pointers to NULL before rtw_free_recvframe() */
- precv_frame->pkt = NULL;
-
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
-
- return _SUCCESS;
-
-_recv_indicatepkt_drop:
-
- /* enqueue back to free_recv_queue */
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
-
- return _FAIL;
-}
-
-static void _rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
-{
- struct recv_reorder_ctrl *preorder_ctrl;
-
- preorder_ctrl = from_timer(preorder_ctrl, t, reordering_ctrl_timer);
- rtw_reordering_ctrl_timeout_handler(preorder_ctrl);
-}
-
-void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
-{
- timer_setup(&preorder_ctrl->reordering_ctrl_timer, _rtw_reordering_ctrl_timeout_handler, 0);
-}
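
The recv_linux.c helpers removed above show the URB ownership pattern presumably folded into the core receive path: usb_alloc_urb() can fail and must be paired with usb_free_urb(), which accepts NULL. A compact sketch with a hypothetical buffer structure:

    #include <linux/usb.h>
    #include <linux/errno.h>

    struct example_recv_buf {
            struct urb *purb;
    };

    static int example_recvbuf_alloc(struct example_recv_buf *buf)
    {
            buf->purb = usb_alloc_urb(0, GFP_KERNEL);
            if (!buf->purb)
                    return -ENOMEM;
            return 0;
    }

    static void example_recvbuf_free(struct example_recv_buf *buf)
    {
            usb_free_urb(buf->purb);        /* NULL-safe */
            buf->purb = NULL;
    }
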
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index 9147d176da4f..5fbfbcd95de2 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -4,8 +4,6 @@
#include <linux/usb.h>
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/xmit_osdep.h"
#include "../include/hal_intf.h"
#include "../include/osdep_intf.h"
#include "../include/usb_ops.h"
@@ -55,7 +53,7 @@ struct rtw_usb_drv {
};
static struct rtw_usb_drv rtl8188e_usb_drv = {
- .usbdrv.name = "r8188eu",
+ .usbdrv.name = KBUILD_MODNAME,
.usbdrv.probe = rtw_drv_init,
.usbdrv.disconnect = rtw_dev_remove,
.usbdrv.id_table = rtw_usb_id_tbl,
@@ -232,7 +230,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
mutex_unlock(&pwrpriv->lock);
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- rtw_indicate_scan_done(padapter, 1);
+ rtw_indicate_scan_done(padapter);
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
rtw_indicate_disconnect(padapter);
@@ -288,17 +286,17 @@ exit:
* We accept the new device by returning 0.
*/
-static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
- struct usb_interface *pusb_intf)
+static int rtw_usb_if1_init(struct dvobj_priv *dvobj, struct usb_interface *pusb_intf)
{
struct adapter *padapter = NULL;
struct net_device *pnetdev = NULL;
struct io_priv *piopriv;
struct intf_hdl *pintf;
+ int ret;
padapter = vzalloc(sizeof(*padapter));
if (!padapter)
- return NULL;
+ return -ENOMEM;
padapter->dvobj = dvobj;
dvobj->if1 = padapter;
@@ -307,12 +305,13 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
padapter->hw_init_mutex = &usb_drv->hw_init_mutex;
- if (rtw_handle_dualmac(padapter, 1) != _SUCCESS)
- goto free_adapter;
+ rtw_handle_dualmac(padapter, 1);
pnetdev = rtw_init_netdev(padapter);
- if (!pnetdev)
+ if (!pnetdev) {
+ ret = -ENODEV;
goto handle_dualmac;
+ }
SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
padapter = rtw_netdev_priv(pnetdev);
@@ -330,14 +329,20 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
rtl8188e_read_chip_version(padapter);
/* step usb endpoint mapping */
- rtl8188eu_interface_configure(padapter);
+ ret = rtl8188eu_interface_configure(padapter);
+ if (ret)
+ goto handle_dualmac;
/* step read efuse/eeprom data and get mac_addr */
- ReadAdapterInfo8188EU(padapter);
+ ret = ReadAdapterInfo8188EU(padapter);
+ if (ret)
+ goto handle_dualmac;
/* step 5. */
- if (rtw_init_drv_sw(padapter) == _FAIL)
+ if (rtw_init_drv_sw(padapter) == _FAIL) {
+ ret = -ENODEV;
goto handle_dualmac;
+ }
#ifdef CONFIG_PM
if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
@@ -352,7 +357,8 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
usb_autopm_get_interface(pusb_intf);
/* alloc dev name after read efuse. */
- if (rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname) < 0)
+ ret = rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
+ if (ret)
goto free_drv_sw;
rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
rtw_init_wifidirect_addrs(padapter, padapter->eeprompriv.mac_addr,
@@ -360,23 +366,23 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
/* step 6. Tell the network stack we exist */
- if (register_netdev(pnetdev) != 0)
+ ret = register_netdev(pnetdev);
+ if (ret)
goto free_drv_sw;
- return padapter;
+ return 0;
free_drv_sw:
rtw_cancel_all_timer(padapter);
rtw_free_drv_sw(padapter);
handle_dualmac:
rtw_handle_dualmac(padapter, 0);
-free_adapter:
if (pnetdev)
rtw_free_netdev(pnetdev);
else
vfree(padapter);
- return NULL;
+ return ret;
}
static void rtw_usb_if1_deinit(struct adapter *if1)
@@ -404,27 +410,24 @@ static void rtw_usb_if1_deinit(struct adapter *if1)
static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
{
- struct adapter *if1 = NULL;
struct dvobj_priv *dvobj;
+ int ret;
/* Initialize dvobj_priv */
dvobj = usb_dvobj_init(pusb_intf);
if (!dvobj)
- goto err;
+ return -ENODEV;
- if1 = rtw_usb_if1_init(dvobj, pusb_intf);
- if (!if1)
- goto free_dvobj;
+ ret = rtw_usb_if1_init(dvobj, pusb_intf);
+ if (ret) {
+ usb_dvobj_deinit(pusb_intf);
+ return ret;
+ }
if (ui_pid[1] != 0)
rtw_signal_process(ui_pid[1], SIGUSR2);
return 0;
-
-free_dvobj:
- usb_dvobj_deinit(pusb_intf);
-err:
- return -ENODEV;
}
/*
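
The usb_intf.c changes above convert the probe path from the driver-private _SUCCESS/_FAIL values to standard negative errno returns with goto-based unwinding, the idiomatic kernel shape for multi-step init. A stripped-down sketch (the device structure and example_configure() are hypothetical):

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/errno.h>

    struct example_dev {
            struct net_device *netdev;
    };

    int example_configure(struct example_dev *dev);     /* hypothetical helper */

    static int example_if_init(struct example_dev *dev)
    {
            int ret;

            dev->netdev = alloc_etherdev(sizeof(*dev));
            if (!dev->netdev)
                    return -ENOMEM;

            ret = example_configure(dev);
            if (ret)
                    goto err_free_netdev;

            ret = register_netdev(dev->netdev);
            if (ret)
                    goto err_free_netdev;

            return 0;

    err_free_netdev:
            free_netdev(dev->netdev);
            return ret;
    }
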
diff --git a/drivers/staging/r8188eu/os_dep/xmit_linux.c b/drivers/staging/r8188eu/os_dep/xmit_linux.c
deleted file mode 100644
index 91a1e4e3219a..000000000000
--- a/drivers/staging/r8188eu/os_dep/xmit_linux.c
+++ /dev/null
@@ -1,237 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _XMIT_OSDEP_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wifi.h"
-#include "../include/mlme_osdep.h"
-#include "../include/xmit_osdep.h"
-#include "../include/osdep_intf.h"
-#include "../include/usb_osintf.h"
-
-uint rtw_remainder_len(struct pkt_file *pfile)
-{
- return pfile->buf_len - ((size_t)(pfile->cur_addr) -
- (size_t)(pfile->buf_start));
-}
-
-void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
-{
-
- if (!pktptr) {
- pr_err("8188eu: pktptr is NULL\n");
- return;
- }
- if (!pfile) {
- pr_err("8188eu: pfile is NULL\n");
- return;
- }
- pfile->pkt = pktptr;
- pfile->cur_addr = pktptr->data;
- pfile->buf_start = pktptr->data;
- pfile->pkt_len = pktptr->len;
- pfile->buf_len = pktptr->len;
-
- pfile->cur_buffer = pfile->buf_start;
-
-}
-
-uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
-{
- uint len = 0;
-
- len = rtw_remainder_len(pfile);
- len = (rlen > len) ? len : rlen;
-
- if (rmem)
- skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len);
-
- pfile->cur_addr += len;
- pfile->pkt_len -= len;
-
- return len;
-}
-
-bool rtw_endofpktfile(struct pkt_file *pfile)
-{
-
- if (pfile->pkt_len == 0) {
-
- return true;
- }
-
- return false;
-}
-
-int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
-{
- pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
- if (!pxmitbuf->pallocated_buf)
- return _FAIL;
-
- pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
- pxmitbuf->dma_transfer_addr = 0;
-
- pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxmitbuf->pxmit_urb)
- return _FAIL;
-
- return _SUCCESS;
-}
-
-void rtw_os_xmit_resource_free(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 free_sz)
-{
- usb_free_urb(pxmitbuf->pxmit_urb);
-
- kfree(pxmitbuf->pallocated_buf);
-}
-
-#define WMM_XMIT_THRESHOLD (NR_XMITFRAME * 2 / 5)
-
-void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
-{
- u16 queue;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- queue = skb_get_queue_mapping(pkt);
- if (padapter->registrypriv.wifi_spec) {
- if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
- (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
- netif_wake_subqueue(padapter->pnetdev, queue);
- } else {
- if (__netif_subqueue_stopped(padapter->pnetdev, queue))
- netif_wake_subqueue(padapter->pnetdev, queue);
- }
-
- dev_kfree_skb_any(pkt);
-}
-
-void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
-{
- if (pxframe->pkt)
- rtw_os_pkt_complete(padapter, pxframe->pkt);
- pxframe->pkt = NULL;
-}
-
-void rtw_os_xmit_schedule(struct adapter *padapter)
-{
- struct xmit_priv *pxmitpriv;
-
- if (!padapter)
- return;
-
- pxmitpriv = &padapter->xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- if (rtw_txframes_pending(padapter))
- tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-
- spin_unlock_bh(&pxmitpriv->lock);
-}
-
-static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- u16 queue;
-
- queue = skb_get_queue_mapping(pkt);
- if (padapter->registrypriv.wifi_spec) {
- /* No free space for Tx, tx_worker is too slow */
- if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
- netif_stop_subqueue(padapter->pnetdev, queue);
- } else {
- if (pxmitpriv->free_xmitframe_cnt <= 4) {
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
- netif_stop_subqueue(padapter->pnetdev, queue);
- }
- }
-}
-
-static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct list_head *phead, *plist;
- struct sk_buff *newskb;
- struct sta_info *psta = NULL;
- s32 res;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* free sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
-
- /* avoid come from STA1 and send back STA1 */
- if (!memcmp(psta->hwaddr, &skb->data[6], 6))
- continue;
-
- newskb = skb_copy(skb, GFP_ATOMIC);
-
- if (newskb) {
- memcpy(newskb->data, psta->hwaddr, 6);
- res = rtw_xmit(padapter, &newskb);
- if (res < 0) {
- pxmitpriv->tx_drop++;
- dev_kfree_skb_any(newskb);
- } else {
- pxmitpriv->tx_pkts++;
- }
- } else {
- pxmitpriv->tx_drop++;
-
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- return false; /* Caller shall tx this multicast frame via normal way. */
- }
- }
-
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- dev_kfree_skb_any(skb);
- return true;
-}
-
-int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- s32 res = 0;
-
- if (!rtw_if_up(padapter))
- goto drop_packet;
-
- rtw_check_xmit_resource(padapter, pkt);
-
- if (!rtw_mc2u_disable && check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
- (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
- (padapter->registrypriv.wifi_spec == 0)) {
- if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
- res = rtw_mlcst2unicst(padapter, pkt);
- if (res)
- goto exit;
- }
- }
-
- res = rtw_xmit(padapter, &pkt);
- if (res < 0)
- goto drop_packet;
-
- pxmitpriv->tx_pkts++;
- goto exit;
-
-drop_packet:
- pxmitpriv->tx_drop++;
- dev_kfree_skb_any(pkt);
-
-exit:
-
- return 0;
-}
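
The deleted xmit_linux.c kept per-queue flow control around a watermark (WMM_XMIT_THRESHOLD, now defined in rtw_xmit.h): a TX queue is stopped when frames pile up and woken again as completions drain. A rough sketch of that throttling shape, with made-up watermark values:

    #include <linux/netdevice.h>

    #define EXAMPLE_HIGH_WATER      96      /* illustrative thresholds */
    #define EXAMPLE_LOW_WATER       32

    static void example_tx_pressure(struct net_device *ndev, u16 queue, int in_flight)
    {
            if (in_flight > EXAMPLE_HIGH_WATER && !__netif_subqueue_stopped(ndev, queue))
                    netif_stop_subqueue(ndev, queue);
    }

    static void example_tx_complete(struct net_device *ndev, u16 queue, int in_flight)
    {
            if (in_flight < EXAMPLE_LOW_WATER && __netif_subqueue_stopped(ndev, queue))
                    netif_wake_subqueue(ndev, queue);
    }
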
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 39f5a6a7346a..e06c189b4ce4 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -10,6 +10,9 @@ config RTLLIB
If unsure, say N.
+ This driver adds support for rtllib wireless cards.
+ Only the rtl8192e is supported as of now.
+
if RTLLIB
config RTLLIB_CRYPTO_CCMP
@@ -23,6 +26,8 @@ config RTLLIB_CRYPTO_CCMP
CCMP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
+ Adds support for the CCM mode Protocol crypto driver for
+ use in wireless cards (including rtllib cards).
config RTLLIB_CRYPTO_TKIP
tristate "Support for rtllib TKIP crypto"
@@ -35,6 +40,8 @@ config RTLLIB_CRYPTO_TKIP
TKIP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
+ Adds support for the Temporal Key Integrity Protocol for
+ the IEEE 802.11i standard for use on wireless cards.
config RTLLIB_CRYPTO_WEP
tristate "Support for rtllib WEP crypto"
@@ -42,9 +49,12 @@ config RTLLIB_CRYPTO_WEP
depends on RTLLIB
default y
help
- TKIP crypto driver for rtllib.
+ WEP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
+ Adds support for the (now weak) Wired Equivalent Privacy
+ (WEP) crypto protocol for wireless cards.
+ NOTE: This protocol is now considered insecure.
source "drivers/staging/rtl8192e/rtl8192e/Kconfig"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 4abec7b42993..ab2e9b729883 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -10,7 +10,7 @@
#include "r8190P_rtl8256.h"
void rtl92e_set_bandwidth(struct net_device *dev,
- enum ht_channel_width Bandwidth)
+ enum ht_channel_width bandwidth)
{
u8 eRFPath;
struct r8192_priv *priv = rtllib_priv(dev);
@@ -25,7 +25,7 @@ void rtl92e_set_bandwidth(struct net_device *dev,
if (!rtl92e_is_legal_rf_path(dev, eRFPath))
continue;
- switch (Bandwidth) {
+ switch (bandwidth) {
case HT_CHANNEL_WIDTH_20:
rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
0x0b, bMask12Bits, 0x100);
@@ -44,7 +44,7 @@ void rtl92e_set_bandwidth(struct net_device *dev,
break;
default:
netdev_err(dev, "%s(): Unknown bandwidth: %#X\n",
- __func__, Bandwidth);
+ __func__, bandwidth);
break;
}
}
@@ -115,10 +115,6 @@ bool rtl92e_config_rf(struct net_device *dev)
(enum rf90_radio_path)eRFPath,
RegOffSetToBeCheck,
bMask12Bits);
- RT_TRACE(COMP_RF,
- "RF %d %d register final value: %x\n",
- eRFPath, RegOffSetToBeCheck,
- RF3_Final_Value);
RetryTimes--;
}
@@ -142,8 +138,6 @@ bool rtl92e_config_rf(struct net_device *dev)
goto fail;
}
}
-
- RT_TRACE(COMP_PHY, "PHY Initialization Success\n");
return true;
fail:
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index 4cb483f1a152..3c52e2b43095 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
@@ -9,7 +9,7 @@
#define RTL819X_TOTAL_RF_PATH 2
void rtl92e_set_bandwidth(struct net_device *dev,
- enum ht_channel_width Bandwidth);
+ enum ht_channel_width bandwidth);
bool rtl92e_config_rf(struct net_device *dev);
void rtl92e_set_cck_tx_power(struct net_device *dev, u8 powerlevel);
void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index cd8bbc358d01..8bf06f736ffb 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -21,8 +21,6 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
- RT_TRACE(COMP_CMDPKT, "%s(),buffer_len is %d\n", __func__, len);
-
do {
if ((len - frag_offset) > CMDPACKET_FRAG_SIZE) {
frag_length = CMDPACKET_FRAG_SIZE;
@@ -61,8 +59,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
tcb_desc->txbuf_size = frag_length;
}
- seg_ptr = skb_put(skb, frag_length);
- memcpy(seg_ptr, data, (u32)frag_length);
+ skb_put_data(skb, data, frag_length);
if (type == DESC_PACKET_TYPE_INIT &&
(!priv->rtllib->check_nic_enough_desc(dev, TXCMD_QUEUE) ||
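
The cmdpkt hunk above swaps an open-coded skb_put() plus memcpy() for skb_put_data(), which reserves the tail room and copies in one call. The two forms are equivalent:

    #include <linux/skbuff.h>
    #include <linux/string.h>

    static void example_copy_fragment_old(struct sk_buff *skb, const void *data, unsigned int len)
    {
            /* old style: extend the data area, then copy into the returned pointer */
            void *seg = skb_put(skb, len);

            memcpy(seg, data, len);
    }

    static void example_copy_fragment_new(struct sk_buff *skb, const void *data, unsigned int len)
    {
            /* same effect in one helper */
            skb_put_data(skb, data, len);
    }
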
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 4b9249195b5a..18e4e5d84878 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -186,8 +186,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
AC_PARAM_ECW_MIN_OFFSET) |
(((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET);
- RT_TRACE(COMP_DBG, "%s():HW_VAR_AC_PARAM eACI:%x:%x\n",
- __func__, eACI, u4bAcParam);
switch (eACI) {
case AC1_BK:
rtl92e_writel(dev, EDCAPARA_BK, u4bAcParam);
@@ -226,8 +224,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
u8 acm = pAciAifsn->f.acm;
u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl);
- RT_TRACE(COMP_DBG, "===========>%s():HW_VAR_ACM_CTRL:%x\n",
- __func__, eACI);
AcmCtrl = AcmCtrl | ((priv->AcmMethod == 2) ? 0x0 : 0x1);
if (acm) {
@@ -243,12 +239,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
case AC3_VO:
AcmCtrl |= AcmHw_VoqEn;
break;
-
- default:
- RT_TRACE(COMP_QOS,
- "SetHwReg8185(): [HW_VAR_ACM_CTRL] acm set failed: eACI is %d\n",
- eACI);
- break;
}
} else {
switch (eACI) {
@@ -268,10 +258,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
break;
}
}
-
- RT_TRACE(COMP_QOS,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- AcmCtrl);
rtl92e_writeb(dev, AcmHwCtrl, AcmCtrl);
break;
}
@@ -304,8 +290,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
u16 i, usValue, IC_Version;
u16 EEPROMId;
- RT_TRACE(COMP_INIT, "====> %s\n", __func__);
-
EEPROMId = rtl92e_eeprom_read(dev, 0);
if (EEPROMId != RTL8190_EEPROM_ID) {
netdev_err(dev, "%s(): Invalid EEPROM ID: %x\n", __func__,
@@ -329,8 +313,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
ICVer8192 = IC_Version & 0xf;
ICVer8256 = (IC_Version & 0xf0)>>4;
- RT_TRACE(COMP_INIT, "\nICVer8192 = 0x%x\n", ICVer8192);
- RT_TRACE(COMP_INIT, "\nICVer8256 = 0x%x\n", ICVer8256);
if (ICVer8192 == 0x2) {
if (ICVer8256 == 0x5)
priv->card_8192_version = VERSION_8190_BE;
@@ -343,22 +325,14 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->card_8192_version = VERSION_8190_BD;
break;
}
- RT_TRACE(COMP_INIT, "\nIC Version = 0x%x\n",
- priv->card_8192_version);
} else {
priv->card_8192_version = VERSION_8190_BD;
priv->eeprom_vid = 0;
priv->eeprom_did = 0;
priv->eeprom_CustomerID = 0;
priv->eeprom_ChannelPlan = 0;
- RT_TRACE(COMP_INIT, "\nIC Version = 0x%x\n", 0xff);
}
- RT_TRACE(COMP_INIT, "EEPROM VID = 0x%4x\n", priv->eeprom_vid);
- RT_TRACE(COMP_INIT, "EEPROM DID = 0x%4x\n", priv->eeprom_did);
- RT_TRACE(COMP_INIT, "EEPROM Customer ID: 0x%2x\n",
- priv->eeprom_CustomerID);
-
if (!priv->AutoloadFailFlag) {
u8 addr[ETH_ALEN];
@@ -372,9 +346,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
eth_hw_addr_set(dev, bMac_Tmp_Addr);
}
- RT_TRACE(COMP_INIT, "Permanent Address = %pM\n",
- dev->dev_addr);
-
if (priv->card_8192_version > VERSION_8190_BD)
priv->bTXPowerDataReadFromEEPORM = true;
else
@@ -395,8 +366,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
} else {
priv->EEPROMLegacyHTTxPowerDiff = 0x04;
}
- RT_TRACE(COMP_INIT, "EEPROMLegacyHTTxPowerDiff = %d\n",
- priv->EEPROMLegacyHTTxPowerDiff);
if (!priv->AutoloadFailFlag)
priv->EEPROMThermalMeter = ((rtl92e_eeprom_read(dev,
@@ -404,8 +373,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
0xff00) >> 8;
else
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
- RT_TRACE(COMP_INIT, "ThermalMeter = %d\n",
- priv->EEPROMThermalMeter);
priv->TSSI_13dBm = priv->EEPROMThermalMeter * 100;
if (priv->epromtype == EEPROM_93C46) {
@@ -421,10 +388,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->EEPROMCrystalCap =
EEPROM_Default_TxPwDiff_CrystalCap;
}
- RT_TRACE(COMP_INIT, "EEPROMAntPwDiff = %d\n",
- priv->EEPROMAntPwDiff);
- RT_TRACE(COMP_INIT, "EEPROMCrystalCap = %d\n",
- priv->EEPROMCrystalCap);
for (i = 0; i < 14; i += 2) {
if (!priv->AutoloadFailFlag)
@@ -434,12 +397,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
usValue = EEPROM_Default_TxPower;
*((u16 *)(&priv->EEPROMTxPowerLevelCCK[i])) =
usValue;
- RT_TRACE(COMP_INIT,
- "CCK Tx Power Level, Index %d = 0x%02x\n",
- i, priv->EEPROMTxPowerLevelCCK[i]);
- RT_TRACE(COMP_INIT,
- "CCK Tx Power Level, Index %d = 0x%02x\n",
- i+1, priv->EEPROMTxPowerLevelCCK[i+1]);
}
for (i = 0; i < 14; i += 2) {
if (!priv->AutoloadFailFlag)
@@ -449,13 +406,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
usValue = EEPROM_Default_TxPower;
*((u16 *)(&priv->EEPROMTxPowerLevelOFDM24G[i]))
= usValue;
- RT_TRACE(COMP_INIT,
- "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n",
- i, priv->EEPROMTxPowerLevelOFDM24G[i]);
- RT_TRACE(COMP_INIT,
- "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n",
- i + 1,
- priv->EEPROMTxPowerLevelOFDM24G[i+1]);
}
}
if (priv->epromtype == EEPROM_93C46) {
@@ -508,22 +458,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->TxPowerLevelOFDM24G_C[i] =
priv->EEPROMRfCOfdmChnlTxPwLevel[2];
}
- for (i = 0; i < 14; i++)
- RT_TRACE(COMP_INIT,
- "priv->TxPowerLevelCCK_A[%d] = 0x%x\n",
- i, priv->TxPowerLevelCCK_A[i]);
- for (i = 0; i < 14; i++)
- RT_TRACE(COMP_INIT,
- "priv->TxPowerLevelOFDM24G_A[%d] = 0x%x\n",
- i, priv->TxPowerLevelOFDM24G_A[i]);
- for (i = 0; i < 14; i++)
- RT_TRACE(COMP_INIT,
- "priv->TxPowerLevelCCK_C[%d] = 0x%x\n",
- i, priv->TxPowerLevelCCK_C[i]);
- for (i = 0; i < 14; i++)
- RT_TRACE(COMP_INIT,
- "priv->TxPowerLevelOFDM24G_C[%d] = 0x%x\n",
- i, priv->TxPowerLevelOFDM24G_C[i]);
priv->LegacyHTTxPowerDiff =
priv->EEPROMLegacyHTTxPowerDiff;
priv->AntennaTxPwDiff[0] = 0;
@@ -536,13 +470,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
}
}
- if (priv->rf_type == RF_1T2R) {
- /* no matter what checkpatch says, the braces are needed */
- RT_TRACE(COMP_INIT, "\n1T2R config\n");
- } else if (priv->rf_type == RF_2T4R) {
- RT_TRACE(COMP_INIT, "\n2T4R config\n");
- }
-
rtl92e_init_adaptive_rate(dev);
priv->rf_chip = RF_8256;
@@ -574,8 +501,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->ChannelPlan = priv->eeprom_ChannelPlan&0x7f;
else
priv->ChannelPlan = 0x0;
- RT_TRACE(COMP_INIT, "Toshiba ChannelPlan = 0x%x\n",
- priv->ChannelPlan);
break;
case EEPROM_CID_Nettronix:
priv->ScanDelay = 100;
@@ -602,10 +527,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->rtllib->bSupportRemoteWakeUp = true;
else
priv->rtllib->bSupportRemoteWakeUp = false;
-
- RT_TRACE(COMP_INIT, "RegChannelPlan(%d)\n", priv->RegChannelPlan);
- RT_TRACE(COMP_INIT, "ChannelPlan = %d\n", priv->ChannelPlan);
- RT_TRACE(COMP_TRACE, "<==== ReadAdapterInfo\n");
}
void rtl92e_get_eeprom_size(struct net_device *dev)
@@ -613,14 +534,9 @@ void rtl92e_get_eeprom_size(struct net_device *dev)
u16 curCR;
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
curCR = rtl92e_readw(dev, EPROM_CMD);
- RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
- curCR);
priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
EEPROM_93C46;
- RT_TRACE(COMP_INIT, "<===========%s(), epromtype:%d\n", __func__,
- priv->epromtype);
_rtl92e_read_eeprom_info(dev);
}
@@ -697,7 +613,6 @@ bool rtl92e_start_adapter(struct net_device *dev)
int i = 0;
u32 retry_times = 0;
- RT_TRACE(COMP_INIT, "====>%s()\n", __func__);
priv->being_init_adapter = true;
start:
@@ -710,7 +625,7 @@ start:
priv->pFirmware->status = FW_STATUS_0_INIT;
if (priv->RegRfOff)
- priv->rtllib->eRFPowerState = eRfOff;
+ priv->rtllib->rf_power_state = rf_off;
ulRegRead = rtl92e_readl(dev, CPU_GEN);
if (priv->pFirmware->status == FW_STATUS_0_INIT)
@@ -732,13 +647,11 @@ start:
rtl92e_writeb(dev, SWREGULATOR, 0xb8);
}
}
- RT_TRACE(COMP_INIT, "BB Config Start!\n");
rtStatus = rtl92e_config_bb(dev);
if (!rtStatus) {
netdev_warn(dev, "%s(): Failed to configure BB\n", __func__);
return rtStatus;
}
- RT_TRACE(COMP_INIT, "BB Config Finished!\n");
priv->LoopbackMode = RTL819X_NO_LOOPBACK;
if (priv->ResetProgress == RESET_TYPE_NORESET) {
@@ -818,19 +731,7 @@ start:
tmpvalue = rtl92e_readb(dev, IC_VERRSION);
priv->IC_Cut = tmpvalue;
- RT_TRACE(COMP_INIT, "priv->IC_Cut= 0x%x\n", priv->IC_Cut);
- if (priv->IC_Cut >= IC_VersionCut_D) {
- if (priv->IC_Cut == IC_VersionCut_D) {
- /* no matter what checkpatch says, braces are needed */
- RT_TRACE(COMP_INIT, "D-cut\n");
- } else if (priv->IC_Cut == IC_VersionCut_E) {
- RT_TRACE(COMP_INIT, "E-cut\n");
- }
- } else {
- RT_TRACE(COMP_INIT, "Before C-cut\n");
- }
- RT_TRACE(COMP_INIT, "Load Firmware!\n");
bfirmwareok = rtl92e_init_fw(dev);
if (!bfirmwareok) {
if (retry_times < 10) {
@@ -841,15 +742,13 @@ start:
goto end;
}
}
- RT_TRACE(COMP_INIT, "Load Firmware finished!\n");
+
if (priv->ResetProgress == RESET_TYPE_NORESET) {
- RT_TRACE(COMP_INIT, "RF Config Started!\n");
rtStatus = rtl92e_config_phy(dev);
if (!rtStatus) {
netdev_info(dev, "RF Config failed\n");
return rtStatus;
}
- RT_TRACE(COMP_INIT, "RF Config Finished!\n");
}
rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
@@ -858,25 +757,14 @@ start:
rtl92e_writeb(dev, 0x87, 0x0);
if (priv->RegRfOff) {
- RT_TRACE((COMP_INIT | COMP_RF | COMP_POWER),
- "%s(): Turn off RF for RegRfOff ----------\n",
- __func__);
- rtl92e_set_rf_state(dev, eRfOff, RF_CHANGE_BY_SW);
- } else if (priv->rtllib->RfOffReason > RF_CHANGE_BY_PS) {
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER),
- "%s(): Turn off RF for RfOffReason(%d) ----------\n",
- __func__, priv->rtllib->RfOffReason);
- rtl92e_set_rf_state(dev, eRfOff, priv->rtllib->RfOffReason);
- } else if (priv->rtllib->RfOffReason >= RF_CHANGE_BY_IPS) {
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER),
- "%s(): Turn off RF for RfOffReason(%d) ----------\n",
- __func__, priv->rtllib->RfOffReason);
- rtl92e_set_rf_state(dev, eRfOff, priv->rtllib->RfOffReason);
+ rtl92e_set_rf_state(dev, rf_off, RF_CHANGE_BY_SW);
+ } else if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_PS) {
+ rtl92e_set_rf_state(dev, rf_off, priv->rtllib->rf_off_reason);
+ } else if (priv->rtllib->rf_off_reason >= RF_CHANGE_BY_IPS) {
+ rtl92e_set_rf_state(dev, rf_off, priv->rtllib->rf_off_reason);
} else {
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): RF-ON\n",
- __func__);
- priv->rtllib->eRFPowerState = eRfOn;
- priv->rtllib->RfOffReason = 0;
+ priv->rtllib->rf_power_state = rf_on;
+ priv->rtllib->rf_off_reason = 0;
}
if (priv->rtllib->FwRWRF)
@@ -915,18 +803,6 @@ start:
priv->CCKPresentAttentuation_difference = 0;
priv->CCKPresentAttentuation =
priv->CCKPresentAttentuation_20Mdefault;
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex_initial = %d\n",
- priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex_real__initial = %d\n",
- priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation_difference_initial = %d\n",
- priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation_initial = %d\n",
- priv->CCKPresentAttentuation);
priv->btxpower_tracking = false;
}
}
@@ -946,7 +822,7 @@ static void _rtl92e_net_update(struct net_device *dev)
net = &priv->rtllib->current_network;
rtl92e_config_rate(dev, &rate_config);
- priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
+ priv->dot11_current_preamble_mode = PREAMBLE_AUTO;
priv->basic_rate = rate_config &= 0x15f;
rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
@@ -1237,7 +1113,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
static u8 tmp;
if (!tmp) {
- RT_TRACE(COMP_DBG, "==>================hw sec\n");
tmp = 1;
}
switch (priv->rtllib->pairwise_key_type) {
@@ -1350,12 +1225,6 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate)
case DESC90_RATE54M:
ret_rate = MGN_54M;
break;
-
- default:
- RT_TRACE(COMP_RECV,
- "%s: Non supportedRate [%x], bIsHT = %d!!!\n",
- __func__, rate, bIsHT);
- break;
}
} else {
@@ -1411,12 +1280,6 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate)
case DESC90_RATEMCS32:
ret_rate = 0x80 | 0x20;
break;
-
- default:
- RT_TRACE(COMP_RECV,
- "%s: Non supported Rate [%x], bIsHT = %d!!!\n",
- __func__, rate, bIsHT);
- break;
}
}
@@ -1721,9 +1584,6 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
for (rfpath = RF90_PATH_A; rfpath < RF90_PATH_C; rfpath++) {
if (!rtl92e_is_legal_rf_path(priv->rtllib->dev, rfpath))
continue;
- RT_TRACE(COMP_DBG,
- "Jacken -> pPreviousstats->RxMIMOSignalStrength[rfpath] = %d\n",
- prev_st->RxMIMOSignalStrength[rfpath]);
if (priv->stats.rx_rssi_percentage[rfpath] == 0) {
priv->stats.rx_rssi_percentage[rfpath] =
prev_st->RxMIMOSignalStrength[rfpath];
@@ -1745,9 +1605,6 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
(prev_st->RxMIMOSignalStrength[rfpath])) /
(RX_SMOOTH);
}
- RT_TRACE(COMP_DBG,
- "Jacken -> priv->RxStats.RxRSSIPercentage[rfPath] = %d\n",
- priv->stats.rx_rssi_percentage[rfpath]);
}
}
@@ -1772,11 +1629,6 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (prev_st->RxPWDBAll >= 3)
prev_st->RxPWDBAll -= 3;
}
-
- RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
- prev_st->bIsCCK ? "CCK" : "OFDM",
- prev_st->RxPWDBAll);
-
if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
prev_st->bToSelfBA) {
if (priv->undecorated_smoothed_pwdb < 0)
@@ -2052,11 +1904,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->RxIs40MHzPacket = pDrvInfo->BW;
_rtl92e_translate_rx_signal_stats(dev, skb, stats, pdesc, pDrvInfo);
-
- if (pDrvInfo->FirstAGGR == 1 || pDrvInfo->PartAggr == 1)
- RT_TRACE(COMP_RXDESC,
- "pDrvInfo->FirstAGGR = %d, pDrvInfo->PartAggr = %d\n",
- pDrvInfo->FirstAGGR, pDrvInfo->PartAggr);
skb_trim(skb, skb->len - 4/*sCrcLng*/);
@@ -2138,7 +1985,7 @@ void rtl92e_update_ratr_table(struct net_device *dev)
break;
case IEEE_N_24G:
case IEEE_N_5G:
- if (ieee->pHTInfo->PeerMimoPs == 0) {
+ if (ieee->pHTInfo->peer_mimo_ps == 0) {
ratr_value &= 0x0007F007;
} else {
if (priv->rf_type == RF_1T2R)
@@ -2151,10 +1998,10 @@ void rtl92e_update_ratr_table(struct net_device *dev)
break;
}
ratr_value &= 0x0FFFFFFF;
- if (ieee->pHTInfo->bCurTxBW40MHz &&
+ if (ieee->pHTInfo->cur_tx_bw40mhz &&
ieee->pHTInfo->bCurShortGI40MHz)
ratr_value |= 0x80000000;
- else if (!ieee->pHTInfo->bCurTxBW40MHz &&
+ else if (!ieee->pHTInfo->cur_tx_bw40mhz &&
ieee->pHTInfo->bCurShortGI20MHz)
ratr_value |= 0x80000000;
rtl92e_writel(dev, RATR0+rate_index*4, ratr_value);
@@ -2261,9 +2108,6 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
u8 i;
u8 SilentResetRxSoltNum = 4;
- RT_TRACE(COMP_RESET, "%s(): RegRxCounter is %d, RxCounter is %d\n",
- __func__, RegRxCounter, priv->RxCounter);
-
rx_chk_cnt++;
if (priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5)) {
rx_chk_cnt = 0;
@@ -2321,9 +2165,6 @@ bool rtl92e_is_tx_stuck(struct net_device *dev)
bool bStuck = false;
u16 RegTxCounter = rtl92e_readw(dev, 0x128);
- RT_TRACE(COMP_RESET, "%s():RegTxCounter is %d,TxCounter is %d\n",
- __func__, RegTxCounter, priv->TxCounter);
-
if (priv->TxCounter == RegTxCounter)
bStuck = true;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 38110fa4f36d..789d288d7503 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -77,10 +77,6 @@ static bool _rtl92e_fw_check_ready(struct net_device *dev,
rt_status = _rtl92e_wait_for_fw(dev, CPU_GEN_FIRM_RDY, 20);
if (rt_status)
pfirmware->status = FW_STATUS_5_READY;
- else
- RT_TRACE(COMP_FIRMWARE,
- "_rtl92e_is_fw_ready fail(%d)!\n",
- rt_status);
break;
default:
rt_status = false;
@@ -149,9 +145,6 @@ bool rtl92e_init_fw(struct net_device *dev)
} else if (pfirmware->status == FW_STATUS_5_READY) {
rst_opt = OPT_FIRMWARE_RESET;
starting_state = FW_INIT_STEP2_DATA;
- } else {
- RT_TRACE(COMP_FIRMWARE,
- "PlatformInitFirmware: undefined firmware state\n");
}
for (i = starting_state; i <= FW_INIT_STEP2_DATA; i++) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index f92551094738..1b592258e640 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -117,8 +117,6 @@ static u32 _rtl92e_phy_rf_read(struct net_device *dev,
} else
NewOffset = Offset;
} else {
- RT_TRACE((COMP_PHY|COMP_ERR),
- "check RF type here, need to be 8256\n");
NewOffset = Offset;
}
rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
@@ -173,8 +171,6 @@ static void _rtl92e_phy_rf_write(struct net_device *dev,
} else
NewOffset = Offset;
} else {
- RT_TRACE((COMP_PHY|COMP_ERR),
- "check RF type here, need to be 8256\n");
NewOffset = Offset;
}
@@ -204,10 +200,9 @@ void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
if (!rtl92e_is_legal_rf_path(dev, eRFPath))
return;
- if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
+ if (priv->rtllib->rf_power_state != rf_on && !priv->being_init_adapter)
return;
- RT_TRACE(COMP_PHY, "FW RF CTRL is not ready now\n");
if (priv->Rf_Mode == RF_OP_By_FW) {
if (BitMask != bMask12Bits) {
Original_Value = _rtl92e_phy_rf_fw_read(dev, eRFPath,
@@ -242,7 +237,7 @@ u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
if (!rtl92e_is_legal_rf_path(dev, eRFPath))
return 0;
- if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
+ if (priv->rtllib->rf_power_state != rf_on && !priv->being_init_adapter)
return 0;
mutex_lock(&priv->rf_mutex);
if (priv->Rf_Mode == RF_OP_By_FW) {
@@ -312,19 +307,14 @@ void rtl92e_config_mac(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
if (priv->bTXPowerDataReadFromEEPORM) {
- RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n");
dwArrayLen = MACPHY_Array_PGLength;
pdwArray = Rtl819XMACPHY_Array_PG;
} else {
- RT_TRACE(COMP_PHY, "Read rtl819XMACPHY_Array\n");
dwArrayLen = MACPHY_ArrayLength;
pdwArray = Rtl819XMACPHY_Array;
}
for (i = 0; i < dwArrayLen; i += 3) {
- RT_TRACE(COMP_DBG,
- "The Rtl8190MACPHY_Array[0] is %x Rtl8190MACPHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n",
- pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
if (pdwArray[i] == 0x318)
pdwArray[i+2] = 0x00000800;
rtl92e_set_bb_reg(dev, pdwArray[i], pdwArray[i+1],
@@ -357,20 +347,12 @@ static void _rtl92e_phy_config_bb(struct net_device *dev, u8 ConfigType)
rtl92e_set_bb_reg(dev, Rtl819XPHY_REGArray_Table[i],
bMaskDWord,
Rtl819XPHY_REGArray_Table[i+1]);
- RT_TRACE(COMP_DBG,
- "i: %x, The Rtl819xUsbPHY_REGArray[0] is %x Rtl819xUsbPHY_REGArray[1] is %x\n",
- i, Rtl819XPHY_REGArray_Table[i],
- Rtl819XPHY_REGArray_Table[i+1]);
}
} else if (ConfigType == BaseBand_Config_AGC_TAB) {
for (i = 0; i < AGCTAB_ArrayLen; i += 2) {
rtl92e_set_bb_reg(dev, Rtl819XAGCTAB_Array_Table[i],
bMaskDWord,
Rtl819XAGCTAB_Array_Table[i+1]);
- RT_TRACE(COMP_DBG,
- "i:%x, The rtl819XAGCTAB_Array[0] is %x rtl819XAGCTAB_Array[1] is %x\n",
- i, Rtl819XAGCTAB_Array_Table[i],
- Rtl819XAGCTAB_Array_Table[i+1]);
}
}
}
@@ -478,8 +460,6 @@ bool rtl92e_check_bb_and_rf(struct net_device *dev, enum hw90_block CheckBlock,
WriteAddr[HW90_BLOCK_PHY0] = 0x900;
WriteAddr[HW90_BLOCK_PHY1] = 0x800;
WriteAddr[HW90_BLOCK_RF] = 0x3;
- RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __func__,
- CheckBlock);
if (CheckBlock == HW90_BLOCK_MAC) {
netdev_warn(dev, "%s(): No checks available for MAC block.\n",
@@ -543,9 +523,6 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev)
(enum hw90_block)eCheckItem,
(enum rf90_radio_path)0);
if (!rtStatus) {
- RT_TRACE((COMP_ERR | COMP_PHY),
- "rtl92e_config_rf():Check PHY%d Fail!!\n",
- eCheckItem-1);
return rtStatus;
}
}
@@ -602,15 +579,9 @@ void rtl92e_get_tx_power(struct net_device *dev)
priv->DefaultInitialGain[1] = rtl92e_readb(dev, rOFDM0_XBAGCCore1);
priv->DefaultInitialGain[2] = rtl92e_readb(dev, rOFDM0_XCAGCCore1);
priv->DefaultInitialGain[3] = rtl92e_readb(dev, rOFDM0_XDAGCCore1);
- RT_TRACE(COMP_INIT,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
- priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
- priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
priv->framesync = rtl92e_readb(dev, rOFDM0_RxDetector3);
priv->framesyncC34 = rtl92e_readl(dev, rOFDM0_RxDetector2);
- RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n",
- rOFDM0_RxDetector3, priv->framesync);
priv->SifsTime = rtl92e_readw(dev, SIFS);
}
@@ -813,9 +784,6 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
struct sw_chnl_cmd *CurrentCmd = NULL;
u8 eRFPath;
- RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n",
- __func__, *stage, *step, channel);
-
if (!rtllib_legal_channel(priv->rtllib, channel)) {
netdev_err(dev, "Invalid channel requested: %d\n", channel);
return true;
@@ -976,21 +944,13 @@ static void _rtl92e_phy_switch_channel_work_item(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_TRACE, "==> SwChnlCallback819xUsbWorkItem()\n");
-
- RT_TRACE(COMP_TRACE, "=====>--%s(), set chan:%d, priv:%p\n", __func__,
- priv->chan, priv);
-
_rtl92e_phy_switch_channel(dev, priv->chan);
-
- RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n");
}
u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_PHY, "=====>%s()\n", __func__);
if (!priv->up) {
netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
return false;
@@ -1060,10 +1020,6 @@ static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
if (priv->CCKPresentAttentuation < 0)
priv->CCKPresentAttentuation = 0;
- RT_TRACE(COMP_POWER_TRACKING,
- "20M, priv->CCKPresentAttentuation = %d\n",
- priv->CCKPresentAttentuation);
-
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
@@ -1082,9 +1038,6 @@ static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
priv->CCKPresentAttentuation_40Mdefault +
priv->CCKPresentAttentuation_difference;
- RT_TRACE(COMP_POWER_TRACKING,
- "40M, priv->CCKPresentAttentuation = %d\n",
- priv->CCKPresentAttentuation);
if (priv->CCKPresentAttentuation >
(CCKTxBBGainTableLength - 1))
priv->CCKPresentAttentuation =
@@ -1123,16 +1076,10 @@ static void _rtl92e_cck_tx_power_track_bw_switch_thermal(struct net_device *dev)
if (priv->Record_CCK_20Mindex == 0)
priv->Record_CCK_20Mindex = 6;
priv->CCK_index = priv->Record_CCK_20Mindex;
- RT_TRACE(COMP_POWER_TRACKING,
- "20MHz, %s,CCK_index = %d\n", __func__,
- priv->CCK_index);
break;
case HT_CHANNEL_WIDTH_20_40:
priv->CCK_index = priv->Record_CCK_40Mindex;
- RT_TRACE(COMP_POWER_TRACKING,
- "40MHz, %s, CCK_index = %d\n", __func__,
- priv->CCK_index);
break;
}
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
@@ -1154,12 +1101,6 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
u8 regBwOpMode;
- RT_TRACE(COMP_SWBW,
- "==>%s Switch to %s bandwidth\n", __func__,
- priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
-
-
if (priv->rf_chip == RF_PSEUDO_11N) {
priv->SetBWModeInProgress = false;
return;
@@ -1251,11 +1192,9 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
atomic_dec(&(priv->rtllib->atm_swbw));
priv->SetBWModeInProgress = false;
-
- RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()");
}
-void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width Bandwidth,
+void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1267,7 +1206,7 @@ void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width Bandwidth,
atomic_inc(&(priv->rtllib->atm_swbw));
priv->SetBWModeInProgress = true;
- priv->CurrentChannelBW = Bandwidth;
+ priv->CurrentChannelBW = bandwidth;
if (Offset == HT_EXTCHNL_OFFSET_LOWER)
priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER;
@@ -1291,8 +1230,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
if (priv->up) {
switch (Operation) {
case IG_Backup:
- RT_TRACE(COMP_SCAN,
- "IG_Backup, backup the initial gain.\n");
initial_gain = SCAN_RX_INITIAL_GAIN;
BitMask = bMaskByte0;
if (dm_digtable.dig_algorithm ==
@@ -1314,35 +1251,13 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev,
rCCK0_CCA, BitMask);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xa0a is %x\n",
- priv->initgain_backup.cca);
-
- RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x\n",
- initial_gain);
rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XDAGCCore1, initial_gain);
- RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x\n",
- POWER_DETECTION_TH);
rtl92e_writeb(dev, 0xa0a, POWER_DETECTION_TH);
break;
case IG_Restore:
- RT_TRACE(COMP_SCAN,
- "IG_Restore, restore the initial gain.\n");
BitMask = 0x7f;
if (dm_digtable.dig_algorithm ==
DIG_ALGO_BY_FALSE_ALARM)
@@ -1360,22 +1275,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
rtl92e_set_bb_reg(dev, rCCK0_CCA, BitMask,
(u32)priv->initgain_backup.cca);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xa0a is %x\n",
- priv->initgain_backup.cca);
-
rtl92e_set_tx_power(dev,
priv->rtllib->current_network.channel);
@@ -1383,9 +1282,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
DIG_ALGO_BY_FALSE_ALARM)
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
break;
- default:
- RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n");
- break;
}
}
}
@@ -1405,7 +1301,7 @@ void rtl92e_set_rf_off(struct net_device *dev)
}
static bool _rtl92e_set_rf_power_state(struct net_device *dev,
- enum rt_rf_power_state eRFPowerState)
+ enum rt_rf_power_state rf_power_state)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
@@ -1416,15 +1312,13 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
if (priv->SetRFPowerStateInProgress)
return false;
- RT_TRACE(COMP_PS, "===========> %s!\n", __func__);
priv->SetRFPowerStateInProgress = true;
switch (priv->rf_chip) {
case RF_8256:
- switch (eRFPowerState) {
- case eRfOn:
- RT_TRACE(COMP_PS, "%s eRfOn!\n", __func__);
- if ((priv->rtllib->eRFPowerState == eRfOff) &&
+ switch (rf_power_state) {
+ case rf_on:
+ if ((priv->rtllib->rf_power_state == rf_off) &&
RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
u32 InitilizeCount = 3;
@@ -1469,8 +1363,8 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
break;
- case eRfSleep:
- if (priv->rtllib->eRFPowerState == eRfOff)
+ case rf_sleep:
+ if (priv->rtllib->rf_power_state == rf_off)
break;
@@ -1481,25 +1375,18 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
QueueID++;
continue;
} else {
- RT_TRACE((COMP_POWER|COMP_RF),
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 before doze!\n",
- (i+1), QueueID);
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(COMP_POWER, "\n\n\n TimeOut!! %s: eRfOff: %d times TcbBusyQueue[%d] != 0 !!!\n",
- __func__, MAX_DOZE_WAITING_TIMES_9x, QueueID);
break;
}
}
rtl92e_set_rf_off(dev);
break;
- case eRfOff:
- RT_TRACE(COMP_PS, "%s eRfOff/Sleep !\n", __func__);
-
+ case rf_off:
for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) {
ring = &priv->tx_ring[QueueID];
@@ -1507,18 +1394,11 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
QueueID++;
continue;
} else {
- RT_TRACE(COMP_POWER,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 before doze!\n",
- (i+1), QueueID);
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(COMP_POWER,
- "\n\n\n SetZebra: RFPowerState8185B(): eRfOff: %d times TcbBusyQueue[%d] != 0 !!!\n",
- MAX_DOZE_WAITING_TIMES_9x,
- QueueID);
break;
}
}
@@ -1538,7 +1418,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
bResult = false;
netdev_warn(dev,
"%s(): Unknown state requested: 0x%X.\n",
- __func__, eRFPowerState);
+ __func__, rf_power_state);
break;
}
@@ -1550,7 +1430,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
}
if (bResult) {
- priv->rtllib->eRFPowerState = eRFPowerState;
+ priv->rtllib->rf_power_state = rf_power_state;
switch (priv->rf_chip) {
case RF_8256:
@@ -1563,30 +1443,22 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
}
priv->SetRFPowerStateInProgress = false;
- RT_TRACE(COMP_PS, "<=========== %s bResult = %d!\n", __func__, bResult);
return bResult;
}
bool rtl92e_set_rf_power_state(struct net_device *dev,
- enum rt_rf_power_state eRFPowerState)
+ enum rt_rf_power_state rf_power_state)
{
struct r8192_priv *priv = rtllib_priv(dev);
bool bResult = false;
- RT_TRACE(COMP_PS,
- "---------> %s: eRFPowerState(%d)\n", __func__, eRFPowerState);
- if (eRFPowerState == priv->rtllib->eRFPowerState &&
+ if (rf_power_state == priv->rtllib->rf_power_state &&
priv->bHwRfOffAction == 0) {
- RT_TRACE(COMP_PS, "<--------- %s: discard the request for eRFPowerState(%d) is the same.\n",
- __func__, eRFPowerState);
return bResult;
}
- bResult = _rtl92e_set_rf_power_state(dev, eRFPowerState);
-
- RT_TRACE(COMP_PS, "<--------- %s: bResult(%d)\n", __func__, bResult);
-
+ bResult = _rtl92e_set_rf_power_state(dev, rf_power_state);
return bResult;
}
@@ -1603,10 +1475,6 @@ void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation)
case SCAN_OPT_RESTORE:
priv->rtllib->InitialGainHandler(dev, IG_Restore);
break;
-
- default:
- RT_TRACE(COMP_SCAN, "Unknown Scan Backup Operation.\n");
- break;
}
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 7c9148e033d8..75629f5df954 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -75,15 +75,14 @@ u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath);
u8 rtl92e_set_channel(struct net_device *dev, u8 channel);
void rtl92e_set_bw_mode(struct net_device *dev,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
void rtl92e_init_gain(struct net_device *dev, u8 Operation);
void rtl92e_set_rf_off(struct net_device *dev);
bool rtl92e_set_rf_power_state(struct net_device *dev,
- enum rt_rf_power_state eRFPowerState);
-#define PHY_SetRFPowerState rtl92e_set_rf_power_state
+ enum rt_rf_power_state rf_power_state);
void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index d7630f02a910..41faeb4b9b9b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -42,14 +42,10 @@ void rtl92e_enable_hw_security_config(struct net_device *dev)
ieee->hwsec_active = 1;
- if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep) {
+ if ((ieee->pHTInfo->iot_action & HT_IOT_ACT_PURE_N_MODE) || !hwwep) {
ieee->hwsec_active = 0;
SECR_value &= ~SCR_RxDecEnable;
}
-
- RT_TRACE(COMP_SEC, "%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n",
- __func__, ieee->hwsec_active, ieee->pairwise_key_type,
- SECR_value);
rtl92e_writeb(dev, SECR, SECR_value);
}
@@ -60,10 +56,6 @@ void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- RT_TRACE(COMP_DBG,
- "===========>%s():EntryNo is %d,KeyIndex is %d,KeyType is %d,is_mesh is %d\n",
- __func__, EntryNo, KeyIndex, KeyType, is_mesh);
-
if (EntryNo >= TOTAL_CAM_ENTRY)
return;
@@ -86,12 +78,12 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
u16 usConfig = 0;
u8 i;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
if (priv->rtllib->PowerSaveControl.bInactivePs) {
- if (rtState == eRfOff) {
- if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) {
+ if (rt_state == rf_off) {
+ if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
return;
@@ -107,10 +99,6 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
return;
}
- RT_TRACE(COMP_SEC,
- "====>to %s, dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n",
- __func__, dev, EntryNo, KeyIndex, KeyType, MacAddr);
-
if (DefaultKey)
usConfig |= BIT15 | (KeyType<<2);
else
@@ -144,7 +132,6 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
}
}
}
- RT_TRACE(COMP_SEC, "=========>after set key, usconfig:%x\n", usConfig);
}
void rtl92e_cam_restore(struct net_device *dev)
@@ -163,9 +150,6 @@ void rtl92e_cam_restore(struct net_device *dev)
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
- RT_TRACE(COMP_SEC, "%s:\n", __func__);
-
-
if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->rtllib->pairwise_key_type == KEY_TYPE_WEP104)) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index b9ce71848023..89bc989cffba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -25,7 +25,6 @@
int hwwep = 1;
static char *ifname = "wlan%d";
-
static const struct rtl819x_ops rtl819xp_ops = {
.nic_type = NIC_8192E,
.get_eeprom_size = rtl92e_get_eeprom_size,
@@ -44,8 +43,8 @@ static const struct rtl819x_ops rtl819xp_ops = {
.rx_enable = rtl92e_enable_rx,
.tx_enable = rtl92e_enable_tx,
.interrupt_recognized = rtl92e_ack_irq,
- .TxCheckStuckHandler = rtl92e_is_tx_stuck,
- .RxCheckStuckHandler = rtl92e_is_rx_stuck,
+ .tx_check_stuck_handler = rtl92e_is_tx_stuck,
+ .rx_check_stuck_handler = rtl92e_is_rx_stuck,
};
static struct pci_device_id rtl8192_pci_id_tbl[] = {
@@ -133,36 +132,27 @@ void rtl92e_writew(struct net_device *dev, int x, u16 y)
* -----------------------------GENERAL FUNCTION-------------------------
****************************************************************************/
bool rtl92e_set_rf_state(struct net_device *dev,
- enum rt_rf_power_state StateToSet,
- RT_RF_CHANGE_SOURCE ChangeSource)
+ enum rt_rf_power_state state_to_set,
+ RT_RF_CHANGE_SOURCE change_source)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- bool bActionAllowed = false;
- bool bConnectBySSID = false;
- enum rt_rf_power_state rtState;
- u16 RFWaitCounter = 0;
+ bool action_allowed = false;
+ bool connect_by_ssid = false;
+ enum rt_rf_power_state rt_state;
+ u16 rf_wait_counter = 0;
unsigned long flag;
- RT_TRACE((COMP_PS | COMP_RF),
- "===>%s: StateToSet(%d)\n", __func__, StateToSet);
-
while (true) {
spin_lock_irqsave(&priv->rf_ps_lock, flag);
- if (priv->RFChangeInProgress) {
+ if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
- RT_TRACE((COMP_PS | COMP_RF),
- "%s: RF Change in progress! Wait to set..StateToSet(%d).\n",
- __func__, StateToSet);
-
- while (priv->RFChangeInProgress) {
- RFWaitCounter++;
- RT_TRACE((COMP_PS | COMP_RF),
- "%s: Wait 1 ms (%d times)...\n",
- __func__, RFWaitCounter);
+
+ while (priv->rf_change_in_progress) {
+ rf_wait_counter++;
mdelay(1);
- if (RFWaitCounter > 100) {
+ if (rf_wait_counter > 100) {
netdev_warn(dev,
"%s(): Timeout waiting for RF change.\n",
__func__);
@@ -170,43 +160,37 @@ bool rtl92e_set_rf_state(struct net_device *dev,
}
}
} else {
- priv->RFChangeInProgress = true;
+ priv->rf_change_in_progress = true;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
break;
}
}
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
- switch (StateToSet) {
- case eRfOn:
- priv->rtllib->RfOffReason &= (~ChangeSource);
+ switch (state_to_set) {
+ case rf_on:
+ priv->rtllib->rf_off_reason &= (~change_source);
- if ((ChangeSource == RF_CHANGE_BY_HW) && priv->bHwRadioOff)
- priv->bHwRadioOff = false;
+ if ((change_source == RF_CHANGE_BY_HW) && priv->hw_radio_off)
+ priv->hw_radio_off = false;
- if (!priv->rtllib->RfOffReason) {
- priv->rtllib->RfOffReason = 0;
- bActionAllowed = true;
-
-
- if (rtState == eRfOff &&
- ChangeSource >= RF_CHANGE_BY_HW)
- bConnectBySSID = true;
- } else {
- RT_TRACE((COMP_PS | COMP_RF),
- "%s - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n",
- __func__, priv->rtllib->RfOffReason, ChangeSource);
- }
+ if (!priv->rtllib->rf_off_reason) {
+ priv->rtllib->rf_off_reason = 0;
+ action_allowed = true;
+ if (rt_state == rf_off &&
+ change_source >= RF_CHANGE_BY_HW)
+ connect_by_ssid = true;
+ }
break;
- case eRfOff:
+ case rf_off:
if ((priv->rtllib->iw_mode == IW_MODE_INFRA) ||
(priv->rtllib->iw_mode == IW_MODE_ADHOC)) {
- if ((priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) ||
- (ChangeSource > RF_CHANGE_BY_IPS)) {
+ if ((priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) ||
+ (change_source > RF_CHANGE_BY_IPS)) {
if (ieee->state == RTLLIB_LINKED)
priv->blinked_ingpio = true;
else
@@ -215,46 +199,36 @@ bool rtl92e_set_rf_state(struct net_device *dev,
WLAN_REASON_DISASSOC_STA_HAS_LEFT);
}
}
- if ((ChangeSource == RF_CHANGE_BY_HW) && !priv->bHwRadioOff)
- priv->bHwRadioOff = true;
- priv->rtllib->RfOffReason |= ChangeSource;
- bActionAllowed = true;
+ if ((change_source == RF_CHANGE_BY_HW) && !priv->hw_radio_off)
+ priv->hw_radio_off = true;
+ priv->rtllib->rf_off_reason |= change_source;
+ action_allowed = true;
break;
- case eRfSleep:
- priv->rtllib->RfOffReason |= ChangeSource;
- bActionAllowed = true;
+ case rf_sleep:
+ priv->rtllib->rf_off_reason |= change_source;
+ action_allowed = true;
break;
default:
break;
}
- if (bActionAllowed) {
- RT_TRACE((COMP_PS | COMP_RF),
- "%s: Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n",
- __func__, StateToSet, priv->rtllib->RfOffReason);
- PHY_SetRFPowerState(dev, StateToSet);
- if (StateToSet == eRfOn) {
-
- if (bConnectBySSID && priv->blinked_ingpio) {
+ if (action_allowed) {
+ rtl92e_set_rf_power_state(dev, state_to_set);
+ if (state_to_set == rf_on) {
+ if (connect_by_ssid && priv->blinked_ingpio) {
schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
priv->blinked_ingpio = false;
}
}
- } else {
- RT_TRACE((COMP_PS | COMP_RF),
- "%s: Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n",
- __func__, StateToSet, ChangeSource, priv->rtllib->RfOffReason);
}
spin_lock_irqsave(&priv->rf_ps_lock, flag);
- priv->RFChangeInProgress = false;
+ priv->rf_change_in_progress = false;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
-
- RT_TRACE((COMP_PS | COMP_RF), "<===%s\n", __func__);
- return bActionAllowed;
+ return action_allowed;
}
static short _rtl92e_check_nic_enough_desc(struct net_device *dev, int prio)
@@ -297,7 +271,6 @@ static void _rtl92e_set_chan(struct net_device *dev, short ch)
{
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch);
if (priv->chan_forced)
return;
@@ -314,22 +287,16 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
bool ShortPreamble;
if (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) {
- if (priv->dot11CurrentPreambleMode != PREAMBLE_SHORT) {
+ if (priv->dot11_current_preamble_mode != PREAMBLE_SHORT) {
ShortPreamble = true;
- priv->dot11CurrentPreambleMode = PREAMBLE_SHORT;
- RT_TRACE(COMP_DBG,
- "%s(): WLAN_CAPABILITY_SHORT_PREAMBLE\n",
- __func__);
+ priv->dot11_current_preamble_mode = PREAMBLE_SHORT;
priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
(unsigned char *)&ShortPreamble);
}
} else {
- if (priv->dot11CurrentPreambleMode != PREAMBLE_LONG) {
+ if (priv->dot11_current_preamble_mode != PREAMBLE_LONG) {
ShortPreamble = false;
- priv->dot11CurrentPreambleMode = PREAMBLE_LONG;
- RT_TRACE(COMP_DBG,
- "%s(): WLAN_CAPABILITY_LONG_PREAMBLE\n",
- __func__);
+ priv->dot11_current_preamble_mode = PREAMBLE_LONG;
priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
(unsigned char *)&ShortPreamble);
}
@@ -337,17 +304,17 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
if (net->mode & (IEEE_G | IEEE_N_24G)) {
u8 slot_time_val;
- u8 CurSlotTime = priv->slot_time;
+ u8 cur_slot_time = priv->slot_time;
if ((cap & WLAN_CAPABILITY_SHORT_SLOT_TIME) &&
- (!priv->rtllib->pHTInfo->bCurrentRT2RTLongSlotTime)) {
- if (CurSlotTime != SHORT_SLOT_TIME) {
+ (!priv->rtllib->pHTInfo->current_rt2rt_long_slot_time)) {
+ if (cur_slot_time != SHORT_SLOT_TIME) {
slot_time_val = SHORT_SLOT_TIME;
priv->rtllib->SetHwRegHandler(dev,
HW_VAR_SLOT_TIME, &slot_time_val);
}
} else {
- if (CurSlotTime != NON_SHORT_SLOT_TIME) {
+ if (cur_slot_time != NON_SHORT_SLOT_TIME) {
slot_time_val = NON_SHORT_SLOT_TIME;
priv->rtllib->SetHwRegHandler(dev,
HW_VAR_SLOT_TIME, &slot_time_val);
@@ -374,7 +341,7 @@ static void _rtl92e_update_beacon(void *data)
if (ieee->pHTInfo->bCurrentHTSupport)
HT_update_self_and_peer_setting(ieee, net);
- ieee->pHTInfo->bCurrentRT2RTLongSlotTime = net->bssht.bd_rt2rt_long_slot_time;
+ ieee->pHTInfo->current_rt2rt_long_slot_time = net->bssht.bd_rt2rt_long_slot_time;
ieee->pHTInfo->RT2RT_HT_Mode = net->bssht.rt2rt_ht_mode;
_rtl92e_update_cap(dev, net->capability);
}
@@ -389,13 +356,10 @@ static void _rtl92e_qos_activate(void *data)
mutex_lock(&priv->mutex);
if (priv->rtllib->state != RTLLIB_LINKED)
goto success;
- RT_TRACE(COMP_QOS,
- "qos active process with associate response received\n");
for (i = 0; i < QOS_QUEUE_NUM; i++)
priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM, (u8 *)(&i));
-
success:
mutex_unlock(&priv->mutex);
}
@@ -426,18 +390,14 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
network->qos_data.param_count;
priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
schedule_work(&priv->qos_activate);
- RT_TRACE(COMP_QOS,
- "QoS parameters change call qos_activate\n");
}
} else {
memcpy(&priv->rtllib->current_network.qos_data.parameters,
&def_qos_parameters, size);
- if ((network->qos_data.active == 1) && (active_network == 1)) {
+ if ((network->qos_data.active == 1) && (active_network == 1))
schedule_work(&priv->qos_activate);
- RT_TRACE(COMP_QOS,
- "QoS was disabled call qos_activate\n");
- }
+
network->qos_data.active = 0;
network->qos_data.supported = 0;
}
@@ -455,7 +415,6 @@ static int _rtl92e_handle_beacon(struct net_device *dev,
schedule_delayed_work(&priv->update_beacon_wq, 0);
return 0;
-
}
static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv,
@@ -496,8 +455,6 @@ static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv,
spin_unlock_irqrestore(&priv->rtllib->lock, flags);
- RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __func__,
- network->flags, priv->rtllib->current_network.qos_data.active);
if (set_qos_param == 1) {
rtl92e_dm_init_edca_turbo(priv->rtllib->dev);
schedule_work(&priv->qos_activate);
@@ -716,15 +673,9 @@ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
if ((wireless_mode == WIRELESS_MODE_N_24G) ||
(wireless_mode == WIRELESS_MODE_N_5G)) {
priv->rtllib->pHTInfo->bEnableHT = 1;
- RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
- __func__, wireless_mode);
} else {
priv->rtllib->pHTInfo->bEnableHT = 0;
- RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 0\n",
- __func__, wireless_mode);
}
-
- RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
_rtl92e_refresh_support_rate(priv);
}
@@ -742,7 +693,6 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
priv->rtllib->ieee_up = 1;
priv->up_first_time = 0;
- RT_TRACE(COMP_INIT, "Bringing up iface");
priv->bfirst_init = true;
init_status = priv->ops->initialize_adapter(dev);
if (!init_status) {
@@ -751,7 +701,6 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
return -1;
}
- RT_TRACE(COMP_INIT, "start adapter finished\n");
RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
priv->bfirst_init = false;
@@ -790,7 +739,6 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf)
priv->up = 0;
priv->rtllib->ieee_up = 0;
priv->bfirst_after_down = true;
- RT_TRACE(COMP_DOWN, "==========>%s()\n", __func__);
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
@@ -807,29 +755,25 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf)
rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
spin_lock_irqsave(&priv->rf_ps_lock, flags);
- while (priv->RFChangeInProgress) {
+ while (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
if (RFInProgressTimeOut > 100) {
spin_lock_irqsave(&priv->rf_ps_lock, flags);
break;
}
- RT_TRACE(COMP_DBG,
- "===>%s():RF is in progress, need to wait until rf change is done.\n",
- __func__);
mdelay(1);
RFInProgressTimeOut++;
spin_lock_irqsave(&priv->rf_ps_lock, flags);
}
- priv->RFChangeInProgress = true;
+ priv->rf_change_in_progress = true;
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
priv->ops->stop_adapter(dev, false);
spin_lock_irqsave(&priv->rf_ps_lock, flags);
- priv->RFChangeInProgress = false;
+ priv->rf_change_in_progress = false;
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
udelay(100);
memset(&priv->rtllib->current_network, 0,
offsetof(struct rtllib_network, list));
- RT_TRACE(COMP_DOWN, "<==========%s()\n", __func__);
return 0;
}
@@ -883,14 +827,13 @@ static void _rtl92e_init_priv_constant(struct net_device *dev)
pPSC->RegMaxLPSAwakeIntvl = 5;
}
-
static void _rtl92e_init_priv_variable(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 i;
priv->AcmMethod = eAcmWay2_SW;
- priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
+ priv->dot11_current_preamble_mode = PREAMBLE_AUTO;
priv->rtllib->status = 0;
priv->polling_timer_on = 0;
priv->up_first_time = 1;
@@ -935,12 +878,12 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
memset(&priv->InterruptLog, 0, sizeof(struct log_int_8190));
priv->RxCounter = 0;
priv->rtllib->wx_set_enc = 0;
- priv->bHwRadioOff = false;
+ priv->hw_radio_off = false;
priv->RegRfOff = false;
priv->isRFOff = false;
priv->bInPowerSaveMode = false;
- priv->rtllib->RfOffReason = 0;
- priv->RFChangeInProgress = false;
+ priv->rtllib->rf_off_reason = 0;
+ priv->rf_change_in_progress = false;
priv->bHwRfOffAction = 0;
priv->SetRFPowerStateInProgress = false;
priv->rtllib->PowerSaveControl.bInactivePs = true;
@@ -949,7 +892,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->rtllib->PowerSaveControl.bFwCtrlLPS = false;
priv->rtllib->LPSDelayCnt = 0;
priv->rtllib->sta_sleep = LPS_IS_WAKE;
- priv->rtllib->eRFPowerState = eRfOn;
+ priv->rtllib->rf_power_state = rf_on;
priv->rtllib->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
priv->rtllib->iw_mode = IW_MODE_INFRA;
@@ -1032,7 +975,6 @@ static short _rtl92e_get_channel_map(struct net_device *dev)
"rtl819x_init:Error channel plan! Set to default.\n");
priv->ChannelPlan = COUNTRY_CODE_FCC;
}
- RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
dot11d_init(priv->rtllib);
dot11d_channel_map(priv->ChannelPlan, priv->rtllib);
for (i = 1; i <= 11; i++)
@@ -1072,7 +1014,6 @@ static short _rtl92e_init(struct net_device *dev)
}
priv->irq = dev->irq;
- RT_TRACE(COMP_INIT, "IRQ %d\n", dev->irq);
if (_rtl92e_pci_initdescring(dev) != 0) {
netdev_err(dev, "Endopoints initialization failed");
@@ -1149,11 +1090,8 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev)
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
if (bCheckFwTxCnt) {
- if (priv->ops->TxCheckStuckHandler(dev)) {
- RT_TRACE(COMP_RESET,
- "TxCheckStuck(): Fw indicates no Tx condition!\n");
+ if (priv->ops->tx_check_stuck_handler(dev))
return RESET_TYPE_SILENT;
- }
}
return RESET_TYPE_NORESET;
@@ -1163,10 +1101,8 @@ static enum reset_type _rtl92e_rx_check_stuck(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->ops->RxCheckStuckHandler(dev)) {
- RT_TRACE(COMP_RESET, "RxStuck Condition\n");
+ if (priv->ops->rx_check_stuck_handler(dev))
return RESET_TYPE_SILENT;
- }
return RESET_TYPE_NORESET;
}
@@ -1178,12 +1114,12 @@ static enum reset_type _rtl92e_if_check_reset(struct net_device *dev)
enum reset_type RxResetType = RESET_TYPE_NORESET;
enum rt_rf_power_state rfState;
- rfState = priv->rtllib->eRFPowerState;
+ rfState = priv->rtllib->rf_power_state;
- if (rfState == eRfOn)
+ if (rfState == rf_on)
TxResetType = _rtl92e_tx_check_stuck(dev);
- if (rfState == eRfOn &&
+ if (rfState == rf_on &&
(priv->rtllib->iw_mode == IW_MODE_INFRA) &&
(priv->rtllib->state == RTLLIB_LINKED))
RxResetType = _rtl92e_rx_check_stuck(dev);
@@ -1201,7 +1137,6 @@ static enum reset_type _rtl92e_if_check_reset(struct net_device *dev)
} else {
return RESET_TYPE_NORESET;
}
-
}
static void _rtl92e_if_silent_reset(struct net_device *dev)
@@ -1213,17 +1148,14 @@ static void _rtl92e_if_silent_reset(struct net_device *dev)
unsigned long flag;
if (priv->ResetProgress == RESET_TYPE_NORESET) {
-
- RT_TRACE(COMP_RESET, "=========>Reset progress!!\n");
-
priv->ResetProgress = RESET_TYPE_SILENT;
spin_lock_irqsave(&priv->rf_ps_lock, flag);
- if (priv->RFChangeInProgress) {
+ if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
goto END;
}
- priv->RFChangeInProgress = true;
+ priv->rf_change_in_progress = true;
priv->bResetInProgress = true;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
@@ -1242,12 +1174,7 @@ RESET_START:
}
priv->up = 0;
- RT_TRACE(COMP_RESET, "%s():======>start to down the driver\n",
- __func__);
mdelay(1000);
- RT_TRACE(COMP_RESET,
- "%s():111111111111111111111111======>start to down the driver\n",
- __func__);
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
@@ -1275,16 +1202,8 @@ RESET_START:
rtl92e_dm_backup_state(dev);
mutex_unlock(&priv->wx_mutex);
- RT_TRACE(COMP_RESET,
- "%s():<==========down process is finished\n",
- __func__);
-
- RT_TRACE(COMP_RESET, "%s():<===========up process start\n",
- __func__);
reset_status = _rtl92e_up(dev, true);
- RT_TRACE(COMP_RESET,
- "%s():<===========up process is finished\n", __func__);
if (reset_status == -1) {
if (reset_times < 3) {
reset_times++;
@@ -1298,7 +1217,7 @@ RESET_START:
ieee->is_silent_reset = 1;
spin_lock_irqsave(&priv->rf_ps_lock, flag);
- priv->RFChangeInProgress = false;
+ priv->rf_change_in_progress = false;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
rtl92e_enable_hw_security_config(dev);
@@ -1333,8 +1252,6 @@ END:
priv->bResetInProgress = false;
rtl92e_writeb(dev, UFWP, 1);
- RT_TRACE(COMP_RESET, "Reset finished!! ====>[%d]\n",
- priv->reset_count);
}
}
@@ -1375,7 +1292,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
bool bHigherBusyRxTraffic = false;
bool bEnterPS = false;
- if (!priv->up || priv->bHwRadioOff)
+ if (!priv->up || priv->hw_radio_off)
return;
if (priv->rtllib->state >= RTLLIB_LINKED) {
@@ -1390,13 +1307,11 @@ static void _rtl92e_watchdog_wq_cb(void *data)
if (!rtllib_act_scanning(priv->rtllib, false)) {
if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state ==
RTLLIB_NOLINK) &&
- (ieee->eRFPowerState == eRfOn) && !ieee->is_set_key &&
+ (ieee->rf_power_state == rf_on) && !ieee->is_set_key &&
(!ieee->proto_stoppping) && !ieee->wx_set_enc) {
if ((ieee->PowerSaveControl.ReturnPoint ==
IPS_CALLBACK_NONE) &&
(!ieee->bNetPromiscuousMode)) {
- RT_TRACE(COMP_PS,
- "====================>haha: rtl92e_ips_enter()\n");
rtl92e_ips_enter(dev);
}
}
@@ -1407,7 +1322,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
ieee->LinkDetectInfo.NumTxOkInPeriod > 100)
bBusyTraffic = true;
-
if (ieee->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
ieee->LinkDetectInfo.NumTxOkInPeriod > 4000) {
bHigherBusyTraffic = true;
@@ -1433,7 +1347,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
rtl92e_leisure_ps_leave(dev);
} else {
- RT_TRACE(COMP_LPS, "====>no link LPS leave\n");
rtl92e_leisure_ps_leave(dev);
}
@@ -1456,9 +1369,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
else
priv->check_roaming_cnt = 0;
-
if (priv->check_roaming_cnt > 0) {
- if (ieee->eRFPowerState == eRfOff)
+ if (ieee->rf_power_state == rf_off)
netdev_info(dev, "%s(): RF is off\n", __func__);
netdev_info(dev,
@@ -1487,12 +1399,11 @@ static void _rtl92e_watchdog_wq_cb(void *data)
}
ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
ieee->LinkDetectInfo.NumRecvDataInPeriod = 0;
-
}
spin_lock_irqsave(&priv->tx_lock, flags);
if ((check_reset_cnt++ >= 3) && (!ieee->is_roaming) &&
- (!priv->RFChangeInProgress) && (!pPSC->bSwRfProcessing)) {
+ (!priv->rf_change_in_progress) && (!pPSC->bSwRfProcessing)) {
ResetType = _rtl92e_if_check_reset(dev);
check_reset_cnt = 3;
}
@@ -1500,7 +1411,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
if (!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_NORMAL) {
priv->ResetProgress = RESET_TYPE_NORMAL;
- RT_TRACE(COMP_RESET, "%s(): NOMAL RESET\n", __func__);
return;
}
@@ -1510,7 +1420,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
priv->force_reset = false;
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
- RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
static void _rtl92e_watchdog_timer_cb(struct timer_list *t)
@@ -1541,7 +1450,6 @@ void rtl92e_tx_enable(struct net_device *dev)
rtllib_reset_queue(priv->rtllib);
}
-
static void _rtl92e_free_rx_ring(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1599,7 +1507,7 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
- if ((priv->rtllib->eRFPowerState == eRfOff) || !priv->up ||
+ if ((priv->rtllib->rf_power_state == rf_off) || !priv->up ||
priv->bResetInProgress) {
kfree_skb(skb);
return;
@@ -1632,7 +1540,7 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
u8 queue_index = tcb_desc->queue_index;
if (queue_index != TXCMD_QUEUE) {
- if ((priv->rtllib->eRFPowerState == eRfOff) ||
+ if ((priv->rtllib->rf_power_state == rf_off) ||
!priv->up || priv->bResetInProgress) {
kfree_skb(skb);
return 0;
@@ -1936,13 +1844,11 @@ long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index)
return signal_power;
}
-
void rtl92e_update_rx_statistics(struct r8192_priv *priv,
struct rtllib_rx_stats *pprevious_stats)
{
int weighting = 0;
-
if (priv->stats.recv_signal_power == 0)
priv->stats.recv_signal_power =
pprevious_stats->RecvSignalPower;
@@ -1985,8 +1891,6 @@ void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
}
-
-
static void _rtl92e_rx_normal(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2086,7 +1990,6 @@ done:
priv->rx_idx[rx_queue_idx] = (priv->rx_idx[rx_queue_idx] + 1) %
priv->rxringcount;
}
-
}
static void _rtl92e_tx_resume(struct net_device *dev)
@@ -2151,7 +2054,6 @@ static int _rtl92e_open(struct net_device *dev)
ret = _rtl92e_try_up(dev);
mutex_unlock(&priv->wx_mutex);
return ret;
-
}
static int _rtl92e_try_up(struct net_device *dev)
@@ -2163,7 +2065,6 @@ static int _rtl92e_try_up(struct net_device *dev)
return _rtl92e_up(dev, false);
}
-
static int _rtl92e_close(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2181,7 +2082,6 @@ static int _rtl92e_close(struct net_device *dev)
mutex_unlock(&priv->wx_mutex);
return ret;
-
}
static int _rtl92e_down(struct net_device *dev, bool shutdownrf)
@@ -2224,10 +2124,8 @@ static void _rtl92e_set_multicast(struct net_device *dev)
promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
priv->promisc = promisc;
-
}
-
static int _rtl92e_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2278,21 +2176,13 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
goto done;
}
- if (inta & IMR_TBDOK) {
- RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
+ if (inta & IMR_TBDOK)
priv->stats.txbeaconokint++;
- }
- if (inta & IMR_TBDER) {
- RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
+ if (inta & IMR_TBDER)
priv->stats.txbeaconerr++;
- }
-
- if (inta & IMR_BDOK)
- RT_TRACE(COMP_INTR, "beacon interrupt!\n");
if (inta & IMR_MGNTDOK) {
- RT_TRACE(COMP_INTR, "Manage ok interrupt!\n");
priv->stats.txmanageokint++;
_rtl92e_tx_isr(dev, MGNT_QUEUE);
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
@@ -2319,13 +2209,10 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
tasklet_schedule(&priv->irq_rx_tasklet);
}
- if (inta & IMR_BcnInt) {
- RT_TRACE(COMP_INTR, "prepare beacon for interrupt!\n");
+ if (inta & IMR_BcnInt)
tasklet_schedule(&priv->irq_prepare_beacon_tasklet);
- }
if (inta & IMR_RDU) {
- RT_TRACE(COMP_INTR, "rx descriptor unavailable!\n");
priv->stats.rxrdu++;
rtl92e_writel(dev, INTA_MASK,
rtl92e_readl(dev, INTA_MASK) & ~IMR_RDU);
@@ -2333,7 +2220,6 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
}
if (inta & IMR_RXFOVW) {
- RT_TRACE(COMP_INTR, "rx overflow !\n");
priv->stats.rxoverflow++;
tasklet_schedule(&priv->irq_rx_tasklet);
}
@@ -2342,21 +2228,18 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
priv->stats.txoverflow++;
if (inta & IMR_BKDOK) {
- RT_TRACE(COMP_INTR, "BK Tx OK interrupt!\n");
priv->stats.txbkokint++;
priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, BK_QUEUE);
}
if (inta & IMR_BEDOK) {
- RT_TRACE(COMP_INTR, "BE TX OK interrupt!\n");
priv->stats.txbeokint++;
priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, BE_QUEUE);
}
if (inta & IMR_VIDOK) {
- RT_TRACE(COMP_INTR, "VI TX OK interrupt!\n");
priv->stats.txviokint++;
priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, VI_QUEUE);
@@ -2364,7 +2247,6 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
if (inta & IMR_VODOK) {
priv->stats.txvookint++;
- RT_TRACE(COMP_INTR, "Vo TX OK interrupt!\n");
priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, VO_QUEUE);
}
@@ -2376,8 +2258,6 @@ done:
return IRQ_HANDLED;
}
-
-
/****************************************************************************
* ---------------------------- PCI_STUFF---------------------------
****************************************************************************/
@@ -2402,8 +2282,6 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
int err = -ENOMEM;
u8 revision_id;
- RT_TRACE(COMP_INIT, "Configuring chip resources");
-
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Failed to enable PCI device");
return -EIO;
@@ -2452,7 +2330,6 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
goto err_rel_rtllib;
}
-
ioaddr = (unsigned long)ioremap(pmem_start, pmem_len);
if (ioaddr == (unsigned long)NULL) {
netdev_err(dev, "ioremap failed!");
@@ -2483,13 +2360,9 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
dev->type = ARPHRD_ETHER;
dev->watchdog_timeo = HZ * 3;
- if (dev_alloc_name(dev, ifname) < 0) {
- RT_TRACE(COMP_INIT,
- "Oops: devname already taken! Trying wlan%%d...\n");
+ if (dev_alloc_name(dev, ifname) < 0)
dev_alloc_name(dev, ifname);
- }
- RT_TRACE(COMP_INIT, "Driver probe completed1\n");
if (_rtl92e_init(dev) != 0) {
netdev_warn(dev, "Initialization failed");
goto err_free_irq;
@@ -2500,12 +2373,10 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
if (register_netdev(dev))
goto err_free_irq;
- RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
if (priv->polling_timer_on == 0)
rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
- RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
err_free_irq:
@@ -2560,7 +2431,6 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
}
pci_disable_device(pdev);
- RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
bool rtl92e_enable_nic(struct net_device *dev)
@@ -2576,7 +2446,6 @@ bool rtl92e_enable_nic(struct net_device *dev)
return false;
}
- RT_TRACE(COMP_PS, "===========>%s()\n", __func__);
priv->bfirst_init = true;
init_status = priv->ops->initialize_adapter(dev);
if (!init_status) {
@@ -2584,13 +2453,11 @@ bool rtl92e_enable_nic(struct net_device *dev)
priv->bdisable_nic = false;
return false;
}
- RT_TRACE(COMP_INIT, "start adapter finished\n");
RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
priv->bfirst_init = false;
rtl92e_irq_enable(dev);
priv->bdisable_nic = false;
- RT_TRACE(COMP_PS, "<===========%s()\n", __func__);
return init_status;
}
@@ -2599,7 +2466,6 @@ bool rtl92e_disable_nic(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
u8 tmp_state = 0;
- RT_TRACE(COMP_PS, "=========>%s()\n", __func__);
priv->bdisable_nic = true;
tmp_state = priv->rtllib->state;
rtllib_softmac_stop_protocol(priv->rtllib, 0, false);
@@ -2608,8 +2474,6 @@ bool rtl92e_disable_nic(struct net_device *dev)
rtl92e_irq_disable(dev);
priv->ops->stop_adapter(dev, false);
- RT_TRACE(COMP_PS, "<=========%s()\n", __func__);
-
return true;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 698552a92100..7021f9c435d9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -299,8 +299,8 @@ struct rtl819x_ops {
void (*tx_enable)(struct net_device *dev);
void (*interrupt_recognized)(struct net_device *dev,
u32 *p_inta, u32 *p_intb);
- bool (*TxCheckStuckHandler)(struct net_device *dev);
- bool (*RxCheckStuckHandler)(struct net_device *dev);
+ bool (*tx_check_stuck_handler)(struct net_device *dev);
+ bool (*rx_check_stuck_handler)(struct net_device *dev);
};
struct r8192_priv {
@@ -392,7 +392,7 @@ struct r8192_priv {
u16 ShortRetryLimit;
u16 LongRetryLimit;
- bool bHwRadioOff;
+ bool hw_radio_off;
bool blinked_ingpio;
u8 polling_timer_on;
@@ -430,7 +430,7 @@ struct r8192_priv {
u16 basic_rate;
u8 short_preamble;
- u8 dot11CurrentPreambleMode;
+ u8 dot11_current_preamble_mode;
u8 slot_time;
u16 SifsTime;
@@ -478,7 +478,7 @@ struct r8192_priv {
bool bInPowerSaveMode;
u8 bHwRfOffAction;
- bool RFChangeInProgress;
+ bool rf_change_in_progress;
bool SetRFPowerStateInProgress;
bool bdisable_nic;
@@ -598,6 +598,6 @@ bool rtl92e_enable_nic(struct net_device *dev);
bool rtl92e_disable_nic(struct net_device *dev);
bool rtl92e_set_rf_state(struct net_device *dev,
- enum rt_rf_power_state StateToSet,
- RT_RF_CHANGE_SOURCE ChangeSource);
+ enum rt_rf_power_state state_to_set,
+ RT_RF_CHANGE_SOURCE change_source);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index d58800d06e8f..702551056227 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -268,8 +268,6 @@ static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev)
NULL};
if (priv->ResetProgress == RESET_TYPE_SILENT) {
- RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),
- "GPIOChangeRFWorkItemCallBack(): Silent Reset!!!!!!!\n");
return;
}
@@ -333,8 +331,6 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
static u8 ping_rssi_state;
if (!priv->up) {
- RT_TRACE(COMP_RATE,
- "<---- %s: driver is going to unload\n", __func__);
return;
}
@@ -347,9 +343,9 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
if (priv->rtllib->state == RTLLIB_LINKED) {
- bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz &&
+ bshort_gi_enabled = (pHTInfo->cur_tx_bw40mhz &&
pHTInfo->bCurShortGI40MHz) ||
- (!pHTInfo->bCurTxBW40MHz &&
+ (!pHTInfo->cur_tx_bw40mhz &&
pHTInfo->bCurShortGI20MHz);
pra->upper_rssi_threshold_ratr =
@@ -423,9 +419,6 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
u32 ratr_value;
ratr_value = targetRATR;
- RT_TRACE(COMP_RATE,
- "currentRATR = %x, targetRATR = %x\n",
- currentRATR, targetRATR);
if (priv->rf_type == RF_1T2R)
ratr_value &= ~(RATE_ALL_OFDM_2SS);
rtl92e_writel(dev, RATR0, ratr_value);
@@ -628,7 +621,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver = 0;
u32 delta = 0;
- RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
priv->rtllib->bdynamic_txpower_enable = false;
@@ -637,10 +629,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
RF_Type = priv->rf_type;
Value = (RF_Type<<8) | powerlevelOFDM24G;
- RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n",
- powerlevelOFDM24G);
-
-
for (j = 0; j <= 30; j++) {
tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
@@ -656,15 +644,11 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
mdelay(1);
if (priv->bResetInProgress) {
- RT_TRACE(COMP_POWER_TRACKING,
- "we are in silent reset progress, so return\n");
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
}
- if (priv->rtllib->eRFPowerState != eRfOn) {
- RT_TRACE(COMP_POWER_TRACKING,
- "we are in power save, so return\n");
+ if (priv->rtllib->rf_power_state != rf_on) {
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
@@ -689,10 +673,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
tmp_report[k] = rtl92e_readb(dev,
Tssi_Report_Value2);
- RT_TRACE(COMP_POWER_TRACKING,
- "TSSI_report_value = %d\n",
- tmp_report[k]);
-
if (tmp_report[k] <= 20) {
viviflag = true;
break;
@@ -702,8 +682,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
if (viviflag) {
rtl92e_writeb(dev, Pw_Track_Flag, 0);
viviflag = false;
- RT_TRACE(COMP_POWER_TRACKING,
- "we filted this data\n");
for (k = 0; k < 5; k++)
tmp_report[k] = 0;
break;
@@ -713,12 +691,7 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
Avg_TSSI_Meas_from_driver += tmp_report[k];
Avg_TSSI_Meas_from_driver *= 100 / 5;
- RT_TRACE(COMP_POWER_TRACKING,
- "Avg_TSSI_Meas_from_driver = %d\n",
- Avg_TSSI_Meas_from_driver);
TSSI_13dBm = priv->TSSI_13dBm;
- RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n",
- TSSI_13dBm);
if (Avg_TSSI_Meas_from_driver > TSSI_13dBm)
delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
@@ -729,20 +702,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
priv->rtllib->bdynamic_txpower_enable = true;
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
- RT_TRACE(COMP_POWER_TRACKING,
- "tx power track is done\n");
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex = %d\n",
- priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex_real = %d\n",
- priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation_difference = %d\n",
- priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation = %d\n",
- priv->CCKPresentAttentuation);
return;
}
if (Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK)
@@ -785,26 +744,12 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
} else
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex = %d\n",
- priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex_real = %d\n",
- priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation_difference = %d\n",
- priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation = %d\n",
- priv->CCKPresentAttentuation);
if (priv->CCKPresentAttentuation_difference <= -12 ||
priv->CCKPresentAttentuation_difference >= 24) {
priv->rtllib->bdynamic_txpower_enable = true;
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
- RT_TRACE(COMP_POWER_TRACKING,
- "tx power track--->limited\n");
return;
}
@@ -834,10 +779,6 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
for (i = 0; i < OFDM_Table_Length; i++) {
if (tmpRegA == OFDMSwingTable[i]) {
priv->OFDM_index[0] = i;
- RT_TRACE(COMP_POWER_TRACKING,
- "Initial reg0x%x = 0x%x, OFDM_index = 0x%x\n",
- rOFDM0_XATxIQImbalance, tmpRegA,
- priv->OFDM_index[0]);
}
}
@@ -845,10 +786,6 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
for (i = 0; i < CCK_Table_length; i++) {
if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
priv->CCK_index = i;
- RT_TRACE(COMP_POWER_TRACKING,
- "Initial reg0x%x = 0x%x, CCK_index = 0x%x\n",
- rCCK0_TxFilter1, TempCCk,
- priv->CCK_index);
break;
}
}
@@ -857,12 +794,10 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
}
tmpRegA = rtl92e_get_rf_reg(dev, RF90_PATH_A, 0x12, 0x078);
- RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
if (tmpRegA < 3 || tmpRegA > 13)
return;
if (tmpRegA >= 12)
tmpRegA = 12;
- RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d\n", tmpRegA);
priv->ThermalMeter[0] = ThermalMeterVal;
priv->ThermalMeter[1] = ThermalMeterVal;
@@ -894,9 +829,6 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
priv->Record_CCK_20Mindex = tmpCCK20Mindex;
priv->Record_CCK_40Mindex = tmpCCK40Mindex;
- RT_TRACE(COMP_POWER_TRACKING,
- "Record_CCK_20Mindex / Record_CCK_40Mindex = %d / %d.\n",
- priv->Record_CCK_20Mindex, priv->Record_CCK_40Mindex);
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
@@ -919,9 +851,6 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
priv->OFDM_index[0] = tmpOFDMindex;
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
OFDMSwingTable[priv->OFDM_index[0]]);
- RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
- priv->OFDM_index[0],
- OFDMSwingTable[priv->OFDM_index[0]]);
}
priv->txpower_count = 0;
}
@@ -960,8 +889,6 @@ static void _rtl92e_dm_init_tx_power_tracking_thermal(struct net_device *dev)
priv->btxpower_tracking = false;
priv->txpower_count = 0;
priv->btxpower_trackingInit = false;
- RT_TRACE(COMP_POWER_TRACKING, "pMgntInfo->bTXPowerTracking = %d\n",
- priv->btxpower_tracking);
}
void rtl92e_dm_init_txpower_tracking(struct net_device *dev)
@@ -979,7 +906,6 @@ static void _rtl92e_dm_check_tx_power_tracking_tssi(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
static u32 tx_power_track_counter;
- RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
if (rtl92e_readb(dev, 0x11e) == 1)
return;
if (!priv->btxpower_tracking)
@@ -1086,44 +1012,29 @@ static void _rtl92e_dm_cck_tx_power_adjust_thermal_meter(struct net_device *dev,
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][1] << 8);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING,
- "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter1,
- TempVal);
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3] << 8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4] << 16)+
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][5] << 24);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING,
- "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter2,
- TempVal);
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7] << 8);
rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING,
- "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_DebugPort,
- TempVal);
} else {
TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
(CCKSwingTable_Ch14[priv->CCK_index][1] << 8);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter1, TempVal);
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3] << 8) +
(CCKSwingTable_Ch14[priv->CCK_index][4] << 16)+
(CCKSwingTable_Ch14[priv->CCK_index][5] << 24);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter2, TempVal);
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8);
rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_DebugPort, TempVal);
}
}
@@ -1141,32 +1052,12 @@ static void _rtl92e_dm_tx_power_reset_recovery(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n",
- dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n",
- priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery : RF A I/Q Amplify Gain is %d\n",
- dm_tx_bb_gain_idx_to_amplify(priv->rfa_txpowertrackingindex));
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery: CCK Attenuation is %d dB\n",
- priv->CCKPresentAttentuation);
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n",
- dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n",
- priv->rfc_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery : RF C I/Q Amplify Gain is %d\n",
- dm_tx_bb_gain_idx_to_amplify(priv->rfc_txpowertrackingindex));
}
void rtl92e_dm_restore_state(struct net_device *dev)
@@ -1176,8 +1067,6 @@ void rtl92e_dm_restore_state(struct net_device *dev)
u32 ratr_value;
if (!priv->up) {
- RT_TRACE(COMP_RATE,
- "<---- %s: driver is going to unload\n", __func__);
return;
}
@@ -1218,17 +1107,6 @@ static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev)
bit_mask = bMaskByte2;
rtl92e_set_bb_reg(dev, rCCK0_CCA, bit_mask,
(u32)priv->initgain_backup.cca);
-
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n",
- priv->initgain_backup.cca);
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
}
@@ -1251,17 +1129,6 @@ void rtl92e_dm_backup_state(struct net_device *dev)
priv->initgain_backup.xdagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
bit_mask = bMaskByte2;
priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
-
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n",
- priv->initgain_backup.cca);
}
static void _rtl92e_dm_dig_init(struct net_device *dev)
@@ -1681,13 +1548,13 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
goto dm_CheckEdcaTurbo_EXIT;
if (priv->rtllib->state != RTLLIB_LINKED)
goto dm_CheckEdcaTurbo_EXIT;
- if (priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
+ if (priv->rtllib->pHTInfo->iot_action & HT_IOT_ACT_DISABLE_EDCA_TURBO)
goto dm_CheckEdcaTurbo_EXIT;
if (!priv->rtllib->bis_any_nonbepkts) {
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- if (pHTInfo->IOTAction & HT_IOT_ACT_EDCA_BIAS_ON_RX) {
+ if (pHTInfo->iot_action & HT_IOT_ACT_EDCA_BIAS_ON_RX) {
if (curTxOkCnt > 4*curRxOkCnt) {
if (priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
@@ -1766,16 +1633,16 @@ static void _rtl92e_dm_cts_to_self(struct net_device *dev)
unsigned long curRxOkCnt = 0;
if (!priv->rtllib->bCTSToSelfEnable) {
- pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
+ pHTInfo->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
return;
}
if (pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
if (curRxOkCnt > 4*curTxOkCnt)
- pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
+ pHTInfo->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
else
- pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;
+ pHTInfo->iot_action |= HT_IOT_ACT_FORCED_CTS2SELF;
lastTxOkCnt = priv->stats.txbytesunicast;
lastRxOkCnt = priv->stats.rxbytesunicast;
@@ -1798,7 +1665,7 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
struct r8192_priv, gpio_change_rf_wq);
struct net_device *dev = priv->rtllib->dev;
u8 tmp1byte;
- enum rt_rf_power_state eRfPowerStateToSet;
+ enum rt_rf_power_state rf_power_state_to_set;
bool bActuallySet = false;
char *argv[3];
static const char RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh";
@@ -1817,25 +1684,23 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
tmp1byte = rtl92e_readb(dev, GPI);
- eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
+ rf_power_state_to_set = (tmp1byte&BIT1) ? rf_on : rf_off;
- if (priv->bHwRadioOff && (eRfPowerStateToSet == eRfOn)) {
- RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n");
+ if (priv->hw_radio_off && (rf_power_state_to_set == rf_on)) {
netdev_info(dev, "gpiochangeRF - HW Radio ON\n");
- priv->bHwRadioOff = false;
+ priv->hw_radio_off = false;
bActuallySet = true;
- } else if (!priv->bHwRadioOff && (eRfPowerStateToSet == eRfOff)) {
- RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n");
+ } else if (!priv->hw_radio_off && (rf_power_state_to_set == rf_off)) {
netdev_info(dev, "gpiochangeRF - HW Radio OFF\n");
- priv->bHwRadioOff = true;
+ priv->hw_radio_off = true;
bActuallySet = true;
}
if (bActuallySet) {
mdelay(1000);
priv->bHwRfOffAction = 1;
- rtl92e_set_rf_state(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
- if (priv->bHwRadioOff)
+ rtl92e_set_rf_state(dev, rf_power_state_to_set, RF_CHANGE_BY_HW);
+ if (priv->hw_radio_off)
argv[1] = "RFOFF";
else
argv[1] = "RFON";
@@ -2132,7 +1997,7 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
if (priv->rtllib->state == RTLLIB_LINKED &&
priv->rtllib->bfsync_enable &&
- (priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) {
+ (priv->rtllib->pHTInfo->iot_action & HT_IOT_ACT_CDD_FSYNC)) {
u32 rate_bitmap;
for (rate_index = 0; rate_index <= 27; rate_index++) {
@@ -2173,10 +2038,6 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
}
priv->rate_record = rate_count;
priv->rateCountDiffRecord = rate_count_diff;
- RT_TRACE(COMP_HALDM,
- "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n",
- priv->rate_record, rate_count, rate_count_diff,
- priv->bswitch_fsync);
if (priv->undecorated_smoothed_pwdb >
priv->rtllib->fsync_rssi_threshold &&
bSwitchFromCountDiff) {
@@ -2220,11 +2081,6 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
priv->ContinueDiffCount = 0;
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
}
- RT_TRACE(COMP_HALDM, "ContinueDiffCount %d\n", priv->ContinueDiffCount);
- RT_TRACE(COMP_HALDM,
- "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n",
- priv->rate_record, rate_count, rate_count_diff,
- priv->bswitch_fsync);
}
static void _rtl92e_dm_start_hw_fsync(struct net_device *dev)
@@ -2232,7 +2088,6 @@ static void _rtl92e_dm_start_hw_fsync(struct net_device *dev)
u8 rf_timing = 0x77;
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cf);
priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING,
(u8 *)(&rf_timing));
@@ -2244,7 +2099,6 @@ static void _rtl92e_dm_end_hw_fsync(struct net_device *dev)
u8 rf_timing = 0xaa;
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)
(&rf_timing));
@@ -2255,7 +2109,6 @@ static void _rtl92e_dm_end_sw_fsync(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
del_timer_sync(&(priv->fsync_timer));
if (priv->bswitch_fsync) {
@@ -2276,7 +2129,6 @@ static void _rtl92e_dm_start_sw_fsync(struct net_device *dev)
u32 rateIndex;
u32 rateBitmap;
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
priv->rate_record = 0;
priv->ContinueDiffCount = 0;
priv->rateCountDiffRecord = 0;
@@ -2315,17 +2167,6 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
static u8 reg_c38_State = RegC38_Default;
static u32 reset_cnt;
- RT_TRACE(COMP_HALDM,
- "RSSI %d TimeInterval %d MultipleTimeInterval %d\n",
- priv->rtllib->fsync_rssi_threshold,
- priv->rtllib->fsync_time_interval,
- priv->rtllib->fsync_multiple_timeinterval);
- RT_TRACE(COMP_HALDM,
- "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n",
- priv->rtllib->fsync_rate_bitmap,
- priv->rtllib->fsync_firstdiff_ratethreshold,
- priv->rtllib->fsync_seconddiff_ratethreshold);
-
if (priv->rtllib->state == RTLLIB_LINKED &&
priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
if (priv->rtllib->bfsync_enable == 0) {
@@ -2461,9 +2302,6 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
}
- RT_TRACE(COMP_TXAGC, "priv->undecorated_smoothed_pwdb = %ld\n",
- priv->undecorated_smoothed_pwdb);
-
if (priv->rtllib->state == RTLLIB_LINKED) {
if (priv->undecorated_smoothed_pwdb >= txhipower_threshold) {
priv->bDynamicTxHighPower = true;
@@ -2484,9 +2322,6 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
if ((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) ||
(priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low)) {
- RT_TRACE(COMP_TXAGC, "SetTxPowerLevel8190() channel = %d\n",
- priv->rtllib->current_network.channel);
-
rtl92e_set_tx_power(dev, priv->rtllib->current_network.channel);
}
priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
@@ -2499,14 +2334,9 @@ static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- ieee->softmac_stats.CurrentShowTxate = rtl92e_readb(dev,
- Current_Tx_Rate_Reg);
-
- ieee->softmac_stats.last_packet_rate = rtl92e_readb(dev,
- Initial_Tx_Rate_Reg);
-
- ieee->softmac_stats.txretrycount = rtl92e_readl(dev,
- Tx_Retry_Count_Reg);
+ ieee->softmac_stats.CurrentShowTxate = rtl92e_readb(dev, CURRENT_TX_RATE_REG);
+ ieee->softmac_stats.last_packet_rate = rtl92e_readb(dev, INITIAL_TX_RATE_REG);
+ ieee->softmac_stats.txretrycount = rtl92e_readl(dev, TX_RETRY_COUNT_REG);
}
static void _rtl92e_dm_send_rssi_to_fw(struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index ea1b14bbcdcd..51e295d389a8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
@@ -42,9 +42,9 @@
#define TX_POWER_ATHEROAP_THRESH_HIGH 78
#define TX_POWER_ATHEROAP_THRESH_LOW 72
-#define Current_Tx_Rate_Reg 0x1e0
-#define Initial_Tx_Rate_Reg 0x1e1
-#define Tx_Retry_Count_Reg 0x1ac
+#define CURRENT_TX_RATE_REG 0x1e0
+#define INITIAL_TX_RATE_REG 0x1e1
+#define TX_RETRY_COUNT_REG 0x1ac
#define RegC38_TH 20
#define DM_Type_ByDriver 1
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index 1d992d5c4e17..81e1bb856c60 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -16,11 +16,9 @@ static void _rtl92e_parse_pci_configuration(struct pci_dev *pdev,
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u8 tmp;
- u16 LinkCtrlReg;
+ u16 link_ctrl_reg;
- pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &LinkCtrlReg);
-
- RT_TRACE(COMP_INIT, "Link Control Register =%x\n", LinkCtrlReg);
+ pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &link_ctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
tmp |= BIT4;
@@ -33,28 +31,28 @@ static void _rtl92e_parse_pci_configuration(struct pci_dev *pdev,
bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- u16 DeviceID;
- u8 RevisionID;
- u16 IrqLine;
+ u16 device_id;
+ u8 revision_id;
+ u16 irq_line;
- DeviceID = pdev->device;
- RevisionID = pdev->revision;
- pci_read_config_word(pdev, 0x3C, &IrqLine);
+ device_id = pdev->device;
+ revision_id = pdev->revision;
+ pci_read_config_word(pdev, 0x3C, &irq_line);
priv->card_8192 = priv->ops->nic_type;
- if (DeviceID == 0x8192) {
- switch (RevisionID) {
+ if (device_id == 0x8192) {
+ switch (revision_id) {
case HAL_HW_PCI_REVISION_ID_8192PCIE:
dev_info(&pdev->dev,
"Adapter(8192 PCI-E) is found - DeviceID=%x\n",
- DeviceID);
+ device_id);
priv->card_8192 = NIC_8192E;
break;
case HAL_HW_PCI_REVISION_ID_8192SE:
dev_info(&pdev->dev,
"Adapter(8192SE) is found - DeviceID=%x\n",
- DeviceID);
+ device_id);
priv->card_8192 = NIC_8192SE;
break;
default:
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 5575186caebd..82b45c61ac75 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -32,7 +32,7 @@ int rtl92e_suspend(struct device *dev_d)
netif_device_detach(dev);
if (!priv->rtllib->bSupportRemoteWakeUp) {
- rtl92e_set_rf_state(dev, eRfOff, RF_CHANGE_BY_INIT);
+ rtl92e_set_rf_state(dev, rf_off, RF_CHANGE_BY_INIT);
ulRegRead = rtl92e_readl(dev, CPU_GEN);
ulRegRead |= CPU_GEN_SYSTEM_RESET;
rtl92e_writel(dev, CPU_GEN, ulRegRead);
@@ -83,10 +83,9 @@ int rtl92e_resume(struct device *dev_d)
dev->netdev_ops->ndo_open(dev);
if (!priv->rtllib->bSupportRemoteWakeUp)
- rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_INIT);
+ rtl92e_set_rf_state(dev, rf_on, RF_CHANGE_BY_INIT);
out:
- RT_TRACE(COMP_POWER, "<================r8192E resume call.\n");
return 0;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index c5e89eb40342..8c00b111ddb2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -21,16 +21,12 @@ static void _rtl92e_hw_sleep(struct net_device *dev)
unsigned long flags = 0;
spin_lock_irqsave(&priv->rf_ps_lock, flags);
- if (priv->RFChangeInProgress) {
+ if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
- RT_TRACE(COMP_DBG,
- "%s(): RF Change in progress!\n", __func__);
return;
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
- RT_TRACE(COMP_DBG, "%s()============>come to sleep down\n", __func__);
-
- rtl92e_set_rf_state(dev, eRfSleep, RF_CHANGE_BY_PS);
+ rtl92e_set_rf_state(dev, rf_sleep, RF_CHANGE_BY_PS);
}
void rtl92e_hw_sleep_wq(void *data)
@@ -48,17 +44,14 @@ void rtl92e_hw_wakeup(struct net_device *dev)
unsigned long flags = 0;
spin_lock_irqsave(&priv->rf_ps_lock, flags);
- if (priv->RFChangeInProgress) {
+ if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
- RT_TRACE(COMP_DBG,
- "%s(): RF Change in progress!\n", __func__);
schedule_delayed_work(&priv->rtllib->hw_wakeup_wq,
msecs_to_jiffies(10));
return;
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
- RT_TRACE(COMP_PS, "%s()============>come to wake up\n", __func__);
- rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_PS);
+ rtl92e_set_rf_state(dev, rf_on, RF_CHANGE_BY_PS);
}
void rtl92e_hw_wakeup_wq(void *data)
@@ -110,15 +103,10 @@ static void _rtl92e_ps_update_rf_state(struct net_device *dev)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "%s() --------->\n", __func__);
pPSC->bSwRfProcessing = true;
-
- RT_TRACE(COMP_PS, "%s(): Set RF to %s.\n", __func__,
- pPSC->eInactivePowerState == eRfOff ? "OFF" : "ON");
rtl92e_set_rf_state(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS);
pPSC->bSwRfProcessing = false;
- RT_TRACE(COMP_PS, "%s() <---------\n", __func__);
}
void rtl92e_ips_enter(struct net_device *dev)
@@ -126,15 +114,14 @@ void rtl92e_ips_enter(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
if (pPSC->bInactivePs) {
- rtState = priv->rtllib->eRFPowerState;
- if (rtState == eRfOn && !pPSC->bSwRfProcessing &&
+ rt_state = priv->rtllib->rf_power_state;
+ if (rt_state == rf_on && !pPSC->bSwRfProcessing &&
(priv->rtllib->state != RTLLIB_LINKED) &&
(priv->rtllib->iw_mode != IW_MODE_MASTER)) {
- RT_TRACE(COMP_PS, "%s(): Turn off RF.\n", __func__);
- pPSC->eInactivePowerState = eRfOff;
+ pPSC->eInactivePowerState = rf_off;
priv->isRFOff = true;
priv->bInPowerSaveMode = true;
_rtl92e_ps_update_rf_state(dev);
@@ -147,14 +134,13 @@ void rtl92e_ips_leave(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
if (pPSC->bInactivePs) {
- rtState = priv->rtllib->eRFPowerState;
- if (rtState != eRfOn && !pPSC->bSwRfProcessing &&
- priv->rtllib->RfOffReason <= RF_CHANGE_BY_IPS) {
- RT_TRACE(COMP_PS, "%s(): Turn on RF.\n", __func__);
- pPSC->eInactivePowerState = eRfOn;
+ rt_state = priv->rtllib->rf_power_state;
+ if (rt_state != rf_on && !pPSC->bSwRfProcessing &&
+ priv->rtllib->rf_off_reason <= RF_CHANGE_BY_IPS) {
+ pPSC->eInactivePowerState = rf_on;
priv->bInPowerSaveMode = false;
_rtl92e_ps_update_rf_state(dev);
}
@@ -176,13 +162,13 @@ void rtl92e_ips_leave_wq(void *data)
void rtl92e_rtllib_ips_leave_wq(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
if (priv->rtllib->PowerSaveControl.bInactivePs) {
- if (rtState == eRfOff) {
- if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) {
+ if (rt_state == rf_off) {
+ if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
return;
@@ -210,7 +196,6 @@ static bool _rtl92e_ps_set_mode(struct net_device *dev, u8 rtPsMode)
if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
return false;
- RT_TRACE(COMP_LPS, "%s(): set ieee->ps = %x\n", __func__, rtPsMode);
if (!priv->ps_force)
priv->rtllib->ps = rtPsMode;
if (priv->rtllib->sta_sleep != LPS_IS_WAKE &&
@@ -221,8 +206,6 @@ static bool _rtl92e_ps_set_mode(struct net_device *dev, u8 rtPsMode)
priv->rtllib->sta_sleep = LPS_IS_WAKE;
spin_lock_irqsave(&(priv->rtllib->mgmt_tx_lock), flags);
- RT_TRACE(COMP_DBG,
- "LPS leave: notify AP we are awaked ++++++++++ SendNullFunctionData\n");
rtllib_sta_ps_send_null_frame(priv->rtllib, 0);
spin_unlock_irqrestore(&(priv->rtllib->mgmt_tx_lock), flags);
}
@@ -236,12 +219,6 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "%s()...\n", __func__);
- RT_TRACE(COMP_PS,
- "pPSC->bLeisurePs = %d, ieee->ps = %d,pPSC->LpsIdleCount is %d,RT_CHECK_FOR_HANG_PERIOD is %d\n",
- pPSC->bLeisurePs, priv->rtllib->ps, pPSC->LpsIdleCount,
- RT_CHECK_FOR_HANG_PERIOD);
-
if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
(priv->rtllib->state == RTLLIB_LINKED))
|| (priv->rtllib->iw_mode == IW_MODE_ADHOC) ||
@@ -252,10 +229,6 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
if (pPSC->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) {
if (priv->rtllib->ps == RTLLIB_PS_DISABLED) {
-
- RT_TRACE(COMP_LPS,
- "%s(): Enter 802.11 power save mode...\n", __func__);
-
if (!pPSC->bFwCtrlLPS) {
if (priv->rtllib->SetFwCmdHandler)
priv->rtllib->SetFwCmdHandler(
@@ -275,15 +248,8 @@ void rtl92e_leisure_ps_leave(struct net_device *dev)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
-
- RT_TRACE(COMP_PS, "%s()...\n", __func__);
- RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d\n",
- pPSC->bLeisurePs, priv->rtllib->ps);
-
if (pPSC->bLeisurePs) {
if (priv->rtllib->ps != RTLLIB_PS_DISABLED) {
- RT_TRACE(COMP_LPS,
- "%s(): Busy Traffic , Leave 802.11 power save..\n", __func__);
_rtl92e_ps_set_mode(dev, RTLLIB_PS_DISABLED);
if (!pPSC->bFwCtrlLPS) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 407effde5e71..4920cb49e381 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -52,7 +52,7 @@ static int _rtl92e_wx_set_rate(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -71,7 +71,7 @@ static int _rtl92e_wx_set_rts(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -99,7 +99,7 @@ static int _rtl92e_wx_set_power(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff) {
+ if (priv->hw_radio_off) {
netdev_warn(dev, "%s(): Can't set Power: Radio is Off.\n",
__func__);
return 0;
@@ -129,7 +129,7 @@ static int _rtl92e_wx_set_rawtx(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
int ret;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -149,8 +149,6 @@ static int _rtl92e_wx_force_reset(struct net_device *dev,
mutex_lock(&priv->wx_mutex);
- RT_TRACE(COMP_DBG, "%s(): force reset ! extra is %d\n",
- __func__, *extra);
priv->force_reset = *extra;
mutex_unlock(&priv->wx_mutex);
return 0;
@@ -167,8 +165,6 @@ static int _rtl92e_wx_adapter_power_status(struct net_device *dev,
mutex_lock(&priv->wx_mutex);
- RT_TRACE(COMP_POWER, "%s(): %s\n", __func__, (*extra == 6) ?
- "DC power" : "AC power");
if (*extra || priv->force_lps) {
priv->ps_force = false;
pPSC->bLeisurePs = true;
@@ -228,7 +224,7 @@ static int _rtl92e_wx_set_debug(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
u8 c = *extra;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
netdev_info(dev, "=====>%s(), *extra:%x, debugflag:%x\n", __func__,
@@ -247,18 +243,18 @@ static int _rtl92e_wx_set_mode(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = netdev_priv_rsl(dev);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
int ret;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
mutex_lock(&priv->wx_mutex);
if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR ||
ieee->bNetPromiscuousMode) {
if (priv->rtllib->PowerSaveControl.bInactivePs) {
- if (rtState == eRfOff) {
- if (priv->rtllib->RfOffReason >
+ if (rt_state == rf_off) {
+ if (priv->rtllib->rf_off_reason >
RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
@@ -379,7 +375,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
int ret;
if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
@@ -391,12 +387,12 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
return 0;
}
- if (priv->bHwRadioOff) {
+ if (priv->hw_radio_off) {
netdev_info(dev, "================>%s(): hwradio off\n",
__func__);
return 0;
}
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
if (!priv->up)
return -ENETDOWN;
if (priv->rtllib->LinkDetectInfo.bBusyTraffic == true)
@@ -419,17 +415,14 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
if (priv->rtllib->state != RTLLIB_LINKED) {
if (priv->rtllib->PowerSaveControl.bInactivePs) {
- if (rtState == eRfOff) {
- if (priv->rtllib->RfOffReason >
+ if (rt_state == rf_off) {
+ if (priv->rtllib->rf_off_reason >
RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
mutex_unlock(&priv->wx_mutex);
return -1;
}
- RT_TRACE(COMP_PS,
- "=========>%s(): rtl92e_ips_leave\n",
- __func__);
mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
mutex_unlock(&priv->rtllib->ips_mutex);
@@ -440,7 +433,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
priv->rtllib->LedControlHandler(dev,
LED_CTL_SITE_SURVEY);
- if (priv->rtllib->eRFPowerState != eRfOff) {
+ if (priv->rtllib->rf_power_state != rf_off) {
priv->rtllib->actscanning = true;
if (ieee->ScanOperationBackupHandler)
@@ -473,7 +466,7 @@ static int _rtl92e_wx_get_scan(struct net_device *dev,
if (!priv->up)
return -ENETDOWN;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -492,9 +485,9 @@ static int _rtl92e_wx_set_essid(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
int ret;
- if (priv->bHwRadioOff) {
+ if (priv->hw_radio_off) {
netdev_info(dev,
- "=========>%s():hw radio off,or Rf state is eRfOff, return\n",
+ "=========>%s():hw radio off,or Rf state is rf_off, return\n",
__func__);
return 0;
}
@@ -560,7 +553,7 @@ static int _rtl92e_wx_set_freq(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -586,7 +579,7 @@ static int _rtl92e_wx_set_frag(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
if (wrqu->frag.disabled)
@@ -622,7 +615,7 @@ static int _rtl92e_wx_set_wap(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -669,7 +662,7 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
{0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
int i;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
if (!priv->up)
@@ -681,7 +674,6 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
mutex_unlock(&priv->rtllib->ips_mutex);
mutex_lock(&priv->wx_mutex);
- RT_TRACE(COMP_SEC, "Setting SW wep key");
ret = rtllib_wx_set_encode(priv->rtllib, info, wrqu, key);
mutex_unlock(&priv->wx_mutex);
@@ -754,7 +746,7 @@ static int _rtl92e_wx_set_scan_type(struct net_device *dev,
int *parms = (int *)p;
int mode = parms[0];
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
priv->rtllib->active_scan = mode;
@@ -770,7 +762,7 @@ static int _rtl92e_wx_set_retry(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
int err = 0;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -843,7 +835,7 @@ static int _rtl92e_wx_set_sens(struct net_device *dev,
short err = 0;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -870,7 +862,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -950,7 +942,7 @@ static int _rtl92e_wx_set_auth(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -967,7 +959,7 @@ static int _rtl92e_wx_set_mlme(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -984,7 +976,7 @@ static int _rtl92e_wx_set_gen_ie(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 7d04966afdd9..19d13b3fcecf 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -100,8 +100,6 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
*tag++ = pBA->dialog_token;
if (type == ACT_ADDBARSP) {
- RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");
-
put_unaligned_le16(StatusCode, tag);
tag += 2;
}
@@ -183,7 +181,6 @@ static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);
if (skb) {
- RT_TRACE(COMP_DBG, "====>to send ADDBAREQ!!!!!\n");
softmac_mgmt_xmit(skb, ieee);
} else {
netdev_dbg(ieee->dev, "Failed to generate ADDBAReq packet.\n");
@@ -247,10 +244,9 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
pBaTimeoutVal = (u16 *)(tag + 5);
pBaStartSeqCtrl = (union sequence_control *)(req + 7);
- RT_TRACE(COMP_DBG, "====>rx ADDBAREQ from : %pM\n", dst);
if (!ieee->current_network.qos_data.active ||
!ieee->pHTInfo->bCurrentHTSupport ||
- (ieee->pHTInfo->IOTAction & HT_IOT_ACT_REJECT_ADDBA_REQ)) {
+ (ieee->pHTInfo->iot_action & HT_IOT_ACT_REJECT_ADDBA_REQ)) {
rc = ADDBA_STATUS_REFUSED;
netdev_warn(ieee->dev,
"Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n",
@@ -282,7 +278,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
pBA->ba_start_seq_ctrl = *pBaStartSeqCtrl;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
- (ieee->pHTInfo->IOTAction & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
+ (ieee->pHTInfo->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
pBA->ba_param_set.field.buffer_size = 1;
else
pBA->ba_param_set.field.buffer_size = 32;
@@ -330,7 +326,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
pBaParamSet = (union ba_param_set *)(tag + 5);
pBaTimeoutVal = (u16 *)(tag + 7);
- RT_TRACE(COMP_DBG, "====>rx ADDBARSP from : %pM\n", dst);
if (!ieee->current_network.qos_data.active ||
!ieee->pHTInfo->bCurrentHTSupport ||
!ieee->pHTInfo->bCurrentAMPDUEnable) {
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index ce13b41074a7..76bc9c5a6d83 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -131,51 +131,40 @@ struct rt_hi_throughput {
u8 AMPDU_Factor;
u8 CurrentAMPDUFactor;
u8 MPDU_Density;
- u8 CurrentMPDUDensity;
+ u8 current_mpdu_density;
enum ht_aggre_mode ForcedAMPDUMode;
- u8 ForcedAMPDUFactor;
- u8 ForcedMPDUDensity;
+ u8 forced_ampdu_factor;
+ u8 forced_mpdu_density;
enum ht_aggre_mode ForcedAMSDUMode;
- u16 ForcedAMSDUMaxSize;
+ u8 forced_short_gi;
- u8 bForcedShortGI;
+ u8 current_op_mode;
- u8 CurrentOpMode;
-
- u8 SelfMimoPs;
- u8 PeerMimoPs;
+ u8 self_mimo_ps;
+ u8 peer_mimo_ps;
enum ht_extchnl_offset CurSTAExtChnlOffset;
- u8 bCurTxBW40MHz;
- u8 PeerBandwidth;
-
- u8 bSwBwInProgress;
- u8 SwBwStep;
-
- u8 bRegRT2RTAggregation;
+ u8 cur_tx_bw40mhz;
+ u8 sw_bw_in_progress;
+ u8 reg_rt2rt_aggregation;
u8 RT2RT_HT_Mode;
- u8 bCurrentRT2RTAggregation;
- u8 bCurrentRT2RTLongSlotTime;
- u8 szRT2RTAggBuffer[10];
-
- u8 bRegRxReorderEnable;
- u8 bCurRxReorderEnable;
- u8 RxReorderWinSize;
- u8 RxReorderPendingTime;
- u16 RxReorderDropCounter;
-
- u8 bIsPeerBcm;
-
+ u8 current_rt2rt_aggregation;
+ u8 current_rt2rt_long_slot_time;
+ u8 sz_rt2rt_agg_buf[10];
+
+ u8 reg_rx_reorder_enable;
+ u8 cur_rx_reorder_enable;
+ u8 rx_reorder_win_size;
+ u8 rx_reorder_pending_time;
+ u16 rx_reorder_drop_counter;
u8 IOTPeer;
- u32 IOTAction;
- u8 IOTRaFunc;
+ u32 iot_action;
+ u8 iot_ra_func;
u8 bWAIotBroadcom;
u8 WAIotTH;
-
- u8 bAcceptAddbaReq;
} __packed;
struct bss_ht {
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index 3b8efaf9b88c..ef3dca51cf99 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -70,9 +70,6 @@ static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
void HTUpdateDefaultSetting(struct rtllib_device *ieee)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
-
- pHTInfo->bAcceptAddbaReq = 1;
-
pHTInfo->bRegShortGI20MHz = 1;
pHTInfo->bRegShortGI40MHz = 1;
@@ -90,19 +87,19 @@ void HTUpdateDefaultSetting(struct rtllib_device *ieee)
pHTInfo->AMPDU_Factor = 2;
pHTInfo->MPDU_Density = 0;
- pHTInfo->SelfMimoPs = 3;
- if (pHTInfo->SelfMimoPs == 2)
- pHTInfo->SelfMimoPs = 3;
+ pHTInfo->self_mimo_ps = 3;
+ if (pHTInfo->self_mimo_ps == 2)
+ pHTInfo->self_mimo_ps = 3;
ieee->bTxDisableRateFallBack = 0;
ieee->bTxUseDriverAssingedRate = 0;
ieee->bTxEnableFwCalcDur = 1;
- pHTInfo->bRegRT2RTAggregation = 1;
+ pHTInfo->reg_rt2rt_aggregation = 1;
- pHTInfo->bRegRxReorderEnable = 1;
- pHTInfo->RxReorderWinSize = 64;
- pHTInfo->RxReorderPendingTime = 30;
+ pHTInfo->reg_rx_reorder_enable = 1;
+ pHTInfo->rx_reorder_win_size = 64;
+ pHTInfo->rx_reorder_pending_time = 30;
}
static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
@@ -254,20 +251,20 @@ static void HTIOTActDetermineRaFunc(struct rtllib_device *ieee, bool bPeerRx2ss)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
- pHTInfo->IOTRaFunc &= HT_IOT_RAFUNC_DISABLE_ALL;
+ pHTInfo->iot_ra_func &= HT_IOT_RAFUNC_DISABLE_ALL;
if (pHTInfo->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
- pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_PEER_1R;
+ pHTInfo->iot_ra_func |= HT_IOT_RAFUNC_PEER_1R;
- if (pHTInfo->IOTAction & HT_IOT_ACT_AMSDU_ENABLE)
- pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_TX_AMSDU;
+ if (pHTInfo->iot_action & HT_IOT_ACT_AMSDU_ENABLE)
+ pHTInfo->iot_ra_func |= HT_IOT_RAFUNC_TX_AMSDU;
}
void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo)
{
- pHTInfo->IOTAction = 0;
+ pHTInfo->iot_action = 0;
pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
- pHTInfo->IOTRaFunc = 0;
+ pHTInfo->iot_ra_func = 0;
}
void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
@@ -300,7 +297,7 @@ void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
else
pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);
- pCapELE->MimoPwrSave = pHT->SelfMimoPs;
+ pCapELE->MimoPwrSave = pHT->self_mimo_ps;
pCapELE->GreenField = 0;
pCapELE->ShortGI20Mhz = 1;
pCapELE->ShortGI40Mhz = 1;
@@ -332,16 +329,16 @@ void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
pCapELE->ASCap = 0;
if (bAssoc) {
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
+ if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS15)
pCapELE->MCS[1] &= 0x7f;
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
+ if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS14)
pCapELE->MCS[1] &= 0xbf;
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
+ if (pHT->iot_action & HT_IOT_ACT_DISABLE_ALL_2SS)
pCapELE->MCS[1] &= 0x00;
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
+ if (pHT->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
pCapELE->ShortGI40Mhz = 0;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
@@ -377,7 +374,7 @@ void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
pHTInfoEle->RIFS = 0;
pHTInfoEle->PSMPAccessOnly = 0;
pHTInfoEle->SrvIntGranularity = 0;
- pHTInfoEle->OptMode = pHT->CurrentOpMode;
+ pHTInfoEle->OptMode = pHT->current_op_mode;
pHTInfoEle->NonGFDevPresent = 0;
pHTInfoEle->DualBeacon = 0;
pHTInfoEle->SecondaryBeacon = 0;
@@ -506,7 +503,7 @@ static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
}
void HTSetConnectBwMode(struct rtllib_device *ieee,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
void HTOnAssocRsp(struct rtllib_device *ieee)
@@ -543,7 +540,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
#endif
HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
(enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset));
- pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
+ pHTInfo->cur_tx_bw40mhz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
true : false);
pHTInfo->bCurShortGI20MHz = ((pHTInfo->bRegShortGI20MHz) ?
@@ -574,7 +571,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
pHTInfo->bCurrentAMPDUEnable = false;
}
- if (!pHTInfo->bRegRT2RTAggregation) {
+ if (!pHTInfo->reg_rt2rt_aggregation) {
if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
pHTInfo->CurrentAMPDUFactor =
pPeerHTCap->MaxRxAMPDUFactor;
@@ -597,15 +594,14 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
}
}
if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
+ pHTInfo->current_mpdu_density = pHTInfo->MPDU_Density;
else
- pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
- if (pHTInfo->IOTAction & HT_IOT_ACT_TX_USE_AMSDU_8K) {
+ pHTInfo->current_mpdu_density = pPeerHTCap->MPDUDensity;
+ if (pHTInfo->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) {
pHTInfo->bCurrentAMPDUEnable = false;
pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
- pHTInfo->ForcedAMSDUMaxSize = 7935;
}
- pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;
+ pHTInfo->cur_rx_reorder_enable = pHTInfo->reg_rx_reorder_enable;
if (pPeerHTCap->MCS[0] == 0)
pPeerHTCap->MCS[0] = 0xff;
@@ -614,8 +610,8 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);
- pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
- if (pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
+ pHTInfo->peer_mimo_ps = pPeerHTCap->MimoPwrSave;
+ if (pHTInfo->peer_mimo_ps == MIMO_PS_STATIC)
pMcsFilter = MCS_FILTER_1SS;
else
pMcsFilter = MCS_FILTER_ALL;
@@ -623,7 +619,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
ieee->dot11HTOperationalRateSet, pMcsFilter);
ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
- pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
+ pHTInfo->current_op_mode = pPeerHTInfo->OptMode;
}
void HTInitializeHTInfo(struct rtllib_device *ieee)
@@ -633,17 +629,17 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
pHTInfo->bCurrentHTSupport = false;
pHTInfo->bCurBW40MHz = false;
- pHTInfo->bCurTxBW40MHz = false;
+ pHTInfo->cur_tx_bw40mhz = false;
pHTInfo->bCurShortGI20MHz = false;
pHTInfo->bCurShortGI40MHz = false;
- pHTInfo->bForcedShortGI = false;
+ pHTInfo->forced_short_gi = false;
pHTInfo->bCurSuppCCK = true;
pHTInfo->bCurrent_AMSDU_Support = false;
pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
+ pHTInfo->current_mpdu_density = pHTInfo->MPDU_Density;
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
memset((void *)(&(pHTInfo->SelfHTCap)), 0,
@@ -655,17 +651,17 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
memset((void *)(&(pHTInfo->PeerHTInfoBuf)), 0,
sizeof(pHTInfo->PeerHTInfoBuf));
- pHTInfo->bSwBwInProgress = false;
+ pHTInfo->sw_bw_in_progress = false;
pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE;
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->current_rt2rt_aggregation = false;
+ pHTInfo->current_rt2rt_long_slot_time = false;
pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
pHTInfo->IOTPeer = 0;
- pHTInfo->IOTAction = 0;
- pHTInfo->IOTRaFunc = 0;
+ pHTInfo->iot_action = 0;
+ pHTInfo->iot_ra_func = 0;
{
u8 *RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);
@@ -717,51 +713,51 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
pNetwork->bssht.bd_ht_info_buf,
pNetwork->bssht.bd_ht_info_len);
- if (pHTInfo->bRegRT2RTAggregation) {
- pHTInfo->bCurrentRT2RTAggregation =
+ if (pHTInfo->reg_rt2rt_aggregation) {
+ pHTInfo->current_rt2rt_aggregation =
pNetwork->bssht.bd_rt2rt_aggregation;
- pHTInfo->bCurrentRT2RTLongSlotTime =
+ pHTInfo->current_rt2rt_long_slot_time =
pNetwork->bssht.bd_rt2rt_long_slot_time;
pHTInfo->RT2RT_HT_Mode = pNetwork->bssht.rt2rt_ht_mode;
} else {
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->current_rt2rt_aggregation = false;
+ pHTInfo->current_rt2rt_long_slot_time = false;
pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
}
HTIOTPeerDetermine(ieee);
- pHTInfo->IOTAction = 0;
+ pHTInfo->iot_action = 0;
bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;
+ pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_MCS14;
bIOTAction = HTIOTActIsDisableMCS15(ieee);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;
+ pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_MCS15;
bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;
+ pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_ALL_2SS;
bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
+ pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;
+ pHTInfo->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M;
bIOTAction = HTIOTActIsCCDFsync(ieee);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
+ pHTInfo->iot_action |= HT_IOT_ACT_CDD_FSYNC;
} else {
pHTInfo->bCurrentHTSupport = false;
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->current_rt2rt_aggregation = false;
+ pHTInfo->current_rt2rt_long_slot_time = false;
pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
- pHTInfo->IOTAction = 0;
- pHTInfo->IOTRaFunc = 0;
+ pHTInfo->iot_action = 0;
+ pHTInfo->iot_ra_func = 0;
}
}
@@ -774,7 +770,7 @@ void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
if (pHTInfo->bCurrentHTSupport) {
if (pNetwork->bssht.bd_ht_info_len != 0)
- pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
+ pHTInfo->current_op_mode = pPeerHTInfo->OptMode;
}
}
EXPORT_SYMBOL(HT_update_self_and_peer_setting);
@@ -801,7 +797,7 @@ void HTUseDefaultSetting(struct rtllib_device *ieee)
pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
- pHTInfo->CurrentMPDUDensity = pHTInfo->CurrentMPDUDensity;
+ pHTInfo->current_mpdu_density = pHTInfo->current_mpdu_density;
HTFilterMCSRate(ieee, ieee->Regdot11TxHTOperationalRateSet,
ieee->dot11HTOperationalRateSet);
@@ -850,11 +846,11 @@ static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
HT_EXTCHNL_OFFSET_NO_EXT);
}
- pHTInfo->bSwBwInProgress = false;
+ pHTInfo->sw_bw_in_progress = false;
}
void HTSetConnectBwMode(struct rtllib_device *ieee,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
@@ -863,13 +859,13 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
return;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- Bandwidth = HT_CHANNEL_WIDTH_20;
+ bandwidth = HT_CHANNEL_WIDTH_20;
- if (pHTInfo->bSwBwInProgress) {
- pr_info("%s: bSwBwInProgress!!\n", __func__);
+ if (pHTInfo->sw_bw_in_progress) {
+ pr_info("%s: sw_bw_in_progress!!\n", __func__);
return;
}
- if (Bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
if (ieee->current_network.channel < 2 &&
Offset == HT_EXTCHNL_OFFSET_LOWER)
Offset = HT_EXTCHNL_OFFSET_NO_EXT;
@@ -889,7 +885,7 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
netdev_dbg(ieee->dev, "%s():pHTInfo->bCurBW40MHz:%x\n", __func__,
pHTInfo->bCurBW40MHz);
- pHTInfo->bSwBwInProgress = true;
+ pHTInfo->sw_bw_in_progress = true;
HTSetConnectBwModeCallback(ieee);
}
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 34b00a76b6bd..05c7e822f372 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -83,7 +83,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
if (bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq;
mod_timer(&pRxTs->rx_pkt_pending_timer, jiffies +
- msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime)
+ msecs_to_jiffies(ieee->pHTInfo->rx_reorder_pending_time)
);
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 0ecd81a81866..3c72ed2a30a4 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1244,9 +1244,9 @@ enum ips_callback_function {
};
enum rt_rf_power_state {
- eRfOn,
- eRfSleep,
- eRfOff
+ rf_on,
+ rf_sleep,
+ rf_off
};
struct rt_pwr_save_ctrl {
@@ -1434,8 +1434,8 @@ struct rtllib_device {
bool FirstIe_InScan;
bool be_scan_inprogress;
bool beinretry;
- enum rt_rf_power_state eRFPowerState;
- RT_RF_CHANGE_SOURCE RfOffReason;
+ enum rt_rf_power_state rf_power_state;
+ RT_RF_CHANGE_SOURCE rf_off_reason;
bool is_set_key;
bool wx_set_enc;
struct rt_hi_throughput *pHTInfo;
@@ -1765,7 +1765,7 @@ struct rtllib_device {
/* check whether Tx hw resource available */
short (*check_nic_enough_desc)(struct net_device *dev, int queue_index);
void (*SetBWModeHandler)(struct net_device *dev,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
@@ -1938,7 +1938,7 @@ int rtllib_encrypt_fragment(
struct sk_buff *frag,
int hdr_len);
-int rtllib_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev);
void rtllib_txb_free(struct rtllib_txb *txb);
/* rtllib_rx.c */
@@ -2073,7 +2073,7 @@ int rtllib_wx_get_rts(struct rtllib_device *ieee, struct iw_request_info *info,
#define MAX_RECEIVE_BUFFER_SIZE 9100
void HTSetConnectBwMode(struct rtllib_device *ieee,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
void HTUpdateDefaultSetting(struct rtllib_device *ieee);
void HTConstructCapabilityElement(struct rtllib_device *ieee,
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index e3e8302945eb..f6b23defe225 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -46,10 +46,4 @@ enum RTL_DEBUG {
COMP_ERR = BIT(31)
};
-#define RT_TRACE(component, x, args...) \
-do { \
- if (rt_global_debug_component & component) \
- printk(KERN_DEBUG DRV_NAME ":" x "\n", ##args);\
-} while (0)
-
#endif
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index abe5c153f74e..46d75e925ee9 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -569,7 +569,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
struct rx_reorder_entry *pReorderEntry = NULL;
- u8 WinSize = pHTInfo->RxReorderWinSize;
+ u8 WinSize = pHTInfo->rx_reorder_win_size;
u16 WinEnd = 0;
u8 index = 0;
bool bMatchWinStart = false, bPktInBuf = false;
@@ -591,7 +591,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
netdev_dbg(ieee->dev,
"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
pTS->rx_indicate_seq, SeqNum);
- pHTInfo->RxReorderDropCounter++;
+ pHTInfo->rx_reorder_drop_counter++;
{
int i;
@@ -755,7 +755,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
netdev_dbg(ieee->dev, "%s(): SET rx timeout timer\n", __func__);
pTS->rx_timeout_indicate_seq = pTS->rx_indicate_seq;
mod_timer(&pTS->rx_pkt_pending_timer, jiffies +
- msecs_to_jiffies(pHTInfo->RxReorderPendingTime));
+ msecs_to_jiffies(pHTInfo->rx_reorder_pending_time));
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
@@ -924,7 +924,7 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
sc = le16_to_cpu(hdr->seq_ctl);
frag = WLAN_GET_SEQ_FRAG(sc);
- if (!ieee->pHTInfo->bCurRxReorderEnable ||
+ if (!ieee->pHTInfo->cur_rx_reorder_enable ||
!ieee->current_network.qos_data.active ||
!IsDataFrame(skb->data) ||
IsLegacyDataFrame(skb->data)) {
@@ -1442,7 +1442,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
}
/* Indicate packets to upper layer or Rx Reorder */
- if (!ieee->pHTInfo->bCurRxReorderEnable || pTS == NULL || bToOtherSTA)
+ if (!ieee->pHTInfo->cur_rx_reorder_enable || pTS == NULL || bToOtherSTA)
rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
else
RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index b5f4d35954a9..1a3ca3e57623 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -180,7 +180,7 @@ static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
u8 rate;
- if (pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
+ if (pHTInfo->iot_action & HT_IOT_ACT_MGNT_USE_CCK_6M)
rate = 0x0c;
else
rate = ieee->basic_rate & 0x7f;
@@ -586,9 +586,9 @@ static void rtllib_softmac_scan_wq(void *data)
mutex_lock(&ieee->scan_mutex);
- if (ieee->eRFPowerState == eRfOff) {
+ if (ieee->rf_power_state == rf_off) {
netdev_info(ieee->dev,
- "======>%s():rf state is eRfOff, return\n",
+ "======>%s():rf state is rf_off, return\n",
__func__);
goto out1;
}
@@ -865,10 +865,10 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len,
encrypt);
- if (pHTInfo->bRegRT2RTAggregation) {
- tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
+ if (pHTInfo->reg_rt2rt_aggregation) {
+ tmp_generic_ie_buf = ieee->pHTInfo->sz_rt2rt_agg_buf;
tmp_generic_ie_len =
- sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
+ sizeof(ieee->pHTInfo->sz_rt2rt_agg_buf);
HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf,
&tmp_generic_ie_len);
}
@@ -1189,10 +1189,10 @@ rtllib_association_req(struct rtllib_network *beacon,
ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len,
encrypt, true);
- if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
- realtek_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
+ if (ieee->pHTInfo->current_rt2rt_aggregation) {
+ realtek_ie_buf = ieee->pHTInfo->sz_rt2rt_agg_buf;
realtek_ie_len =
- sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
+ sizeof(ieee->pHTInfo->sz_rt2rt_agg_buf);
HTConstructRT2RTAggElement(ieee, realtek_ie_buf,
&realtek_ie_len);
}
@@ -1368,7 +1368,7 @@ rtllib_association_req(struct rtllib_network *beacon,
tag += ht_cap_len - 2;
}
- if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
+ if (ieee->pHTInfo->current_rt2rt_aggregation) {
tag = skb_put(skb, realtek_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
*tag++ = realtek_ie_len - 2;
@@ -1584,13 +1584,8 @@ static void rtllib_associate_procedure_wq(void *data)
ieee->data_hard_stop(ieee->dev);
rtllib_stop_scan(ieee);
- RT_TRACE(COMP_DBG, "===>%s(), chan:%d\n", __func__,
- ieee->current_network.channel);
HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- if (ieee->eRFPowerState == eRfOff) {
- RT_TRACE(COMP_DBG,
- "=============>%s():Rf state is eRfOff, schedule ipsleave wq again,return\n",
- __func__);
+ if (ieee->rf_power_state == rf_off) {
if (ieee->rtllib_ips_leave_wq != NULL)
ieee->rtllib_ips_leave_wq(ieee->dev);
mutex_unlock(&ieee->wx_mutex);
@@ -1611,7 +1606,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
short apset, ssidset, ssidbroad, apmatch, ssidmatch;
- /* we are interested in new new only if we are not associated
+ /* we are interested in new only if we are not associated
* and we are not associating / authenticating
*/
if (ieee->state != RTLLIB_NOLINK)
@@ -1899,7 +1894,7 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
((ieee->mode == IEEE_G) &&
(ieee->current_network.mode == IEEE_N_24G) &&
(ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT-1)))) {
- ieee->pHTInfo->IOTAction |= HT_IOT_ACT_PURE_N_MODE;
+ ieee->pHTInfo->iot_action |= HT_IOT_ACT_PURE_N_MODE;
} else {
ieee->AsocRetryCount = 0;
}
@@ -2062,9 +2057,6 @@ static inline void rtllib_sta_ps(struct work_struct *work)
if ((ieee->ps == RTLLIB_PS_DISABLED ||
ieee->iw_mode != IW_MODE_INFRA ||
ieee->state != RTLLIB_LINKED)) {
- RT_TRACE(COMP_DBG,
- "=====>%s(): no need to ps,wake up!! ieee->ps is %d, ieee->iw_mode is %d, ieee->state is %d\n",
- __func__, ieee->ps, ieee->iw_mode, ieee->state);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
rtllib_sta_wakeup(ieee, 1);
@@ -2109,7 +2101,7 @@ static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
{
if (ieee->sta_sleep == LPS_IS_WAKE) {
if (nl) {
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_NULL_DATA_POWER_SAVING) {
ieee->ack_tx_to_ieee = 1;
rtllib_sta_ps_send_null_frame(ieee, 0);
@@ -2125,7 +2117,7 @@ static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
if (ieee->sta_sleep == LPS_IS_SLEEP)
ieee->sta_wake_up(ieee->dev);
if (nl) {
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_NULL_DATA_POWER_SAVING) {
ieee->ack_tx_to_ieee = 1;
rtllib_sta_ps_send_null_frame(ieee, 0);
@@ -2160,7 +2152,7 @@ void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success)
if ((ieee->sta_sleep == LPS_IS_WAKE) && !success) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_NULL_DATA_POWER_SAVING)
rtllib_sta_ps_send_null_frame(ieee, 0);
else
@@ -2304,7 +2296,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
if (ieee->open_wep || !challenge) {
ieee->state = RTLLIB_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
- if (!(ieee->pHTInfo->IOTAction & HT_IOT_ACT_PURE_N_MODE)) {
+ if (!(ieee->pHTInfo->iot_action & HT_IOT_ACT_PURE_N_MODE)) {
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
if (IsHTHalfNmodeAPs(ieee)) {
bSupportNmode = true;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 70a62ca0f69a..f9589c5b62ba 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -364,8 +364,6 @@ void rtllib_wx_sync_scan_wq(void *data)
b40M = 1;
chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset;
bandwidth = (enum ht_channel_width)ieee->pHTInfo->bCurBW40MHz;
- RT_TRACE(COMP_DBG, "Scan in 40M, force to 20M first:%d, %d\n",
- chan_offset, bandwidth);
ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20,
HT_EXTCHNL_OFFSET_NO_EXT);
}
@@ -373,7 +371,6 @@ void rtllib_wx_sync_scan_wq(void *data)
rtllib_start_scan_syncro(ieee, 0);
if (b40M) {
- RT_TRACE(COMP_DBG, "Scan in 20M, back to 40M\n");
if (chan_offset == HT_EXTCHNL_OFFSET_UPPER)
ieee->set_chan(ieee->dev, chan + 2);
else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER)
@@ -571,14 +568,11 @@ int rtllib_wx_set_power(struct rtllib_device *ieee,
mutex_lock(&ieee->wx_mutex);
if (wrqu->power.disabled) {
- RT_TRACE(COMP_DBG, "===>%s(): power disable\n", __func__);
ieee->ps = RTLLIB_PS_DISABLED;
goto exit;
}
if (wrqu->power.flags & IW_POWER_TIMEOUT) {
ieee->ps_timeout = wrqu->power.value / 1000;
- RT_TRACE(COMP_DBG, "===>%s():ps_timeout is %d\n", __func__,
- ieee->ps_timeout);
}
if (wrqu->power.flags & IW_POWER_PERIOD)
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 42f81b23a144..e307020580a0 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -284,7 +284,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
return;
- if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
+ if (pHTInfo->iot_action & HT_IOT_ACT_TX_NO_AGGREGATION)
return;
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
@@ -315,7 +315,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
if (ieee->iw_mode == IW_MODE_INFRA) {
tcb_desc->bAMPDUEnable = true;
tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
- tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
+ tcb_desc->ampdu_density = pHTInfo->current_mpdu_density;
}
}
FORCED_AGG_SETTING:
@@ -325,8 +325,8 @@ FORCED_AGG_SETTING:
case HT_AGG_FORCE_ENABLE:
tcb_desc->bAMPDUEnable = true;
- tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
- tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
+ tcb_desc->ampdu_density = pHTInfo->forced_mpdu_density;
+ tcb_desc->ampdu_factor = pHTInfo->forced_ampdu_factor;
break;
case HT_AGG_FORCE_DISABLE:
@@ -358,7 +358,7 @@ static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
return;
- if (pHTInfo->bForcedShortGI) {
+ if (pHTInfo->forced_short_gi) {
tcb_desc->bUseShortGI = true;
return;
}
@@ -384,7 +384,7 @@ static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
if ((tcb_desc->data_rate & 0x80) == 0)
return;
- if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
+ if (pHTInfo->bCurBW40MHz && pHTInfo->cur_tx_bw40mhz &&
!ieee->bandwidth_auto_switch.bforced_tx20Mhz)
tcb_desc->bPacketBW = true;
}
@@ -422,12 +422,12 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
pHTInfo = ieee->pHTInfo;
while (true) {
- if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
+ if (pHTInfo->iot_action & HT_IOT_ACT_FORCED_CTS2SELF) {
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = true;
break;
- } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
+ } else if (pHTInfo->iot_action & (HT_IOT_ACT_FORCED_RTS |
HT_IOT_ACT_PURE_N_MODE)) {
tcb_desc->bRTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
@@ -440,7 +440,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
break;
}
if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
- u8 HTOpMode = pHTInfo->CurrentOpMode;
+ u8 HTOpMode = pHTInfo->current_op_mode;
if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
HTOpMode == 3)) ||
@@ -885,7 +885,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->priority = skb->priority;
if (ether_type == ETH_P_PAE) {
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
MgntQuery_TxRateExcludeCCKRates(ieee);
@@ -910,7 +910,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->data_rate = rtllib_current_rate(ieee);
if (bdhcp) {
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
MgntQuery_TxRateExcludeCCKRates(ieee);
@@ -962,9 +962,9 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
-int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
memset(skb->cb, 0, sizeof(skb->cb));
- return rtllib_xmit_inter(skb, dev);
+ return rtllib_xmit_inter(skb, dev) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
EXPORT_SYMBOL(rtllib_xmit);
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index 0be7426b6ebc..d32dfd89a606 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -8,6 +8,7 @@ ccflags-y += -DTHOMAS_BEACON -DTHOMAS_TASKLET -DTHOMAS_SKB -DTHOMAS_TURBO
r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o \
r8190_rtl8256.o r819xU_phy.o r819xU_firmware.o \
r819xU_cmdpkt.o r8192U_dm.o r819xU_firmware_img.o \
+ r8192U_debugfs.o \
ieee80211/ieee80211_crypt.o \
ieee80211/ieee80211_crypt_tkip.o \
ieee80211/ieee80211_crypt_ccmp.o \
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index b577f9c81f85..9cd4b1896745 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -2178,7 +2178,7 @@ int ieee80211_set_encryption(struct ieee80211_device *ieee);
int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
struct sk_buff *frag, int hdr_len);
-int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
void ieee80211_txb_free(struct ieee80211_txb *txb);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 8602e3a6c837..e4b6454809a0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -526,7 +526,7 @@ static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
}
}
-int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct ieee80211_txb *txb = NULL;
@@ -822,13 +822,13 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
stats->tx_packets++;
stats->tx_bytes += __le16_to_cpu(txb->payload_size);
- return 0;
+ return NETDEV_TX_OK;
}
ieee80211_txb_free(txb);
}
}
- return 0;
+ return NETDEV_TX_OK;
failed:
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 1942cb849374..ff0ada00bf41 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1061,6 +1061,9 @@ typedef struct r8192_priv {
struct delayed_work gpio_change_rf_wq;
struct delayed_work initialgain_operate_wq;
struct workqueue_struct *priv_wq;
+
+ /* debugfs */
+ struct dentry *debugfs_dir;
} r8192_priv;
/* For rtl8187B */
@@ -1117,4 +1120,10 @@ void EnableHWSecurityConfig8192(struct net_device *dev);
void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
+void rtl8192_debugfs_init_one(struct net_device *dev);
+void rtl8192_debugfs_exit_one(struct net_device *dev);
+void rtl8192_debugfs_rename_one(struct net_device *dev);
+void rtl8192_debugfs_init(void);
+void rtl8192_debugfs_exit(void);
+
#endif
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 2ca925f35830..0a60ef20107c 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -56,7 +56,6 @@ double __extendsfdf2(float a)
#include "r8192U_dm.h"
#include <linux/usb.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/* FIXME: check if 2.6.7 is ok */
@@ -453,179 +452,6 @@ static void rtl8192_restart(struct work_struct *work);
static void watch_dog_timer_callback(struct timer_list *t);
/****************************************************************************
- * -----------------------------PROCFS STUFF-------------------------
- ****************************************************************************/
-
-static struct proc_dir_entry *rtl8192_proc;
-
-static int __maybe_unused proc_get_stats_ap(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- struct ieee80211_network *target;
-
- list_for_each_entry(target, &ieee->network_list, list) {
- const char *wpa = "non_WPA";
-
- if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
- wpa = "WPA";
-
- seq_printf(m, "%s %s\n", target->ssid, wpa);
- }
-
- return 0;
-}
-
-static int __maybe_unused proc_get_registers(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- int i, n, max = 0xff;
- u8 byte_rd;
-
- seq_puts(m, "\n####################page 0##################\n ");
-
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x000 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_puts(m, "\n####################page 1##################\n ");
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x100 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_puts(m, "\n####################page 3##################\n ");
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x300 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_putc(m, '\n');
- return 0;
-}
-
-static int __maybe_unused proc_get_stats_tx(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
-
- seq_printf(m,
- "TX VI priority ok int: %lu\n"
- "TX VI priority error int: %lu\n"
- "TX VO priority ok int: %lu\n"
- "TX VO priority error int: %lu\n"
- "TX BE priority ok int: %lu\n"
- "TX BE priority error int: %lu\n"
- "TX BK priority ok int: %lu\n"
- "TX BK priority error int: %lu\n"
- "TX MANAGE priority ok int: %lu\n"
- "TX MANAGE priority error int: %lu\n"
- "TX BEACON priority ok int: %lu\n"
- "TX BEACON priority error int: %lu\n"
- "TX queue resume: %lu\n"
- "TX queue stopped?: %d\n"
- "TX fifo overflow: %lu\n"
- "TX VI queue: %d\n"
- "TX VO queue: %d\n"
- "TX BE queue: %d\n"
- "TX BK queue: %d\n"
- "TX VI dropped: %lu\n"
- "TX VO dropped: %lu\n"
- "TX BE dropped: %lu\n"
- "TX BK dropped: %lu\n"
- "TX total data packets %lu\n",
- priv->stats.txviokint,
- priv->stats.txvierr,
- priv->stats.txvookint,
- priv->stats.txvoerr,
- priv->stats.txbeokint,
- priv->stats.txbeerr,
- priv->stats.txbkokint,
- priv->stats.txbkerr,
- priv->stats.txmanageokint,
- priv->stats.txmanageerr,
- priv->stats.txbeaconokint,
- priv->stats.txbeaconerr,
- priv->stats.txresumed,
- netif_queue_stopped(dev),
- priv->stats.txoverflow,
- atomic_read(&(priv->tx_pending[VI_PRIORITY])),
- atomic_read(&(priv->tx_pending[VO_PRIORITY])),
- atomic_read(&(priv->tx_pending[BE_PRIORITY])),
- atomic_read(&(priv->tx_pending[BK_PRIORITY])),
- priv->stats.txvidrop,
- priv->stats.txvodrop,
- priv->stats.txbedrop,
- priv->stats.txbkdrop,
- priv->stats.txdatapkt
- );
-
- return 0;
-}
-
-static int __maybe_unused proc_get_stats_rx(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
-
- seq_printf(m,
- "RX packets: %lu\n"
- "RX urb status error: %lu\n"
- "RX invalid urb error: %lu\n",
- priv->stats.rxoktotal,
- priv->stats.rxstaterr,
- priv->stats.rxurberr);
-
- return 0;
-}
-
-static void rtl8192_proc_module_init(void)
-{
- RT_TRACE(COMP_INIT, "Initializing proc filesystem");
- rtl8192_proc = proc_mkdir(RTL819XU_MODULE_NAME, init_net.proc_net);
-}
-
-static void rtl8192_proc_init_one(struct net_device *dev)
-{
- struct proc_dir_entry *dir;
-
- if (!rtl8192_proc)
- return;
-
- dir = proc_mkdir_data(dev->name, 0, rtl8192_proc, dev);
- if (!dir)
- return;
-
- proc_create_single("stats-rx", S_IFREG | 0444, dir,
- proc_get_stats_rx);
- proc_create_single("stats-tx", S_IFREG | 0444, dir,
- proc_get_stats_tx);
- proc_create_single("stats-ap", S_IFREG | 0444, dir,
- proc_get_stats_ap);
- proc_create_single("registers", S_IFREG | 0444, dir,
- proc_get_registers);
-}
-
-static void rtl8192_proc_remove_one(struct net_device *dev)
-{
- remove_proc_subtree(dev->name, rtl8192_proc);
-}
-
-/****************************************************************************
* -----------------------------MISC STUFF-------------------------
*****************************************************************************/
@@ -4730,7 +4556,7 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
goto fail2;
RT_TRACE(COMP_INIT, "dev name=======> %s\n", dev->name);
- rtl8192_proc_init_one(dev);
+ rtl8192_debugfs_init_one(dev);
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
@@ -4764,10 +4590,11 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf)
struct net_device *dev = usb_get_intfdata(intf);
struct r8192_priv *priv = ieee80211_priv(dev);
- unregister_netdev(dev);
RT_TRACE(COMP_DOWN, "=============>wlan driver to be removed\n");
- rtl8192_proc_remove_one(dev);
+ rtl8192_debugfs_exit_one(dev);
+
+ unregister_netdev(dev);
rtl8192_down(dev);
kfree(priv->pFirmware);
@@ -4779,6 +4606,30 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf)
RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
+static int rtl8192_usb_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(data);
+
+ if (netdev->netdev_ops != &rtl8192_netdev_ops)
+ goto out;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ rtl8192_debugfs_rename_one(netdev);
+ break;
+ default:
+ break;
+ }
+
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rtl8192_usb_netdev_notifier = {
+ .notifier_call = rtl8192_usb_netdev_event,
+};
+
static int __init rtl8192_usb_module_init(void)
{
int ret;
@@ -4788,10 +4639,17 @@ static int __init rtl8192_usb_module_init(void)
RT_TRACE(COMP_INIT, "Initializing module");
RT_TRACE(COMP_INIT, "Wireless extensions version %d", WIRELESS_EXT);
+ ret = register_netdevice_notifier(&rtl8192_usb_netdev_notifier);
+ if (ret) {
+ pr_err("register_netdevice_notifier failed %d\n", ret);
+ return ret;
+ }
+
+ rtl8192_debugfs_init();
ret = ieee80211_debug_init();
if (ret) {
pr_err("ieee80211_debug_init() failed %d\n", ret);
- return ret;
+ goto debugfs_exit;
}
ret = ieee80211_crypto_init();
@@ -4818,14 +4676,12 @@ static int __init rtl8192_usb_module_init(void)
goto crypto_ccmp_exit;
}
- rtl8192_proc_module_init();
ret = usb_register(&rtl8192_usb_driver);
if (ret)
- goto rtl8192_proc_module_exit;
+ goto crypto_wep_exit;
return ret;
-rtl8192_proc_module_exit:
- remove_proc_entry(RTL819XU_MODULE_NAME, init_net.proc_net);
+crypto_wep_exit:
ieee80211_crypto_wep_exit();
crypto_ccmp_exit:
ieee80211_crypto_ccmp_exit();
@@ -4835,18 +4691,22 @@ crypto_exit:
ieee80211_crypto_deinit();
debug_exit:
ieee80211_debug_exit();
+debugfs_exit:
+ rtl8192_debugfs_exit();
+ unregister_netdevice_notifier(&rtl8192_usb_netdev_notifier);
return ret;
}
static void __exit rtl8192_usb_module_exit(void)
{
usb_deregister(&rtl8192_usb_driver);
- remove_proc_entry(RTL819XU_MODULE_NAME, init_net.proc_net);
ieee80211_crypto_wep_exit();
ieee80211_crypto_ccmp_exit();
ieee80211_crypto_tkip_exit();
ieee80211_crypto_deinit();
ieee80211_debug_exit();
+ rtl8192_debugfs_exit();
+ unregister_netdevice_notifier(&rtl8192_usb_netdev_notifier);
RT_TRACE(COMP_DOWN, "Exiting");
}
diff --git a/drivers/staging/rtl8192u/r8192U_debugfs.c b/drivers/staging/rtl8192u/r8192U_debugfs.c
new file mode 100644
index 000000000000..fe8ef72506ee
--- /dev/null
+++ b/drivers/staging/rtl8192u/r8192U_debugfs.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/****************************************************************************
+ * -----------------------------DEBUGFS STUFF-------------------------
+ ****************************************************************************/
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "r8192U.h"
+
+#define KBUILD_MODNAME "r8192u_usb"
+
+static int rtl8192_usb_stats_ap_show(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct ieee80211_device *ieee = priv->ieee80211;
+ struct ieee80211_network *target;
+
+ list_for_each_entry(target, &ieee->network_list, list) {
+ const char *wpa = "non_WPA";
+
+ if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
+ wpa = "WPA";
+
+ seq_printf(m, "%s %s\n", target->ssid, wpa);
+ }
+
+ return 0;
+}
+
+static int rtl8192_usb_registers_show(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ int i, n, max = 0xff;
+ u8 byte_rd;
+
+ seq_puts(m, "\n####################page 0##################\n ");
+
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
+
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x000 | n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
+ }
+
+ seq_puts(m, "\n####################page 1##################\n ");
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
+
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x100 | n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
+ }
+
+ seq_puts(m, "\n####################page 3##################\n ");
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
+
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x300 | n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
+ }
+
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static int rtl8192_usb_stats_tx_show(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ seq_printf(m,
+ "TX VI priority ok int: %lu\n"
+ "TX VI priority error int: %lu\n"
+ "TX VO priority ok int: %lu\n"
+ "TX VO priority error int: %lu\n"
+ "TX BE priority ok int: %lu\n"
+ "TX BE priority error int: %lu\n"
+ "TX BK priority ok int: %lu\n"
+ "TX BK priority error int: %lu\n"
+ "TX MANAGE priority ok int: %lu\n"
+ "TX MANAGE priority error int: %lu\n"
+ "TX BEACON priority ok int: %lu\n"
+ "TX BEACON priority error int: %lu\n"
+ "TX queue resume: %lu\n"
+ "TX queue stopped?: %d\n"
+ "TX fifo overflow: %lu\n"
+ "TX VI queue: %d\n"
+ "TX VO queue: %d\n"
+ "TX BE queue: %d\n"
+ "TX BK queue: %d\n"
+ "TX VI dropped: %lu\n"
+ "TX VO dropped: %lu\n"
+ "TX BE dropped: %lu\n"
+ "TX BK dropped: %lu\n"
+ "TX total data packets %lu\n",
+ priv->stats.txviokint,
+ priv->stats.txvierr,
+ priv->stats.txvookint,
+ priv->stats.txvoerr,
+ priv->stats.txbeokint,
+ priv->stats.txbeerr,
+ priv->stats.txbkokint,
+ priv->stats.txbkerr,
+ priv->stats.txmanageokint,
+ priv->stats.txmanageerr,
+ priv->stats.txbeaconokint,
+ priv->stats.txbeaconerr,
+ priv->stats.txresumed,
+ netif_queue_stopped(dev),
+ priv->stats.txoverflow,
+ atomic_read(&(priv->tx_pending[VI_PRIORITY])),
+ atomic_read(&(priv->tx_pending[VO_PRIORITY])),
+ atomic_read(&(priv->tx_pending[BE_PRIORITY])),
+ atomic_read(&(priv->tx_pending[BK_PRIORITY])),
+ priv->stats.txvidrop,
+ priv->stats.txvodrop,
+ priv->stats.txbedrop,
+ priv->stats.txbkdrop,
+ priv->stats.txdatapkt
+ );
+
+ return 0;
+}
+
+static int rtl8192_usb_stats_rx_show(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ seq_printf(m,
+ "RX packets: %lu\n"
+ "RX urb status error: %lu\n"
+ "RX invalid urb error: %lu\n",
+ priv->stats.rxoktotal,
+ priv->stats.rxstaterr,
+ priv->stats.rxurberr);
+
+ return 0;
+}
+
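+/* DEFINE_SHOW_ATTRIBUTE() generates the <name>_fops used below, wiring each <name>_show() through single_open() */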
+DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_rx);
+DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_tx);
+DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_ap);
+DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_registers);
+
+void rtl8192_debugfs_init_one(struct net_device *dev)
+{
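+ /* the module-level "r8192u_usb" dir is created once in rtl8192_debugfs_init(); each device gets its own subdir */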
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct dentry *parent_dir = debugfs_lookup(KBUILD_MODNAME, NULL);
+ struct dentry *dir = debugfs_create_dir(dev->name, parent_dir);
+
+ debugfs_create_file("stats-rx", 0444, dir, dev, &rtl8192_usb_stats_rx_fops);
+ debugfs_create_file("stats-tx", 0444, dir, dev, &rtl8192_usb_stats_tx_fops);
+ debugfs_create_file("stats-ap", 0444, dir, dev, &rtl8192_usb_stats_ap_fops);
+ debugfs_create_file("registers", 0444, dir, dev, &rtl8192_usb_registers_fops);
+
+ priv->debugfs_dir = dir;
+}
+
+void rtl8192_debugfs_exit_one(struct net_device *dev)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ debugfs_remove_recursive(priv->debugfs_dir);
+}
+
+void rtl8192_debugfs_rename_one(struct net_device *dev)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct dentry *parent_dir = debugfs_lookup(KBUILD_MODNAME, NULL);
+
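+ /* called from the NETDEV_CHANGENAME notifier to keep the debugfs dir name in sync with the netdev name */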
+ debugfs_rename(parent_dir, priv->debugfs_dir, parent_dir, dev->name);
+}
+
+void rtl8192_debugfs_init(void)
+{
+ debugfs_create_dir(KBUILD_MODNAME, NULL);
+}
+
+void rtl8192_debugfs_exit(void)
+{
+ debugfs_remove_recursive(debugfs_lookup(KBUILD_MODNAME, NULL));
+}
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 4a93839bf947..132afbf49dde 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -66,16 +66,16 @@ void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
{
struct ethhdr etherhdr;
struct iphdr ip_hdr;
- u16 UserPriority = 0;
+ u16 user_priority = 0;
_r8712_open_pktfile(ppktfile->pkt, ppktfile);
_r8712_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
- /* get UserPriority from IP hdr*/
+ /* get user_priority from IP hdr*/
if (pattrib->ether_type == 0x0800) {
_r8712_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
- /*UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3 ;*/
- UserPriority = ip_hdr.tos >> 5;
+ /*user_priority = (ntohs(ip_hdr.tos) >> 5) & 0x3 ;*/
+ user_priority = ip_hdr.tos >> 5;
} else {
/* "When priority processing of data frames is supported,
* a STA's SME should send EAPOL-Key frames at the highest
@@ -83,9 +83,9 @@ void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
*/
if (pattrib->ether_type == 0x888e)
- UserPriority = 7;
+ user_priority = 7;
}
- pattrib->priority = UserPriority;
+ pattrib->priority = user_priority;
pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
pattrib->subtype = WIFI_QOS_DATA_TYPE;
}
@@ -140,7 +140,7 @@ void r8712_xmit_complete(struct _adapter *padapter, struct xmit_frame *pxframe)
pxframe->pkt = NULL;
}
-int r8712_xmit_entry(_pkt *pkt, struct net_device *netdev)
+netdev_tx_t r8712_xmit_entry(_pkt *pkt, struct net_device *netdev)
{
struct xmit_frame *xmitframe = NULL;
struct _adapter *adapter = netdev_priv(netdev);
@@ -165,11 +165,11 @@ int r8712_xmit_entry(_pkt *pkt, struct net_device *netdev)
}
xmitpriv->tx_pkts++;
xmitpriv->tx_bytes += xmitframe->attrib.last_txcmdsz;
- return 0;
+ return NETDEV_TX_OK;
_xmit_entry_drop:
if (xmitframe)
r8712_free_xmitframe(xmitpriv, xmitframe);
xmitpriv->tx_drop++;
dev_kfree_skb_any(pkt);
- return 0;
+ return NETDEV_TX_OK;
}
diff --git a/drivers/staging/rtl8712/xmit_osdep.h b/drivers/staging/rtl8712/xmit_osdep.h
index b76021b568f8..1ad42658c883 100644
--- a/drivers/staging/rtl8712/xmit_osdep.h
+++ b/drivers/staging/rtl8712/xmit_osdep.h
@@ -34,7 +34,7 @@ struct sta_xmit_priv;
struct xmit_frame;
struct xmit_buf;
-int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev);
+netdev_tx_t r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev);
void r8712_SetFilter(struct work_struct *work);
int r8712_xmit_resource_alloc(struct _adapter *padapter,
struct xmit_buf *pxmitbuf);
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index 159ca1b9016b..590bde02058c 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -10,7 +10,6 @@ r8723bs-y = \
core/rtw_ieee80211.o \
core/rtw_mlme.o \
core/rtw_mlme_ext.o \
- core/rtw_odm.o \
core/rtw_pwrctrl.o \
core/rtw_recv.o \
core/rtw_rf.o \
@@ -33,7 +32,6 @@ r8723bs-y = \
hal/odm_DynamicTxPower.o \
hal/odm_EdcaTurboCheck.o \
hal/odm_HWConfig.o \
- hal/odm_NoiseMonitor.o \
hal/odm_RegConfig8723B.o \
hal/rtl8723b_cmd.o \
hal/rtl8723b_dm.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index b4170f64d118..d3f10a3cf972 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -161,8 +161,6 @@ static struct cmd_hdl wlancmds[] = {
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- int res = 0;
-
init_completion(&pcmdpriv->cmd_queue_comp);
init_completion(&pcmdpriv->terminate_cmdthread_comp);
@@ -175,18 +173,16 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
- if (!pcmdpriv->cmd_allocated_buf) {
- res = -ENOMEM;
- goto exit;
- }
+ if (!pcmdpriv->cmd_allocated_buf)
+ return -ENOMEM;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
if (!pcmdpriv->rsp_allocated_buf) {
- res = -ENOMEM;
- goto exit;
+ kfree(pcmdpriv->cmd_allocated_buf);
+ return -ENOMEM;
}
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3);
@@ -196,8 +192,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->rsp_cnt = 0;
mutex_init(&pcmdpriv->sctx_mutex);
-exit:
- return res;
+
+ return 0;
}
static void c2h_wk_callback(struct work_struct *work);
@@ -593,35 +589,6 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
return res;
}
-u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
-{
- struct cmd_obj *ph2c;
- struct setdatarate_parm *pbsetdataratepara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pbsetdataratepara = rtw_zmalloc(sizeof(struct setdatarate_parm));
- if (!pbsetdataratepara) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara, GEN_CMD_CODE(_SetDataRate));
- pbsetdataratepara->mac_id = 5;
- memcpy(pbsetdataratepara->datarates, rateset, NumRates);
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
- return res;
-}
-
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
/* rtw_free_cmd_obj(pcmd); */
@@ -1140,61 +1107,6 @@ exit:
return res;
}
-u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconfig)
-{
- struct cmd_obj *pcmdobj;
- struct SetChannelPlan_param *setChannelPlan_param;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 res = _SUCCESS;
-
- /* check if allow software config */
- if (swconfig && rtw_hal_is_disable_sw_channel_plan(padapter)) {
- res = _FAIL;
- goto exit;
- }
-
- /* check input parameter */
- if (!rtw_is_channel_plan_valid(chplan)) {
- res = _FAIL;
- goto exit;
- }
-
- /* prepare cmd parameter */
- setChannelPlan_param = rtw_zmalloc(sizeof(struct SetChannelPlan_param));
- if (!setChannelPlan_param) {
- res = _FAIL;
- goto exit;
- }
- setChannelPlan_param->channel_plan = chplan;
-
- if (enqueue) {
- /* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = rtw_zmalloc(sizeof(struct cmd_obj));
- if (!pcmdobj) {
- kfree(setChannelPlan_param);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelPlan_param, GEN_CMD_CODE(_SetChannelPlan));
- res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
- } else {
- /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
- if (set_chplan_hdl(padapter, (unsigned char *)setChannelPlan_param) != H2C_SUCCESS)
- res = _FAIL;
-
- kfree(setChannelPlan_param);
- }
-
- /* do something based on res... */
- if (res == _SUCCESS)
- padapter->mlmepriv.ChannelPlan = chplan;
-
-exit:
- return res;
-}
-
static void collect_traffic_statistics(struct adapter *padapter)
{
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 68e41d99679d..3d8a64f69448 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -634,23 +634,6 @@ void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie
}
}
-u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
-{
- u8 match = false;
- u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
-
- if (!ie_ptr)
- return match;
-
- eid = ie_ptr[0];
-
- if ((eid == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&ie_ptr[2], wps_oui, 4))) {
- *wps_ielen = ie_ptr[1]+2;
- match = true;
- }
- return match;
-}
-
/**
* rtw_get_wps_ie - Search WPS IE from a series of IEs
* @in_ie: Address of IEs to search
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index f2242cf2dfb4..6498fd17e1d3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -2521,7 +2521,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
{
u8 issued;
int priority;
- struct sta_info *psta = NULL;
+ struct sta_info *psta;
struct ht_priv *phtpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
s32 bmcst = IS_MCAST(pattrib->ra);
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index f878b04076d8..8e74b4f47b94 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -5945,27 +5945,6 @@ int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset
return connect_allow ? _SUCCESS : _FAIL;
}
-/* Find union about ch, bw, ch_offset of all linked/linking interfaces */
-int rtw_get_ch_setting_union(struct adapter *adapter, u8 *ch, u8 *bw, u8 *offset)
-{
- struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
- struct adapter *iface;
-
- if (ch)
- *ch = 0;
- if (bw)
- *bw = CHANNEL_WIDTH_20;
- if (offset)
- *offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- iface = dvobj->padapters;
-
- if (!check_fwstate(&iface->mlmepriv, _FW_LINKED|_FW_UNDER_LINKING))
- return 0;
-
- return 1;
-}
-
u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf)
{
struct set_ch_parm *set_ch_parm;
diff --git a/drivers/staging/rtl8723bs/core/rtw_odm.c b/drivers/staging/rtl8723bs/core/rtw_odm.c
deleted file mode 100644
index f6b73a2a0270..000000000000
--- a/drivers/staging/rtl8723bs/core/rtw_odm.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2013 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <rtw_debug.h>
-#include <rtw_odm.h>
-#include <hal_data.h>
-
-static const char * const odm_comp_str[] = {
- /* BIT0 */"ODM_COMP_DIG",
- /* BIT1 */"ODM_COMP_RA_MASK",
- /* BIT2 */"ODM_COMP_DYNAMIC_TXPWR",
- /* BIT3 */"ODM_COMP_FA_CNT",
- /* BIT4 */"ODM_COMP_RSSI_MONITOR",
- /* BIT5 */"ODM_COMP_CCK_PD",
- /* BIT6 */"ODM_COMP_ANT_DIV",
- /* BIT7 */"ODM_COMP_PWR_SAVE",
- /* BIT8 */"ODM_COMP_PWR_TRAIN",
- /* BIT9 */"ODM_COMP_RATE_ADAPTIVE",
- /* BIT10 */"ODM_COMP_PATH_DIV",
- /* BIT11 */"ODM_COMP_PSD",
- /* BIT12 */"ODM_COMP_DYNAMIC_PRICCA",
- /* BIT13 */"ODM_COMP_RXHP",
- /* BIT14 */"ODM_COMP_MP",
- /* BIT15 */"ODM_COMP_DYNAMIC_ATC",
- /* BIT16 */"ODM_COMP_EDCA_TURBO",
- /* BIT17 */"ODM_COMP_EARLY_MODE",
- /* BIT18 */NULL,
- /* BIT19 */NULL,
- /* BIT20 */NULL,
- /* BIT21 */NULL,
- /* BIT22 */NULL,
- /* BIT23 */NULL,
- /* BIT24 */"ODM_COMP_TX_PWR_TRACK",
- /* BIT25 */"ODM_COMP_RX_GAIN_TRACK",
- /* BIT26 */"ODM_COMP_CALIBRATION",
- /* BIT27 */NULL,
- /* BIT28 */NULL,
- /* BIT29 */NULL,
- /* BIT30 */"ODM_COMP_COMMON",
- /* BIT31 */"ODM_COMP_INIT",
-};
-
-#define RTW_ODM_COMP_MAX 32
-
-static const char * const odm_ability_str[] = {
- /* BIT0 */"ODM_BB_DIG",
- /* BIT1 */"ODM_BB_RA_MASK",
- /* BIT2 */"ODM_BB_DYNAMIC_TXPWR",
- /* BIT3 */"ODM_BB_FA_CNT",
- /* BIT4 */"ODM_BB_RSSI_MONITOR",
- /* BIT5 */"ODM_BB_CCK_PD",
- /* BIT6 */"ODM_BB_ANT_DIV",
- /* BIT7 */"ODM_BB_PWR_SAVE",
- /* BIT8 */"ODM_BB_PWR_TRAIN",
- /* BIT9 */"ODM_BB_RATE_ADAPTIVE",
- /* BIT10 */"ODM_BB_PATH_DIV",
- /* BIT11 */"ODM_BB_PSD",
- /* BIT12 */"ODM_BB_RXHP",
- /* BIT13 */"ODM_BB_ADAPTIVITY",
- /* BIT14 */"ODM_BB_DYNAMIC_ATC",
- /* BIT15 */NULL,
- /* BIT16 */"ODM_MAC_EDCA_TURBO",
- /* BIT17 */"ODM_MAC_EARLY_MODE",
- /* BIT18 */NULL,
- /* BIT19 */NULL,
- /* BIT20 */NULL,
- /* BIT21 */NULL,
- /* BIT22 */NULL,
- /* BIT23 */NULL,
- /* BIT24 */"ODM_RF_TX_PWR_TRACK",
- /* BIT25 */"ODM_RF_RX_GAIN_TRACK",
- /* BIT26 */"ODM_RF_CALIBRATION",
-};
-
-#define RTW_ODM_ABILITY_MAX 27
-
-static const char * const odm_dbg_level_str[] = {
- NULL,
- "ODM_DBG_OFF",
- "ODM_DBG_SERIOUS",
- "ODM_DBG_WARNING",
- "ODM_DBG_LOUD",
- "ODM_DBG_TRACE",
-};
-
-#define RTW_ODM_DBG_LEVEL_NUM 6
-
-void rtw_odm_dbg_comp_msg(struct adapter *adapter)
-{
- u64 dbg_comp;
- int i;
-
- rtw_hal_get_def_var(adapter, HW_DEF_ODM_DBG_FLAG, &dbg_comp);
- netdev_dbg(adapter->pnetdev, "odm.DebugComponents = 0x%016llx\n",
- dbg_comp);
- for (i = 0; i < RTW_ODM_COMP_MAX; i++) {
- if (odm_comp_str[i])
- netdev_dbg(adapter->pnetdev, "%cBIT%-2d %s\n",
- (BIT0 << i) & dbg_comp ? '+' : ' ', i,
- odm_comp_str[i]);
- }
-}
-
-inline void rtw_odm_dbg_comp_set(struct adapter *adapter, u64 comps)
-{
- rtw_hal_set_def_var(adapter, HW_DEF_ODM_DBG_FLAG, &comps);
-}
-
-void rtw_odm_dbg_level_msg(void *sel, struct adapter *adapter)
-{
- u32 dbg_level;
- int i;
-
- rtw_hal_get_def_var(adapter, HW_DEF_ODM_DBG_LEVEL, &dbg_level);
- netdev_dbg(adapter->pnetdev, "odm.DebugLevel = %u\n", dbg_level);
- for (i = 0; i < RTW_ODM_DBG_LEVEL_NUM; i++) {
- if (odm_dbg_level_str[i])
- netdev_dbg(adapter->pnetdev, "%u %s\n", i,
- odm_dbg_level_str[i]);
- }
-}
-
-inline void rtw_odm_dbg_level_set(struct adapter *adapter, u32 level)
-{
- rtw_hal_set_def_var(adapter, HW_DEF_ODM_DBG_LEVEL, &level);
-}
-
-void rtw_odm_ability_msg(void *sel, struct adapter *adapter)
-{
- u32 ability = 0;
- int i;
-
- rtw_hal_get_hwreg(adapter, HW_VAR_DM_FLAG, (u8 *)&ability);
- netdev_dbg(adapter->pnetdev, "odm.SupportAbility = 0x%08x\n", ability);
- for (i = 0; i < RTW_ODM_ABILITY_MAX; i++) {
- if (odm_ability_str[i])
- netdev_dbg(adapter->pnetdev, "%cBIT%-2d %s\n",
- (BIT0 << i) & ability ? '+' : ' ', i,
- odm_ability_str[i]);
- }
-}
-
-inline void rtw_odm_ability_set(struct adapter *adapter, u32 ability)
-{
- rtw_hal_set_hwreg(adapter, HW_VAR_DM_FLAG, (u8 *)&ability);
-}
-
-void rtw_odm_adaptivity_parm_msg(void *sel, struct adapter *adapter)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(adapter);
- struct dm_odm_t *odm = &pHalData->odmpriv;
-
- netdev_dbg(adapter->pnetdev, "%10s %16s %8s %10s %11s %14s\n",
- "TH_L2H_ini", "TH_EDCCA_HL_diff", "IGI_Base", "ForceEDCCA",
- "AdapEn_RSSI", "IGI_LowerBound");
- netdev_dbg(adapter->pnetdev,
- "0x%-8x %-16d 0x%-6x %-10d %-11u %-14u\n",
- (u8)odm->TH_L2H_ini,
- odm->TH_EDCCA_HL_diff,
- odm->IGI_Base,
- odm->ForceEDCCA,
- odm->AdapEn_RSSI,
- odm->IGI_LowerBound);
-}
-
-void rtw_odm_adaptivity_parm_set(struct adapter *adapter, s8 TH_L2H_ini,
- s8 TH_EDCCA_HL_diff, s8 IGI_Base,
- bool ForceEDCCA, u8 AdapEn_RSSI,
- u8 IGI_LowerBound)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(adapter);
- struct dm_odm_t *odm = &pHalData->odmpriv;
-
- odm->TH_L2H_ini = TH_L2H_ini;
- odm->TH_EDCCA_HL_diff = TH_EDCCA_HL_diff;
- odm->IGI_Base = IGI_Base;
- odm->ForceEDCCA = ForceEDCCA;
- odm->AdapEn_RSSI = AdapEn_RSSI;
- odm->IGI_LowerBound = IGI_LowerBound;
-}
-
-void rtw_odm_get_perpkt_rssi(void *sel, struct adapter *adapter)
-{
- struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
- struct dm_odm_t *odm = &hal_data->odmpriv;
-
- netdev_dbg(adapter->pnetdev,
- "RxRate = %s, RSSI_A = %d(%%), RSSI_B = %d(%%)\n",
- HDATA_RATE(odm->RxRate), odm->RSSI_A, odm->RSSI_B);
-}
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index d8d394b67eeb..2825375bff94 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -203,22 +203,12 @@ signed int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *q
}
/*
-signed int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
-{
- return rtw_free_recvframe(precvframe, queue);
-}
-*/
-
-
-
-
-/*
-caller : defrag ; recvframe_chk_defrag in recv_thread (passive)
-pframequeue: defrag_queue : will be accessed in recv_thread (passive)
-
-using spinlock to protect
-
-*/
+ * caller : defrag ; recvframe_chk_defrag in recv_thread (passive)
+ * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
+ *
+ * using spinlock to protect
+ *
+ */
void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
{
@@ -245,6 +235,7 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
{
u32 cnt = 0;
union recv_frame *pending_frame;
+
while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
cnt++;
@@ -397,6 +388,7 @@ static union recv_frame *decryptor(struct adapter *padapter, union recv_frame *p
if (prxattrib->encrypt > 0) {
u8 *iv = precv_frame->u.hdr.rx_data+prxattrib->hdrlen;
+
prxattrib->key_index = (((iv[3])>>6)&0x3);
if (prxattrib->key_index > WEP_KEYS) {
@@ -882,6 +874,7 @@ static signed int sta2ap_data_frame(struct adapter *adapter, union recv_frame *p
}
} else {
u8 *myhwaddr = myid(&adapter->eeprompriv);
+
if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
ret = RTW_RX_HANDLED;
goto exit;
@@ -1125,6 +1118,7 @@ static union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union re
psta = rtw_get_stainfo(pstapriv, psta_addr);
if (!psta) {
u8 type = GetFrameType(pfhdr->rx_data);
+
if (type != WIFI_DATA_TYPE) {
psta = rtw_get_bcmc_stainfo(padapter);
pdefrag_q = &psta->sta_recvpriv.defrag_q;
@@ -1207,6 +1201,7 @@ static signed int validate_recv_mgnt_frame(struct adapter *padapter, union recv_
{
/* for rx pkt statistics */
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
+
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON)
@@ -1374,9 +1369,8 @@ static signed int validate_80211w_mgmt(struct adapter *adapter, union recv_frame
/* actual management data frame body */
data_len = pattrib->pkt_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
mgmt_DATA = rtw_zmalloc(data_len);
- if (!mgmt_DATA) {
+ if (!mgmt_DATA)
goto validate_80211w_fail;
- }
precv_frame = decryptor(adapter, precv_frame);
/* save actual management data frame body */
memcpy(mgmt_DATA, ptr+pattrib->hdrlen+pattrib->iv_len, data_len);
@@ -1385,9 +1379,8 @@ static signed int validate_80211w_mgmt(struct adapter *adapter, union recv_frame
/* remove the iv and icv length */
pattrib->pkt_len = pattrib->pkt_len - pattrib->iv_len - pattrib->icv_len;
kfree(mgmt_DATA);
- if (!precv_frame) {
+ if (!precv_frame)
goto validate_80211w_fail;
- }
} else if (IS_MCAST(GetAddr1Ptr(ptr)) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC)) {
signed int BIP_ret = _SUCCESS;
@@ -1480,6 +1473,7 @@ static signed int validate_recv_frame(struct adapter *adapter, union recv_frame
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
struct recv_priv *precvpriv = &adapter->recvpriv;
+
precvpriv->rx_drop++;
} else if (retval == _SUCCESS) {
#ifdef DBG_RX_DUMP_EAP
@@ -1651,14 +1645,12 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;/* 4096; */
/* Rx Reorder initialize condition. */
- if (preorder_ctrl->indicate_seq == 0xFFFF) {
+ if (preorder_ctrl->indicate_seq == 0xFFFF)
preorder_ctrl->indicate_seq = seq_num;
- }
/* Drop out the packet which SeqNum is smaller than WinStart */
- if (SN_LESS(seq_num, preorder_ctrl->indicate_seq)) {
+ if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
return false;
- }
/* */
/* Sliding window manipulation. Conditions includes: */
@@ -2084,10 +2076,8 @@ s32 rtw_recv_entry(union recv_frame *precvframe)
precvpriv = &padapter->recvpriv;
ret = recv_func(padapter, precvframe);
- if (ret == _FAIL) {
+ if (ret == _FAIL)
goto _recv_entry_drop;
- }
-
precvpriv->rx_pkts++;
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 9acd49323c7c..e36f8c369a04 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -1283,11 +1283,6 @@ s32 hal_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter)
return (s32)GLBtCoexist.btInfo.bBtCtrlAggBufSize;
}
-void hal_btcoex_SetManualControl(struct adapter *padapter, u8 bmanual)
-{
- GLBtCoexist.bManualControl = bmanual;
-}
-
bool hal_btcoex_IsBtControlLps(struct adapter *padapter)
{
if (!hal_btcoex_IsBtExist(padapter))
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 909b37bcc897..e42556d03bce 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -861,25 +861,6 @@ bool eqNByte(u8 *str1, u8 *str2, u32 num)
/* */
/* Description: */
-/* Return true if chTmp is represent for hex digit and */
-/* false otherwise. */
-/* */
-/* */
-bool IsHexDigit(char chTmp)
-{
- if (
- (chTmp >= '0' && chTmp <= '9') ||
- (chTmp >= 'a' && chTmp <= 'f') ||
- (chTmp >= 'A' && chTmp <= 'F')
- )
- return true;
- else
- return false;
-}
-
-
-/* */
-/* Description: */
/* Translate a character to hex digit. */
/* */
u32 MapCharToHexDigit(char chTmp)
@@ -894,106 +875,6 @@ u32 MapCharToHexDigit(char chTmp)
return 0;
}
-
-
-/* Description: */
-/* Parse hex number from the string pucStr. */
-bool GetHexValueFromString(char *szStr, u32 *pu4bVal, u32 *pu4bMove)
-{
- char *szScan = szStr;
-
- /* Check input parameter. */
- if (!szStr || !pu4bVal || !pu4bMove)
- return false;
-
- /* Initialize output. */
- *pu4bMove = 0;
- *pu4bVal = 0;
-
- /* Skip leading space. */
- while (*szScan != '\0' && (*szScan == ' ' || *szScan == '\t')) {
- szScan++;
- (*pu4bMove)++;
- }
-
- /* Skip leading '0x' or '0X'. */
- if (*szScan == '0' && (*(szScan+1) == 'x' || *(szScan+1) == 'X')) {
- szScan += 2;
- (*pu4bMove) += 2;
- }
-
- /* Check if szScan is now pointer to a character for hex digit, */
- /* if not, it means this is not a valid hex number. */
- if (!IsHexDigit(*szScan))
- return false;
-
- /* Parse each digit. */
- do {
- (*pu4bVal) <<= 4;
- *pu4bVal += MapCharToHexDigit(*szScan);
-
- szScan++;
- (*pu4bMove)++;
- } while (IsHexDigit(*szScan));
-
- return true;
-}
-
-bool GetFractionValueFromString(
- char *szStr, u8 *pInteger, u8 *pFraction, u32 *pu4bMove
-)
-{
- char *szScan = szStr;
-
- /* Initialize output. */
- *pu4bMove = 0;
- *pInteger = 0;
- *pFraction = 0;
-
- /* Skip leading space. */
- while (*szScan != '\0' && (*szScan == ' ' || *szScan == '\t')) {
- ++szScan;
- ++(*pu4bMove);
- }
-
- /* Parse each digit. */
- do {
- (*pInteger) *= 10;
- *pInteger += (*szScan - '0');
-
- ++szScan;
- ++(*pu4bMove);
-
- if (*szScan == '.') {
- ++szScan;
- ++(*pu4bMove);
-
- if (*szScan < '0' || *szScan > '9')
- return false;
- else {
- *pFraction = *szScan - '0';
- ++szScan;
- ++(*pu4bMove);
- return true;
- }
- }
- } while (*szScan >= '0' && *szScan <= '9');
-
- return true;
-}
-
-/* */
-/* Description: */
-/* Return true if szStr is comment out with leading "//". */
-/* */
-bool IsCommentString(char *szStr)
-{
- if (*szStr == '/' && *(szStr+1) == '/')
- return true;
- else
- return false;
-}
-
bool GetU1ByteIntegerFromStringInDecimal(char *Str, u8 *pInt)
{
u16 i = 0;
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index 94ecefb9113d..6bb0ff8d7c78 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -400,11 +400,6 @@ c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter)
return adapter->HalFunc.c2h_id_filter_ccx;
}
-s32 rtw_hal_is_disable_sw_channel_plan(struct adapter *padapter)
-{
- return GET_HAL_DATA(padapter)->bDisableSWChannelPlan;
-}
-
s32 rtw_hal_macid_sleep(struct adapter *padapter, u32 macid)
{
u8 support;
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index 19cfc2915458..fe9782d2d4fd 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -14,7 +14,6 @@
#include "odm_DynamicBBPowerSaving.h"
#include "odm_DynamicTxPower.h"
#include "odm_CfoTracking.h"
-#include "odm_NoiseMonitor.h"
#define TP_MODE 0
#define RSSI_MODE 1
@@ -863,7 +862,6 @@ struct dm_odm_t { /* DM_Out_Source_Dynamic_Mechanism_Structure */
u8 Adaptivity_IGI_upper;
u8 NHM_cnt_0;
- struct odm_noise_monitor noise_level;/* ODM_MAX_CHANNEL_NUM]; */
/* */
/* 2 Define STA info. */
/* _ODM_STA_INFO */
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index 7e92c373cddb..07edf74ccfe5 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -309,63 +309,6 @@ void ODM_Write_DIG(void *pDM_VOID, u8 CurrentIGI)
}
-void odm_PauseDIG(
- void *pDM_VOID,
- enum ODM_Pause_DIG_TYPE PauseType,
- u8 IGIValue
-)
-{
- struct dm_odm_t *pDM_Odm = (struct dm_odm_t *)pDM_VOID;
- struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
- static bool bPaused;
-
- if (
- (pDM_Odm->SupportAbility & ODM_BB_ADAPTIVITY) &&
- pDM_Odm->TxHangFlg == true
- ) {
- return;
- }
-
- if (
- !bPaused && (!(pDM_Odm->SupportAbility & ODM_BB_DIG) ||
- !(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
- ){
- return;
- }
-
- switch (PauseType) {
- /* 1 Pause DIG */
- case ODM_PAUSE_DIG:
- /* 2 Disable DIG */
- ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pDM_Odm->SupportAbility & (~ODM_BB_DIG));
-
- /* 2 Backup IGI value */
- if (!bPaused) {
- pDM_DigTable->IGIBackup = pDM_DigTable->CurIGValue;
- bPaused = true;
- }
-
- /* 2 Write new IGI value */
- ODM_Write_DIG(pDM_Odm, IGIValue);
- break;
-
- /* 1 Resume DIG */
- case ODM_RESUME_DIG:
- if (bPaused) {
- /* 2 Write backup IGI value */
- ODM_Write_DIG(pDM_Odm, pDM_DigTable->IGIBackup);
- bPaused = false;
-
- /* 2 Enable DIG */
- ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pDM_Odm->SupportAbility | ODM_BB_DIG);
- }
- break;
-
- default:
- break;
- }
-}
-
bool odm_DigAbort(void *pDM_VOID)
{
struct dm_odm_t *pDM_Odm = (struct dm_odm_t *)pDM_VOID;
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.h b/drivers/staging/rtl8723bs/hal/odm_DIG.h
index 88cfd542df16..a5b041101c89 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.h
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.h
@@ -141,8 +141,6 @@ void odm_Adaptivity(void *pDM_VOID, u8 IGI);
void ODM_Write_DIG(void *pDM_VOID, u8 CurrentIGI);
-void odm_PauseDIG(void *pDM_VOID, enum ODM_Pause_DIG_TYPE PauseType, u8 IGIValue);
-
void odm_DIGInit(void *pDM_VOID);
void odm_DIG(void *pDM_VOID);
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
deleted file mode 100644
index 392cc8a398f5..000000000000
--- a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-
-/* This function is for inband noise test utility only */
-/* To obtain the inband noise level(dbm), do the following. */
-/* 1. disable DIG and Power Saving */
-/* 2. Set initial gain = 0x1a */
-/* 3. Stop updating idle time pwer report (for driver read) */
-/* - 0x80c[25] */
-
-#define Valid_Min -35
-#define Valid_Max 10
-#define ValidCnt 5
-
-static s16 odm_InbandNoise_Monitor_NSeries(
- struct dm_odm_t *pDM_Odm,
- u8 bPauseDIG,
- u8 IGIValue,
- u32 max_time
-)
-{
- u32 tmp4b;
- u8 max_rf_path = 0, rf_path;
- u8 reg_c50, reg_c58, valid_done = 0;
- struct noise_level noise_data;
- u32 start = 0;
-
- pDM_Odm->noise_level.noise_all = 0;
-
- max_rf_path = 1;
-
- memset(&noise_data, 0, sizeof(struct noise_level));
-
- /* */
- /* Step 1. Disable DIG && Set initial gain. */
- /* */
-
- if (bPauseDIG)
- odm_PauseDIG(pDM_Odm, ODM_PAUSE_DIG, IGIValue);
- /* */
- /* Step 2. Disable all power save for read registers */
- /* */
- /* dcmd_DebugControlPowerSave(padapter, PSDisable); */
-
- /* */
- /* Step 3. Get noise power level */
- /* */
- start = jiffies;
- while (1) {
-
- /* Stop updating idle time pwer report (for driver read) */
- PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_TxGainStage, BIT25, 1);
-
- /* Read Noise Floor Report */
- tmp4b = PHY_QueryBBReg(pDM_Odm->Adapter, 0x8f8, bMaskDWord);
-
- /* PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XAAGCCore1, bMaskByte0, TestInitialGain); */
- /* if (max_rf_path == 2) */
- /* PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBAGCCore1, bMaskByte0, TestInitialGain); */
-
- /* update idle time pwer report per 5us */
- PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_TxGainStage, BIT25, 0);
-
- noise_data.value[RF_PATH_A] = (u8)(tmp4b&0xff);
- noise_data.value[RF_PATH_B] = (u8)((tmp4b&0xff00)>>8);
-
- for (rf_path = RF_PATH_A; rf_path < max_rf_path; rf_path++) {
- noise_data.sval[rf_path] = (s8)noise_data.value[rf_path];
- noise_data.sval[rf_path] /= 2;
- }
- /* mdelay(10); */
- /* msleep(10); */
-
- for (rf_path = RF_PATH_A; rf_path < max_rf_path; rf_path++) {
- if ((noise_data.valid_cnt[rf_path] < ValidCnt) && (noise_data.sval[rf_path] < Valid_Max && noise_data.sval[rf_path] >= Valid_Min)) {
- noise_data.valid_cnt[rf_path]++;
- noise_data.sum[rf_path] += noise_data.sval[rf_path];
- if (noise_data.valid_cnt[rf_path] == ValidCnt) {
- valid_done++;
- }
-
- }
-
- }
-
- /* printk("####### valid_done:%d #############\n", valid_done); */
- if ((valid_done == max_rf_path) || (jiffies_to_msecs(jiffies - start) > max_time)) {
- for (rf_path = RF_PATH_A; rf_path < max_rf_path; rf_path++) {
- /* printk("%s PATH_%d - sum = %d, valid_cnt = %d\n", __func__, rf_path, noise_data.sum[rf_path], noise_data.valid_cnt[rf_path]); */
- if (noise_data.valid_cnt[rf_path])
- noise_data.sum[rf_path] /= noise_data.valid_cnt[rf_path];
- else
- noise_data.sum[rf_path] = 0;
- }
- break;
- }
- }
- reg_c50 = (s32)PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XAAGCCore1, bMaskByte0);
- reg_c50 &= ~BIT7;
- pDM_Odm->noise_level.noise[RF_PATH_A] = -110 + reg_c50 + noise_data.sum[RF_PATH_A];
- pDM_Odm->noise_level.noise_all += pDM_Odm->noise_level.noise[RF_PATH_A];
-
- if (max_rf_path == 2) {
- reg_c58 = (s32)PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XBAGCCore1, bMaskByte0);
- reg_c58 &= ~BIT7;
- pDM_Odm->noise_level.noise[RF_PATH_B] = -110 + reg_c58 + noise_data.sum[RF_PATH_B];
- pDM_Odm->noise_level.noise_all += pDM_Odm->noise_level.noise[RF_PATH_B];
- }
- pDM_Odm->noise_level.noise_all /= max_rf_path;
-
- /* */
- /* Step 4. Recover the Dig */
- /* */
- if (bPauseDIG)
- odm_PauseDIG(pDM_Odm, ODM_RESUME_DIG, IGIValue);
-
- return pDM_Odm->noise_level.noise_all;
-
-}
-
-s16 ODM_InbandNoise_Monitor(void *pDM_VOID, u8 bPauseDIG, u8 IGIValue, u32 max_time)
-{
- return odm_InbandNoise_Monitor_NSeries(pDM_VOID, bPauseDIG, IGIValue, max_time);
-}
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
deleted file mode 100644
index ab114543f39c..000000000000
--- a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- *****************************************************************************/
-#ifndef __ODMNOISEMONITOR_H__
-#define __ODMNOISEMONITOR_H__
-
-#define ODM_MAX_CHANNEL_NUM 38/* 14+24 */
-struct noise_level {
- /* u8 value_a, value_b; */
- u8 value[MAX_RF_PATH];
- /* s8 sval_a, sval_b; */
- s8 sval[MAX_RF_PATH];
-
- /* s32 noise_a = 0, noise_b = 0, sum_a = 0, sum_b = 0; */
- /* s32 noise[ODM_RF_PATH_MAX]; */
- s32 sum[MAX_RF_PATH];
- /* u8 valid_cnt_a = 0, valid_cnt_b = 0, */
- u8 valid[MAX_RF_PATH];
- u8 valid_cnt[MAX_RF_PATH];
-
-};
-
-
-struct odm_noise_monitor {
- s8 noise[MAX_RF_PATH];
- s16 noise_all;
-};
-
-s16 ODM_InbandNoise_Monitor(
- void *pDM_VOID,
- u8 bPauseDIG,
- u8 IGIValue,
- u32 max_time
-);
-
-#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_precomp.h b/drivers/staging/rtl8723bs/hal/odm_precomp.h
index edce506022a5..2987857a8761 100644
--- a/drivers/staging/rtl8723bs/hal/odm_precomp.h
+++ b/drivers/staging/rtl8723bs/hal/odm_precomp.h
@@ -33,7 +33,6 @@
#include "odm_DynamicBBPowerSaving.h"
#include "odm_DynamicTxPower.h"
#include "odm_CfoTracking.h"
-#include "odm_NoiseMonitor.h"
#include "HalPhyRf.h"
#include "HalPhyRf_8723B.h"/* for IQK, LCK, Power-tracking */
#include "rtl8723b_hal.h"
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 0bbbdebdf157..82159e1c7f9b 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -50,7 +50,6 @@
#include <rtw_mlme_ext.h>
#include <rtw_ap.h>
#include <rtw_version.h>
-#include <rtw_odm.h>
#include "ioctl_cfg80211.h"
@@ -493,8 +492,6 @@ static inline u8 *myid(struct eeprom_priv *peepriv)
#include <rtw_btcoex.h>
-int rtw_change_ifname(struct adapter *padapter, const char *ifname);
-
extern char *rtw_initmac;
extern int rtw_mc2u_disable;
extern int rtw_ht_enable;
diff --git a/drivers/staging/rtl8723bs/include/hal_btcoex.h b/drivers/staging/rtl8723bs/include/hal_btcoex.h
index 78599d3521bf..fb167642da01 100644
--- a/drivers/staging/rtl8723bs/include/hal_btcoex.h
+++ b/drivers/staging/rtl8723bs/include/hal_btcoex.h
@@ -45,7 +45,6 @@ void hal_btcoex_HaltNotify(struct adapter *padapter);
void hal_btcoex_Handler(struct adapter *padapter);
s32 hal_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter);
-void hal_btcoex_SetManualControl(struct adapter *padapter, u8 bmanual);
bool hal_btcoex_IsBtControlLps(struct adapter *padapter);
bool hal_btcoex_IsLpsOn(struct adapter *padapter);
u8 hal_btcoex_RpwmVal(struct adapter *);
diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h
index 7be0ea20bca4..6356b8c2ef81 100644
--- a/drivers/staging/rtl8723bs/include/hal_com.h
+++ b/drivers/staging/rtl8723bs/include/hal_com.h
@@ -147,17 +147,8 @@ u8 GetHalDefVar(struct adapter *adapter, enum hal_def_variable variable,
bool eqNByte(u8 *str1, u8 *str2, u32 num);
-bool IsHexDigit(char chTmp);
-
u32 MapCharToHexDigit(char chTmp);
-bool GetHexValueFromString(char *szStr, u32 *pu4bVal, u32 *pu4bMove);
-
-bool GetFractionValueFromString(char *szStr, u8 *pInteger, u8 *pFraction,
- u32 *pu4bMove);
-
-bool IsCommentString(char *szStr);
-
bool ParseQualifiedString(char *In, u32 *Start, char *Out, char LeftQualifier,
char RightQualifier);
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 45bebbadb7ca..5cffab2d06ff 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -353,8 +353,6 @@ bool rtw_hal_c2h_valid(struct adapter *adapter, u8 *buf);
s32 rtw_hal_c2h_handler(struct adapter *adapter, u8 *c2h_evt);
c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter);
-s32 rtw_hal_is_disable_sw_channel_plan(struct adapter *padapter);
-
s32 rtw_hal_macid_sleep(struct adapter *padapter, u32 macid);
s32 rtw_hal_macid_wakeup(struct adapter *padapter, u32 macid);
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index 1e627dc0044d..9041d8dc5fb1 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -746,7 +746,6 @@ int rtw_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwi
void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len);
-u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen);
u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr);
u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content);
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
index 1bf030cbbbbe..fe1b03101203 100644
--- a/drivers/staging/rtl8723bs/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -591,7 +591,6 @@ extern u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8
extern u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork);
u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
extern u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infrastructure networktype, bool enqueue);
-extern u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
extern u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
extern u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval);
@@ -613,8 +612,6 @@ extern u8 rtw_ps_cmd(struct adapter *padapter);
u8 rtw_chk_hi_queue_cmd(struct adapter *padapter);
-extern u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconfig);
-
extern u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length);
extern u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 89b389d4c44b..65e138a5238f 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -662,7 +662,6 @@ extern void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint
extern u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer);
int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset);
-int rtw_get_ch_setting_union(struct adapter *adapter, u8 *ch, u8 *bw, u8 *offset);
struct cmd_hdl {
uint parmsize;
diff --git a/drivers/staging/rtl8723bs/include/rtw_odm.h b/drivers/staging/rtl8723bs/include/rtw_odm.h
deleted file mode 100644
index 94fc68a5c424..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_odm.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2013 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef __RTW_ODM_H__
-#define __RTW_ODM_H__
-
-#include <drv_types.h>
-
-/*
-* This file provides utilities/wrappers for rtw driver to use ODM
-*/
-
-void rtw_odm_dbg_comp_msg(struct adapter *adapter);
-void rtw_odm_dbg_comp_set(struct adapter *adapter, u64 comps);
-void rtw_odm_dbg_level_msg(void *sel, struct adapter *adapter);
-void rtw_odm_dbg_level_set(struct adapter *adapter, u32 level);
-
-void rtw_odm_ability_msg(void *sel, struct adapter *adapter);
-void rtw_odm_ability_set(struct adapter *adapter, u32 ability);
-
-void rtw_odm_adaptivity_parm_msg(void *sel, struct adapter *adapter);
-void rtw_odm_adaptivity_parm_set(struct adapter *adapter, s8 TH_L2H_ini, s8 TH_EDCCA_HL_diff,
- s8 IGI_Base, bool ForceEDCCA, u8 AdapEn_RSSI, u8 IGI_LowerBound);
-void rtw_odm_get_perpkt_rssi(void *sel, struct adapter *adapter);
-#endif /* __RTW_ODM_H__ */
diff --git a/drivers/staging/rtl8723bs/include/xmit_osdep.h b/drivers/staging/rtl8723bs/include/xmit_osdep.h
index e781cd5dfd01..8704dced593a 100644
--- a/drivers/staging/rtl8723bs/include/xmit_osdep.h
+++ b/drivers/staging/rtl8723bs/include/xmit_osdep.h
@@ -25,8 +25,8 @@ struct sta_xmit_priv;
struct xmit_frame;
struct xmit_buf;
-extern int _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
-extern int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
+extern void _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
+extern netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
void rtw_os_xmit_schedule(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 2d09be6425d5..6aeb169c6ebf 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -198,7 +198,7 @@ static int rtw_ieee80211_channel_to_frequency(int chan, int band)
if (band == NL80211_BAND_2GHZ) {
if (chan == 14)
return 2484;
- else if (chan < 14)
+ else if (chan < 14)
return 2407 + chan * 5;
}
@@ -810,7 +810,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
- padapter->securitypriv.binstallGrpkey = true;
+ padapter->securitypriv.binstallGrpkey = true;
padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true);
@@ -920,9 +920,9 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len);
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true
- || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
- ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
- }
+ || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
+ }
addkey_end:
kfree(param);
@@ -1066,7 +1066,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
}
}
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = -EPERM;
goto exit;
}
@@ -1240,7 +1240,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
}
rtw_ps_deny(padapter, PS_DENY_SCAN);
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
need_indicate_scan_done = true;
goto check_need_indicate_scan_done;
}
@@ -1500,49 +1500,49 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
pairwise_cipher = WPA_CIPHER_NONE;
switch (group_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
}
switch (pairwise_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
}
{/* handle wps_ie */
@@ -1583,7 +1583,7 @@ static int cfg80211_rtw_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ret = 0;
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = -EPERM;
goto exit;
}
@@ -1674,7 +1674,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
}
rtw_ps_deny(padapter, PS_DENY_JOIN);
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = -EPERM;
goto exit;
}
@@ -1849,6 +1849,7 @@ static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
inline bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter)
{
struct rtw_wdev_priv *rtw_wdev_priv = adapter_wdev_data(adapter);
+
return rtw_wdev_priv->power_mgmt;
}
@@ -1954,6 +1955,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
{
struct station_info sinfo = {};
u8 ie_offset;
+
if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
else /* WIFI_REASSOCREQ */
@@ -2085,7 +2087,8 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
/* Use the real net device to transmit the packet */
- return _rtw_xmit_entry(skb, padapter->pnetdev);
+ _rtw_xmit_entry(skb, padapter->pnetdev);
+ return NETDEV_TX_OK;
} else if ((frame_control & (IEEE80211_FCTL_FTYPE|IEEE80211_FCTL_STYPE)) ==
(IEEE80211_FTYPE_MGMT|IEEE80211_STYPE_ACTION)) {
@@ -2348,7 +2351,7 @@ static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_beacon_data *info)
+ struct cfg80211_beacon_data *info)
{
struct adapter *adapter = rtw_netdev_priv(ndev);
@@ -2467,7 +2470,7 @@ static int cfg80211_rtw_dump_station(struct wiphy *wiphy, struct net_device *nde
spin_lock_bh(&pstapriv->asoc_list_lock);
psta = rtw_sta_info_get_by_idx(idx, pstapriv);
spin_unlock_bh(&pstapriv->asoc_list_lock);
- if (NULL == psta) {
+ if (psta == NULL) {
ret = -ENOENT;
goto exit;
}
@@ -2602,7 +2605,7 @@ static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy,
goto exit;
rtw_ps_deny(padapter, PS_DENY_MGNT_TX);
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = -EFAULT;
goto cancel_ps_deny;
}
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 380d8c9e1239..68bba3c0e757 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -664,51 +664,36 @@ void rtw_reset_drv_sw(struct adapter *padapter)
u8 rtw_init_drv_sw(struct adapter *padapter)
{
- u8 ret8 = _SUCCESS;
-
rtw_init_default_value(padapter);
rtw_init_hal_com_default_value(padapter);
- if (rtw_init_cmd_priv(&padapter->cmdpriv)) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (rtw_init_cmd_priv(&padapter->cmdpriv))
+ return _FAIL;
padapter->cmdpriv.padapter = padapter;
- if (rtw_init_evt_priv(&padapter->evtpriv)) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (rtw_init_evt_priv(&padapter->evtpriv))
+ goto free_cmd_priv;
-
- if (rtw_init_mlme_priv(padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (rtw_init_mlme_priv(padapter) == _FAIL)
+ goto free_evt_priv;
init_mlme_ext_priv(padapter);
- if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL)
+ goto free_mlme_ext;
- if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL)
+ goto free_xmit_priv;
/* add for CONFIG_IEEE80211W, none 11w also can use */
spin_lock_init(&padapter->security_key_mutex);
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
/* memset((unsigned char *)&padapter->securitypriv, 0, sizeof (struct security_priv)); */
- if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL)
+ goto free_recv_priv;
padapter->stapriv.padapter = padapter;
padapter->setband = GHZ24_50;
@@ -719,9 +704,26 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_hal_dm_init(padapter);
-exit:
+ return _SUCCESS;
+
+free_recv_priv:
+ _rtw_free_recv_priv(&padapter->recvpriv);
+
+free_xmit_priv:
+ _rtw_free_xmit_priv(&padapter->xmitpriv);
+
+free_mlme_ext:
+ free_mlme_ext_priv(&padapter->mlmeextpriv);
- return ret8;
+ rtw_free_mlme_priv(&padapter->mlmepriv);
+
+free_evt_priv:
+ rtw_free_evt_priv(&padapter->evtpriv);
+
+free_cmd_priv:
+ rtw_free_cmd_priv(&padapter->cmdpriv);
+
+ return _FAIL;
}
void rtw_cancel_all_timer(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 4fbfa75c05d7..f09c1324c39c 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -108,56 +108,6 @@ RETURN:
return;
}
-int rtw_change_ifname(struct adapter *padapter, const char *ifname)
-{
- struct net_device *pnetdev;
- struct net_device *cur_pnetdev;
- struct rereg_nd_name_data *rereg_priv;
- int ret;
-
- if (!padapter)
- goto error;
-
- cur_pnetdev = padapter->pnetdev;
- rereg_priv = &padapter->rereg_nd_name_priv;
-
- /* free the old_pnetdev */
- if (rereg_priv->old_pnetdev) {
- free_netdev(rereg_priv->old_pnetdev);
- rereg_priv->old_pnetdev = NULL;
- }
-
- if (!rtnl_is_locked())
- unregister_netdev(cur_pnetdev);
- else
- unregister_netdevice(cur_pnetdev);
-
- rereg_priv->old_pnetdev = cur_pnetdev;
-
- pnetdev = rtw_init_netdev(padapter);
- if (!pnetdev)
- goto error;
-
- SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));
-
- rtw_init_netdev_name(pnetdev, ifname);
-
- eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
-
- if (!rtnl_is_locked())
- ret = register_netdev(pnetdev);
- else
- ret = register_netdevice(pnetdev);
-
- if (ret != 0)
- goto error;
-
- return 0;
-
-error:
- return -1;
-}
-
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
if (!buf || !buf_len)
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
index 530e7a6c67c5..1eeabfffd6d2 100644
--- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -181,7 +181,7 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
return true;
}
-int _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
+void _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
{
struct adapter *padapter = rtw_netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -202,7 +202,7 @@ int _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
res = rtw_mlcst2unicst(padapter, pkt);
if (res)
- goto exit;
+ return;
}
}
@@ -210,22 +210,17 @@ int _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
if (res < 0)
goto drop_packet;
- goto exit;
+ return;
drop_packet:
pxmitpriv->tx_drop++;
dev_kfree_skb_any(pkt);
-
-exit:
- return 0;
}
-int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
+netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
{
- int ret = 0;
-
if (pkt)
- ret = _rtw_xmit_entry(pkt, pnetdev);
+ _rtw_xmit_entry(pkt, pnetdev);
- return ret;
+ return NETDEV_TX_OK;
}
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index ce04c38f6afd..168ae2e9005d 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -387,7 +387,8 @@ static int lynxfb_ops_set_par(struct fb_info *info)
ret = lynxfb_set_color_offsets(info);
- var->height = var->width = -1;
+ var->height = -1;
+ var->width = -1;
var->accel_flags = 0;/*FB_ACCELF_TEXT;*/
if (ret) {
@@ -499,7 +500,8 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
return ret;
}
- var->height = var->width = -1;
+ var->height = -1;
+ var->width = -1;
var->accel_flags = 0;/* FB_ACCELF_TEXT; */
/* check if current fb's video memory big enough to hold the onscreen*/
@@ -724,7 +726,8 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
0x800f0 + (int)crtc->channel * 0x140;
pr_info("crtc->cursor.mmio = %p\n", crtc->cursor.mmio);
- crtc->cursor.max_h = crtc->cursor.max_w = 64;
+ crtc->cursor.max_h = 64;
+ crtc->cursor.max_w = 64;
crtc->cursor.size = crtc->cursor.max_h * crtc->cursor.max_w * 2 / 8;
crtc->cursor.vstart = sm750_dev->pvMem + crtc->cursor.offset;
@@ -1022,7 +1025,8 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
if (!sm750_dev)
return err;
- sm750_dev->fbinfo[0] = sm750_dev->fbinfo[1] = NULL;
+ sm750_dev->fbinfo[0] = NULL;
+ sm750_dev->fbinfo[1] = NULL;
sm750_dev->devid = pdev->device;
sm750_dev->revid = pdev->revision;
sm750_dev->pdev = pdev;
diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
index 956476213241..020e0b3bce64 100644
--- a/drivers/staging/vme_user/vme_tsi148.c
+++ b/drivers/staging/vme_user/vme_tsi148.c
@@ -125,8 +125,8 @@ static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
for (i = 0; i < 4; i++) {
if (stat & TSI148_LCSR_INTS_MBS[i]) {
val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
- dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
- ": 0x%x\n", i, val);
+ dev_err(tsi148_bridge->parent, "VME Mailbox %d received: 0x%x\n",
+ i, val);
serviced |= TSI148_LCSR_INTC_MBC[i];
}
}
@@ -143,14 +143,12 @@ static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
bridge = tsi148_bridge->driver_priv;
- dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
- "attributes: %08x\n",
+ dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPAU),
ioread32be(bridge->base + TSI148_LCSR_EDPAL),
ioread32be(bridge->base + TSI148_LCSR_EDPAT));
- dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
- "completion reg: %08x\n",
+ dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPXA),
ioread32be(bridge->base + TSI148_LCSR_EDPXS));
@@ -180,10 +178,8 @@ static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
reg_join(error_addr_high, error_addr_low, &error_addr);
/* Check for exception register overflow (we have lost error data) */
- if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
- dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
- "Occurred\n");
- }
+ if (error_attrib & TSI148_LCSR_VEAT_VEOF)
+ dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow Occurred\n");
if (err_chk)
vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
@@ -317,8 +313,8 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
IRQF_SHARED,
driver_name, tsi148_bridge);
if (result) {
- dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
- "vector %02X\n", pdev->irq);
+ dev_err(tsi148_bridge->parent, "Can't get assigned pci irq vector %02X\n",
+ pdev->irq);
return result;
}
@@ -529,8 +525,7 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
return -EINVAL;
}
if (pci_offset_low & (granularity - 1)) {
- dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
- "alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid PCI Offset alignment\n");
return -EINVAL;
}
@@ -588,7 +583,7 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
temp_ctl &= ~0xF;
if (cycle & VME_SUPER)
- temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
+ temp_ctl |= TSI148_LCSR_ITAT_SUPR;
if (cycle & VME_USER)
temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
if (cycle & VME_PROG)
@@ -762,8 +757,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
- dev_err(tsi148_bridge->parent, "Failed to allocate mem "
- "resource for window %d size 0x%lx start 0x%lx\n",
+ dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
(unsigned long)image->bus_resource.start);
goto err_resource;
@@ -827,15 +821,13 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
/* Verify input data */
if (vme_base & 0xFFFF) {
- dev_err(tsi148_bridge->parent, "Invalid VME Window "
- "alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid VME Window alignment\n");
retval = -EINVAL;
goto err_window;
}
if ((size == 0) && (enabled != 0)) {
- dev_err(tsi148_bridge->parent, "Size must be non-zero for "
- "enabled windows\n");
+ dev_err(tsi148_bridge->parent, "Size must be non-zero for enabled windows\n");
retval = -EINVAL;
goto err_window;
}
@@ -849,8 +841,7 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
retval = tsi148_alloc_resource(image, size);
if (retval) {
spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
- "resource\n");
+ dev_err(tsi148_bridge->parent, "Unable to allocate memory for resource\n");
goto err_res;
}
@@ -890,8 +881,7 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
}
if (vme_offset_low & 0xFFFF) {
spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Invalid VME Offset "
- "alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid VME Offset alignment\n");
retval = -EINVAL;
goto err_gran;
}
@@ -937,8 +927,7 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
}
if (cycle & VME_2eSSTB) {
- dev_warn(tsi148_bridge->parent, "Currently not setting "
- "Broadcast Select Registers\n");
+ dev_warn(tsi148_bridge->parent, "Currently not setting Broadcast Select Registers\n");
temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
}
@@ -1451,8 +1440,7 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
val |= TSI148_LCSR_DSAT_TM_2eSST;
if (cycle & VME_2eSSTB) {
- dev_err(dev, "Currently not setting Broadcast Select "
- "Registers\n");
+ dev_err(dev, "Currently not setting Broadcast Select Registers\n");
val |= TSI148_LCSR_DSAT_TM_2eSSTB;
}
@@ -1550,8 +1538,7 @@ static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
val |= TSI148_LCSR_DDAT_TM_2eSST;
if (cycle & VME_2eSSTB) {
- dev_err(dev, "Currently not setting Broadcast Select "
- "Registers\n");
+ dev_err(dev, "Currently not setting Broadcast Select Registers\n");
val |= TSI148_LCSR_DDAT_TM_2eSSTB;
}
@@ -1639,8 +1626,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
/* Test descriptor alignment */
if ((unsigned long)&entry->descriptor & 0x7) {
- dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
- "byte boundary as required: %p\n",
+ dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 byte boundary as required: %p\n",
&entry->descriptor);
retval = -EINVAL;
goto err_align;
@@ -1827,10 +1813,10 @@ static int tsi148_dma_list_exec(struct vme_dma_list *list)
/* Need to add to pending here */
mutex_unlock(&ctrlr->mtx);
return -EBUSY;
- } else {
- list_add(&list->list, &ctrlr->running);
}
+ list_add(&list->list, &ctrlr->running);
+
/* Get first bus address and write into registers */
entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
list);
@@ -1935,8 +1921,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
- dev_err(tsi148_bridge->parent, "Location monitor "
- "callback attached, can't reset\n");
+ dev_err(tsi148_bridge->parent, "Location monitor callback attached, can't reset\n");
return -EBUSY;
}
}
@@ -1961,7 +1946,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
}
if (cycle & VME_SUPER)
- lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
+ lm_ctl |= TSI148_LCSR_LMAT_SUPR;
if (cycle & VME_USER)
lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
if (cycle & VME_PROG)
@@ -2051,8 +2036,7 @@ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
mutex_unlock(&lm->mtx);
- dev_err(tsi148_bridge->parent, "Location monitor not properly "
- "configured\n");
+ dev_err(tsi148_bridge->parent, "Location monitor not properly configured\n");
return -EINVAL;
}
@@ -2196,8 +2180,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus, GFP_KERNEL);
if (!bridge->crcsr_kernel) {
- dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
- "CR/CSR image\n");
+ dev_err(tsi148_bridge->parent, "Failed to allocate memory for CR/CSR image\n");
return -ENOMEM;
}
@@ -2237,8 +2220,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
VME_D16);
if (retval)
- dev_err(tsi148_bridge->parent, "Configuring flush image"
- " failed\n");
+ dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
}
return 0;
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 5de841cb776c..6ce41983dcf4 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -2083,7 +2083,7 @@ bool bb_vt3253_init(struct vnt_private *priv)
priv->dbm_threshold[2] = 0;
priv->dbm_threshold[3] = 0;
/* Fix VT3226 DFC system timing issue */
- MACvSetRFLE_LatchBase(iobase);
+ vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT);
/* {{ RobertYu: 20050104 */
} else {
/* No VGA Table now */
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 846469cc06bb..c680925b9c92 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -55,9 +55,15 @@ static const unsigned short cwRXBCNTSFOff[MAX_RATE] = {
/*--------------------- Static Functions --------------------------*/
-static void s_vCalculateOFDMRParameter(unsigned char rate, u8 bb_type,
- unsigned char *pbyTxRate,
- unsigned char *pbyRsvTime);
+static void vt6655_mac_set_bb_type(void __iomem *iobase, u32 mask)
+{
+ u32 reg_value;
+
+ reg_value = ioread32(iobase + MAC_REG_ENCFG);
+ reg_value = reg_value & ~ENCFG_BBTYPE_MASK;
+ reg_value = reg_value | mask;
+ iowrite32(reg_value, iobase + MAC_REG_ENCFG);
+}
/*--------------------- Export Functions --------------------------*/
@@ -186,21 +192,21 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
/* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
if (bb_type == BB_TYPE_11A) {
- MACvSetBBType(priv->port_offset, BB_TYPE_11A);
+ vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11A);
bb_write_embedded(priv, 0x88, 0x03);
bySlot = C_SLOT_SHORT;
bySIFS = C_SIFS_A;
byDIFS = C_SIFS_A + 2 * C_SLOT_SHORT;
byCWMaxMin = 0xA4;
} else if (bb_type == BB_TYPE_11B) {
- MACvSetBBType(priv->port_offset, BB_TYPE_11B);
+ vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11B);
bb_write_embedded(priv, 0x88, 0x02);
bySlot = C_SLOT_LONG;
bySIFS = C_SIFS_BG;
byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
byCWMaxMin = 0xA5;
} else { /* PK_TYPE_11GA & PK_TYPE_11GB */
- MACvSetBBType(priv->port_offset, BB_TYPE_11G);
+ vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11G);
bb_write_embedded(priv, 0x88, 0x08);
bySIFS = C_SIFS_BG;
@@ -403,9 +409,9 @@ void CARDvSafeResetTx(struct vnt_private *priv)
}
/* set MAC TD pointer */
- MACvSetCurrTXDescAddr(TYPE_TXDMA0, priv, priv->td0_pool_dma);
+ vt6655_mac_set_curr_tx_desc_addr(TYPE_TXDMA0, priv, priv->td0_pool_dma);
- MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv, priv->td1_pool_dma);
+ vt6655_mac_set_curr_tx_desc_addr(TYPE_AC0DMA, priv, priv->td1_pool_dma);
/* set MAC Beacon TX pointer */
iowrite32((u32)priv->tx_beacon_dma, priv->port_offset + MAC_REG_BCNDMAPTR);
@@ -452,9 +458,9 @@ void CARDvSafeResetRx(struct vnt_private *priv)
iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL0);
iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL1);
/* set MAC RD pointer */
- MACvSetCurrRx0DescAddr(priv, priv->rd0_pool_dma);
+ vt6655_mac_set_curr_rx_0_desc_addr(priv, priv->rd0_pool_dma);
- MACvSetCurrRx1DescAddr(priv, priv->rd1_pool_dma);
+ vt6655_mac_set_curr_rx_1_desc_addr(priv, priv->rd1_pool_dma);
}
/*
@@ -539,7 +545,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
spin_lock_irqsave(&priv->lock, flags);
/* Set to Page1 */
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
/* RSPINF_b_1 */
vnt_get_phy_field(priv, 14,
@@ -637,7 +643,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_72);
/* Set to Page0 */
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
}
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index e926f9829a15..4122875ebcaa 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -116,12 +116,12 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
spin_lock_irqsave(&priv->lock, flags);
/* set HW default power register */
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWRCCK);
RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWROFDM);
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
}
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index bab08a40fe66..56c3cf3ba53d 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -205,6 +205,55 @@ static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr)
iowrite8(0, iobase + MAC_REG_PAGE1SEL);
}
+static void vt6655_mac_dma_ctl(void __iomem *iobase, u8 reg_index)
+{
+ u32 reg_value;
+
+ reg_value = ioread32(iobase + reg_index);
+ if (reg_value & DMACTL_RUN)
+ iowrite32(DMACTL_WAKE, iobase + reg_index);
+ else
+ iowrite32(DMACTL_RUN, iobase + reg_index);
+}
+
+static void vt6655_mac_set_bits(void __iomem *iobase, u32 mask)
+{
+ u32 reg_value;
+
+ reg_value = ioread32(iobase + MAC_REG_ENCFG);
+ reg_value = reg_value | mask;
+ iowrite32(reg_value, iobase + MAC_REG_ENCFG);
+}
+
+static void vt6655_mac_clear_bits(void __iomem *iobase, u32 mask)
+{
+ u32 reg_value;
+
+ reg_value = ioread32(iobase + MAC_REG_ENCFG);
+ reg_value = reg_value & ~mask;
+ iowrite32(reg_value, iobase + MAC_REG_ENCFG);
+}
+
+static void vt6655_mac_en_protect_md(void __iomem *iobase)
+{
+ vt6655_mac_set_bits(iobase, ENCFG_PROTECTMD);
+}
+
+static void vt6655_mac_dis_protect_md(void __iomem *iobase)
+{
+ vt6655_mac_clear_bits(iobase, ENCFG_PROTECTMD);
+}
+
+static void vt6655_mac_en_barker_preamble_md(void __iomem *iobase)
+{
+ vt6655_mac_set_bits(iobase, ENCFG_BARKERPREAM);
+}
+
+static void vt6655_mac_dis_barker_preamble_md(void __iomem *iobase)
+{
+ vt6655_mac_clear_bits(iobase, ENCFG_BARKERPREAM);
+}
+
/*
* Initialisation of MAC & BBP registers
*/
@@ -351,11 +400,11 @@ static void device_init_registers(struct vnt_private *priv)
}
if (priv->local_id > REV_ID_VT3253_B1) {
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
}
/* use relative tx timeout and 802.11i D4 */
@@ -363,7 +412,7 @@ static void device_init_registers(struct vnt_private *priv)
(CFG_TKIPOPT | CFG_NOTXTIMEOUT));
/* set performance parameter by registry */
- MACvSetShortRetryLimit(priv, priv->byShortRetryLimit);
+ vt6655_mac_set_short_retry_limit(priv, priv->byShortRetryLimit);
MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
/* reset TSF counter */
@@ -420,8 +469,8 @@ static void device_init_registers(struct vnt_private *priv)
vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);
/* Turn On Rx DMA */
- MACvReceive0(priv->port_offset);
- MACvReceive1(priv->port_offset);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL0);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL1);
/* start the adapter */
iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
@@ -537,13 +586,12 @@ static void device_free_rings(struct vnt_private *priv)
priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
priv->aRD0Ring, priv->pool_dma);
- if (priv->tx0_bufs)
- dma_free_coherent(&priv->pcid->dev,
- priv->opts.tx_descs[0] * PKT_BUF_SZ +
- priv->opts.tx_descs[1] * PKT_BUF_SZ +
- CB_BEACON_BUF_SIZE +
- CB_MAX_BUF_SIZE,
- priv->tx0_bufs, priv->tx_bufs_dma0);
+ dma_free_coherent(&priv->pcid->dev,
+ priv->opts.tx_descs[0] * PKT_BUF_SZ +
+ priv->opts.tx_descs[1] * PKT_BUF_SZ +
+ CB_BEACON_BUF_SIZE +
+ CB_MAX_BUF_SIZE,
+ priv->tx0_bufs, priv->tx_bufs_dma0);
}
static int device_init_rd0_ring(struct vnt_private *priv)
@@ -583,7 +631,7 @@ err_free_rd:
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD0Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -629,7 +677,7 @@ err_free_rd:
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD1Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -694,7 +742,7 @@ static int device_init_td0_ring(struct vnt_private *priv)
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD0Rings[i];
kfree(desc->td_info);
}
@@ -734,7 +782,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD1Rings[i];
kfree(desc->td_info);
}
@@ -1135,8 +1183,8 @@ static void vnt_interrupt_process(struct vnt_private *priv)
isr = ioread32(priv->port_offset + MAC_REG_ISR);
- MACvReceive0(priv->port_offset);
- MACvReceive1(priv->port_offset);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL0);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL1);
if (max_count > priv->opts.int_works)
break;
@@ -1218,9 +1266,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
wmb(); /* second memory barrier */
if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
- MACvTransmitAC0(priv->port_offset);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_AC0DMACTL);
else
- MACvTransmit0(priv->port_offset);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_TXDMACTL0);
priv->iTDUsed[dma_idx]++;
@@ -1440,19 +1488,19 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (conf->use_short_preamble) {
- MACvEnableBarkerPreambleMd(priv->port_offset);
+ vt6655_mac_en_barker_preamble_md(priv->port_offset);
priv->preamble_type = true;
} else {
- MACvDisableBarkerPreambleMd(priv->port_offset);
+ vt6655_mac_dis_barker_preamble_md(priv->port_offset);
priv->preamble_type = false;
}
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (conf->use_cts_prot)
- MACvEnableProtectMD(priv->port_offset);
+ vt6655_mac_en_protect_md(priv->port_offset);
else
- MACvDisableProtectMD(priv->port_offset);
+ vt6655_mac_dis_protect_md(priv->port_offset);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1538,21 +1586,21 @@ static void vnt_configure(struct ieee80211_hw *hw,
spin_lock_irqsave(&priv->lock, flags);
if (priv->mc_list_count > 2) {
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0);
iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4);
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
} else {
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
multicast = le64_to_cpu(multicast);
iowrite32((u32)multicast, priv->port_offset + MAC_REG_MAR0);
iowrite32((u32)(multicast >> 32),
priv->port_offset + MAC_REG_MAR0 + 4);
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
}
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index dcc649532737..b4ebc7d31961 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -10,17 +10,16 @@
* Date: May 21, 1996
*
* Functions:
- * MACbIsRegBitsOff - Test if All test Bits Off
- * MACbIsIntDisable - Test if MAC interrupt disable
- * MACvSetShortRetryLimit - Set 802.11 Short Retry limit
+ * vt6655_mac_is_reg_bits_off - Test if All test Bits Off
+ * vt6655_mac_set_short_retry_limit - Set 802.11 Short Retry limit
* MACvSetLongRetryLimit - Set 802.11 Long Retry limit
- * MACvSetLoopbackMode - Set MAC Loopback Mode
- * MACvSaveContext - Save Context of MAC Registers
- * MACvRestoreContext - Restore Context of MAC Registers
+ * vt6655_mac_set_loopback_mode - Set MAC Loopback Mode
+ * vt6655_mac_save_context - Save Context of MAC Registers
+ * vt6655_mac_restore_context - Restore Context of MAC Registers
* MACbSoftwareReset - Software Reset MAC
- * MACbSafeRxOff - Turn Off MAC Rx
- * MACbSafeTxOff - Turn Off MAC Tx
- * MACbSafeStop - Stop MAC function
+ * vt6655_mac_safe_rx_off - Turn Off MAC Rx
+ * vt6655_mac_safe_tx_off - Turn Off MAC Tx
+ * vt6655_mac_safe_stop - Stop MAC function
* MACbShutdown - Shut down MAC
* MACvInitialize - Initialize MAC
* MACvSetCurrRxDescAddr - Set Rx Descriptors Address
@@ -86,43 +85,21 @@ static void vt6655_mac_clear_stck_ds(void __iomem *iobase)
* Parameters:
* In:
* io_base - Base Address for MAC
- * byRegOfs - Offset of MAC Register
- * byTestBits - Test bits
+ * reg_offset - Offset of MAC Register
+ * mask - Test bits
* Out:
* none
*
* Return Value: true if all test bits Off; otherwise false
*
*/
-bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
- unsigned char byTestBits)
+static bool vt6655_mac_is_reg_bits_off(struct vnt_private *priv,
+ unsigned char reg_offset,
+ unsigned char mask)
{
void __iomem *io_base = priv->port_offset;
- return !(ioread8(io_base + byRegOfs) & byTestBits);
-}
-
-/*
- * Description:
- * Test if MAC interrupt disable
- *
- * Parameters:
- * In:
- * io_base - Base Address for MAC
- * Out:
- * none
- *
- * Return Value: true if interrupt is disable; otherwise false
- *
- */
-bool MACbIsIntDisable(struct vnt_private *priv)
-{
- void __iomem *io_base = priv->port_offset;
-
- if (ioread32(io_base + MAC_REG_IMR))
- return false;
-
- return true;
+ return !(ioread8(io_base + reg_offset) & mask);
}
/*
@@ -132,19 +109,18 @@ bool MACbIsIntDisable(struct vnt_private *priv)
* Parameters:
* In:
* io_base - Base Address for MAC
- * byRetryLimit- Retry Limit
+ * retry_limit - Retry Limit
* Out:
* none
*
* Return Value: none
*
*/
-void MACvSetShortRetryLimit(struct vnt_private *priv,
- unsigned char byRetryLimit)
+void vt6655_mac_set_short_retry_limit(struct vnt_private *priv, unsigned char retry_limit)
{
void __iomem *io_base = priv->port_offset;
/* set SRT */
- iowrite8(byRetryLimit, io_base + MAC_REG_SRT);
+ iowrite8(retry_limit, io_base + MAC_REG_SRT);
}
/*
@@ -176,21 +152,20 @@ void MACvSetLongRetryLimit(struct vnt_private *priv,
* Parameters:
* In:
* io_base - Base Address for MAC
- * byLoopbackMode - Loopback Mode
+ * loopback_mode - Loopback Mode
* Out:
* none
*
* Return Value: none
*
*/
-void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode)
+static void vt6655_mac_set_loopback_mode(struct vnt_private *priv, u8 loopback_mode)
{
void __iomem *io_base = priv->port_offset;
- byLoopbackMode <<= 6;
+ loopback_mode <<= 6;
/* set TCR */
- iowrite8((ioread8(io_base + MAC_REG_TEST) & 0x3f) | byLoopbackMode,
- io_base + MAC_REG_TEST);
+ iowrite8((ioread8(io_base + MAC_REG_TEST) & 0x3f) | loopback_mode, io_base + MAC_REG_TEST);
}
/*
@@ -206,20 +181,20 @@ void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode)
* Return Value: none
*
*/
-void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf)
+static void vt6655_mac_save_context(struct vnt_private *priv, u8 *cxt_buf)
{
void __iomem *io_base = priv->port_offset;
/* read page0 register */
memcpy_fromio(cxt_buf, io_base, MAC_MAX_CONTEXT_SIZE_PAGE0);
- MACvSelectPage1(io_base);
+ VT6655_MAC_SELECT_PAGE1(io_base);
/* read page1 register */
memcpy_fromio(cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0, io_base,
MAC_MAX_CONTEXT_SIZE_PAGE1);
- MACvSelectPage0(io_base);
+ VT6655_MAC_SELECT_PAGE0(io_base);
}
/*
@@ -236,16 +211,16 @@ void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf)
* Return Value: none
*
*/
-void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf)
+static void vt6655_mac_restore_context(struct vnt_private *priv, u8 *cxt_buf)
{
void __iomem *io_base = priv->port_offset;
- MACvSelectPage1(io_base);
+ VT6655_MAC_SELECT_PAGE1(io_base);
/* restore page1 */
memcpy_toio(io_base, cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0,
MAC_MAX_CONTEXT_SIZE_PAGE1);
- MACvSelectPage0(io_base);
+ VT6655_MAC_SELECT_PAGE0(io_base);
/* restore RCR,TCR,IMR... */
memcpy_toio(io_base + MAC_REG_RCR, cxt_buf + MAC_REG_RCR,
@@ -318,23 +293,20 @@ bool MACbSoftwareReset(struct vnt_private *priv)
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeSoftwareReset(struct vnt_private *priv)
+static void vt6655_mac_save_soft_reset(struct vnt_private *priv)
{
- unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];
- bool bRetVal;
+ u8 tmp_reg_data[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];
/* PATCH....
* save some important register's value, then do
* reset, then restore register's value
*/
/* save MAC context */
- MACvSaveContext(priv, abyTmpRegData);
+ vt6655_mac_save_context(priv, tmp_reg_data);
/* do reset */
- bRetVal = MACbSoftwareReset(priv);
+ MACbSoftwareReset(priv);
/* restore MAC context, except CR0 */
- MACvRestoreContext(priv, abyTmpRegData);
-
- return bRetVal;
+ vt6655_mac_restore_context(priv, tmp_reg_data);
}
/*
@@ -350,7 +322,7 @@ bool MACbSafeSoftwareReset(struct vnt_private *priv)
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeRxOff(struct vnt_private *priv)
+static bool vt6655_mac_safe_rx_off(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -404,7 +376,7 @@ bool MACbSafeRxOff(struct vnt_private *priv)
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeTxOff(struct vnt_private *priv)
+static bool vt6655_mac_safe_tx_off(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -460,20 +432,20 @@ bool MACbSafeTxOff(struct vnt_private *priv)
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeStop(struct vnt_private *priv)
+static bool vt6655_mac_safe_stop(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
vt6655_mac_reg_bits_off(io_base, MAC_REG_TCR, TCR_AUTOBCNTX);
- if (!MACbSafeRxOff(priv)) {
- pr_debug(" MACbSafeRxOff == false)\n");
- MACbSafeSoftwareReset(priv);
+ if (!vt6655_mac_safe_rx_off(priv)) {
+ pr_debug(" vt6655_mac_safe_rx_off == false)\n");
+ vt6655_mac_save_soft_reset(priv);
return false;
}
- if (!MACbSafeTxOff(priv)) {
- pr_debug(" MACbSafeTxOff == false)\n");
- MACbSafeSoftwareReset(priv);
+ if (!vt6655_mac_safe_tx_off(priv)) {
+ pr_debug(" vt6655_mac_safe_tx_off == false)\n");
+ vt6655_mac_save_soft_reset(priv);
return false;
}
@@ -500,13 +472,13 @@ bool MACbShutdown(struct vnt_private *priv)
void __iomem *io_base = priv->port_offset;
/* disable MAC IMR */
iowrite32(0, io_base + MAC_REG_IMR);
- MACvSetLoopbackMode(priv, MAC_LB_INTERNAL);
+ vt6655_mac_set_loopback_mode(priv, MAC_LB_INTERNAL);
/* stop the adapter */
- if (!MACbSafeStop(priv)) {
- MACvSetLoopbackMode(priv, MAC_LB_NONE);
+ if (!vt6655_mac_safe_stop(priv)) {
+ vt6655_mac_set_loopback_mode(priv, MAC_LB_NONE);
return false;
}
- MACvSetLoopbackMode(priv, MAC_LB_NONE);
+ vt6655_mac_set_loopback_mode(priv, MAC_LB_NONE);
return true;
}
@@ -555,7 +527,7 @@ void MACvInitialize(struct vnt_private *priv)
* Return Value: none
*
*/
-void MACvSetCurrRx0DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
+void vt6655_mac_set_curr_rx_0_desc_addr(struct vnt_private *priv, u32 curr_desc_addr)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -589,7 +561,7 @@ void MACvSetCurrRx0DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
* Return Value: none
*
*/
-void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
+void vt6655_mac_set_curr_rx_1_desc_addr(struct vnt_private *priv, u32 curr_desc_addr)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -623,8 +595,7 @@ void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
* Return Value: none
*
*/
-void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
- u32 curr_desc_addr)
+static void vt6655_mac_set_curr_tx_0_desc_addr_ex(struct vnt_private *priv, u32 curr_desc_addr)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -659,8 +630,7 @@ void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
*
*/
/* TxDMA1 = AC0DMA */
-void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
- u32 curr_desc_addr)
+static void vt6655_mac_set_curr_ac_0_desc_addr_ex(struct vnt_private *priv, u32 curr_desc_addr)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -681,13 +651,12 @@ void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
iowrite8(DMACTL_RUN, io_base + MAC_REG_AC0DMACTL);
}
-void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv,
- u32 curr_desc_addr)
+void vt6655_mac_set_curr_tx_desc_addr(int tx_type, struct vnt_private *priv, u32 curr_desc_addr)
{
- if (iTxType == TYPE_AC0DMA)
- MACvSetCurrAC0DescAddrEx(priv, curr_desc_addr);
- else if (iTxType == TYPE_TXDMA0)
- MACvSetCurrTx0DescAddrEx(priv, curr_desc_addr);
+ if (tx_type == TYPE_AC0DMA)
+ vt6655_mac_set_curr_ac_0_desc_addr_ex(priv, curr_desc_addr);
+ else if (tx_type == TYPE_TXDMA0)
+ vt6655_mac_set_curr_tx_0_desc_addr_ex(priv, curr_desc_addr);
}
/*
@@ -767,7 +736,7 @@ bool MACbPSWakeup(struct vnt_private *priv)
void __iomem *io_base = priv->port_offset;
unsigned int ww;
/* Read PSCTL */
- if (MACbIsRegBitsOff(priv, MAC_REG_PSCTL, PSCTL_PS))
+ if (vt6655_mac_is_reg_bits_off(priv, MAC_REG_PSCTL, PSCTL_PS))
return true;
/* Disable PS */
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 0122c4603c66..acf931c3f5fd 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -12,7 +12,7 @@
* Revision History:
* 07-01-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
* 08-25-2003 Kyle Hsu: Porting MAC functions from sim53.
- * 09-03-2003 Bryan YC Fan: Add MACvDisableProtectMD & MACvEnableProtectMD
+ * 09-03-2003 Bryan YC Fan: Add vt6655_mac_dis_protect_md & vt6655_mac_en_protect_md
*/
#ifndef __MAC_H__
@@ -537,95 +537,9 @@
/*--------------------- Export Macros ------------------------------*/
-#define MACvReceive0(iobase) \
-do { \
- unsigned long dwData; \
- dwData = ioread32(iobase + MAC_REG_RXDMACTL0); \
- if (dwData & DMACTL_RUN) \
- iowrite32(DMACTL_WAKE, iobase + MAC_REG_RXDMACTL0); \
- else \
- iowrite32(DMACTL_RUN, iobase + MAC_REG_RXDMACTL0); \
-} while (0)
-
-#define MACvReceive1(iobase) \
-do { \
- unsigned long dwData; \
- dwData = ioread32(iobase + MAC_REG_RXDMACTL1); \
- if (dwData & DMACTL_RUN) \
- iowrite32(DMACTL_WAKE, iobase + MAC_REG_RXDMACTL1); \
- else \
- iowrite32(DMACTL_RUN, iobase + MAC_REG_RXDMACTL1); \
-} while (0)
-
-#define MACvTransmit0(iobase) \
-do { \
- unsigned long dwData; \
- dwData = ioread32(iobase + MAC_REG_TXDMACTL0); \
- if (dwData & DMACTL_RUN) \
- iowrite32(DMACTL_WAKE, iobase + MAC_REG_TXDMACTL0); \
- else \
- iowrite32(DMACTL_RUN, iobase + MAC_REG_TXDMACTL0); \
-} while (0)
-
-#define MACvTransmitAC0(iobase) \
-do { \
- unsigned long dwData; \
- dwData = ioread32(iobase + MAC_REG_AC0DMACTL); \
- if (dwData & DMACTL_RUN) \
- iowrite32(DMACTL_WAKE, iobase + MAC_REG_AC0DMACTL); \
- else \
- iowrite32(DMACTL_RUN, iobase + MAC_REG_AC0DMACTL); \
-} while (0)
-
-#define MACvSelectPage0(iobase) \
- iowrite8(0, iobase + MAC_REG_PAGE1SEL)
-
-#define MACvSelectPage1(iobase) \
- iowrite8(1, iobase + MAC_REG_PAGE1SEL)
-
-#define MACvEnableProtectMD(iobase) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue | ENCFG_PROTECTMD; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvDisableProtectMD(iobase) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue & ~ENCFG_PROTECTMD; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvEnableBarkerPreambleMd(iobase) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue | ENCFG_BARKERPREAM; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvDisableBarkerPreambleMd(iobase) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue & ~ENCFG_BARKERPREAM; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvSetBBType(iobase, byTyp) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue & ~ENCFG_BBTYPE_MASK; \
- dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvSetRFLE_LatchBase(iobase) \
- vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+#define VT6655_MAC_SELECT_PAGE0(iobase) iowrite8(0, iobase + MAC_REG_PAGE1SEL)
+
+#define VT6655_MAC_SELECT_PAGE1(iobase) iowrite8(1, iobase + MAC_REG_PAGE1SEL)
#define MAKEWORD(lb, hb) \
((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
@@ -635,38 +549,16 @@ void vt6655_mac_word_reg_bits_on(void __iomem *iobase, const u8 reg_offset, cons
void vt6655_mac_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask);
void vt6655_mac_word_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask);
-bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
- unsigned char byTestBits);
-
-bool MACbIsIntDisable(struct vnt_private *priv);
-
-void MACvSetShortRetryLimit(struct vnt_private *priv,
- unsigned char byRetryLimit);
+void vt6655_mac_set_short_retry_limit(struct vnt_private *priv, unsigned char retry_limit);
void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit);
-void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode);
-
-void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf);
-void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf);
-
bool MACbSoftwareReset(struct vnt_private *priv);
-bool MACbSafeSoftwareReset(struct vnt_private *priv);
-bool MACbSafeRxOff(struct vnt_private *priv);
-bool MACbSafeTxOff(struct vnt_private *priv);
-bool MACbSafeStop(struct vnt_private *priv);
bool MACbShutdown(struct vnt_private *priv);
void MACvInitialize(struct vnt_private *priv);
-void MACvSetCurrRx0DescAddr(struct vnt_private *priv,
- u32 curr_desc_addr);
-void MACvSetCurrRx1DescAddr(struct vnt_private *priv,
- u32 curr_desc_addr);
-void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv,
- u32 curr_desc_addr);
-void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
- u32 curr_desc_addr);
-void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
- u32 curr_desc_addr);
+void vt6655_mac_set_curr_rx_0_desc_addr(struct vnt_private *priv, u32 curr_desc_addr);
+void vt6655_mac_set_curr_rx_1_desc_addr(struct vnt_private *priv, u32 curr_desc_addr);
+void vt6655_mac_set_curr_tx_desc_addr(int tx_type, struct vnt_private *priv, u32 curr_desc_addr);
void MACvSetCurrSyncDescAddrEx(struct vnt_private *priv,
u32 curr_desc_addr);
void MACvSetCurrATIMDescAddrEx(struct vnt_private *priv,
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 5654dc54ae91..1cee51a1075e 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -137,8 +137,6 @@ struct p80211_frmrx {
/* called by /proc/net/wireless */
struct iw_statistics *p80211wext_get_wireless_stats(struct net_device *dev);
-/* wireless extensions' ioctls */
-extern struct iw_handler_def p80211wext_handler_def;
/* WEP stuff */
#define NUM_WEPKEYS 4
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 3336d2b78bf7..d9204c590d9a 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1202,7 +1202,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
opt2 |= T5_ISS_F;
- rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+ rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
opt2 |= T5_OPT_2_VALID_F;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fb91423a4e2e..c8470e7c0e10 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -164,6 +164,9 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
+ /* Skip empty port groups */
+ if (!tg_pt_gp->tg_pt_gp_members)
+ continue;
/*
* Check if the Target port group and Target port descriptor list
* based on tg_pt_gp_members count will fit into the response payload.
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index a889a6237d9c..30fcf69e1a1d 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -133,8 +133,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl);
/* target_core_transport.c */
-extern struct kmem_cache *se_tmr_req_cache;
-
int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 8a7306e5e133..69a4c9581e80 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -500,7 +500,7 @@ static int pscsi_configure_device(struct se_device *dev)
continue;
/*
* Functions will release the held struct scsi_host->host_lock
- * before calling calling pscsi_add_device_to_list() to register
+ * before calling pscsi_add_device_to_list() to register
* struct scsi_device with target_core_mod.
*/
switch (sd->type) {
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index c14441c89bed..7cca3b15472b 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -115,6 +115,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[5] |= 0x1;
}
+ /*
+ * Set MULTIP bit to indicate presence of multiple SCSI target ports
+ */
+ if (dev->export_count > 1)
+ buf[6] |= 0x10;
+
buf[7] = 0x2; /* CmdQue=1 */
/*
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 28f87cd8b3ed..290b1bb0e9cd 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -492,15 +492,18 @@ static bool is_normal_memory(pgprot_t p)
#endif
}
-static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+static int __check_mem_type(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
- while (vma && is_normal_memory(vma->vm_page_prot)) {
- if (vma->vm_end >= end)
- return 0;
- vma = vma->vm_next;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, start);
+
+ for_each_vma_range(vmi, vma, end) {
+ if (!is_normal_memory(vma->vm_page_prot))
+ return -EINVAL;
}
- return -EINVAL;
+ return 0;
}
int optee_check_mem_type(unsigned long start, size_t num_pages)
@@ -516,8 +519,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
return 0;
mmap_read_lock(mm);
- rc = __check_mem_type(find_vma(mm, start),
- start + num_pages * PAGE_SIZE);
+ rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
mmap_read_unlock(mm);
return rc;
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index def8e1a0399c..2506c6c8ca83 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_DA9062_THERMAL) += da9062-thermal.o
obj-y += intel/
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-y += st/
-obj-$(CONFIG_QCOM_TSENS) += qcom/
+obj-y += qcom/
obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 10bfa6507eb4..5d92b70a5d53 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -76,59 +76,55 @@ static const struct thermal_zone_device_ops imx_sc_thermal_ops = {
static int imx_sc_thermal_probe(struct platform_device *pdev)
{
- struct device_node *np, *child, *sensor_np;
struct imx_sc_sensor *sensor;
- int ret;
+ const int *resource_id;
+ int i, ret;
ret = imx_scu_get_handle(&thermal_ipc_handle);
if (ret)
return ret;
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np)
- return -ENODEV;
+ resource_id = of_device_get_match_data(&pdev->dev);
+ if (!resource_id)
+ return -EINVAL;
- sensor_np = of_node_get(pdev->dev.of_node);
+ for (i = 0; resource_id[i] > 0; i++) {
- for_each_available_child_of_node(np, child) {
sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
- if (!sensor) {
- of_node_put(child);
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!sensor)
+ return -ENOMEM;
- ret = thermal_zone_of_get_sensor_id(child,
- sensor_np,
- &sensor->resource_id);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to get valid sensor resource id: %d\n",
- ret);
- of_node_put(child);
- break;
- }
+ sensor->resource_id = resource_id[i];
- sensor->tzd = devm_thermal_of_zone_register(&pdev->dev,
- sensor->resource_id,
- sensor,
- &imx_sc_thermal_ops);
+ sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, sensor->resource_id,
+ sensor, &imx_sc_thermal_ops);
if (IS_ERR(sensor->tzd)) {
- dev_err(&pdev->dev, "failed to register thermal zone\n");
+ /*
+ * Save the error value before freeing the
+					 * sensor pointer, otherwise we end up with a
+ * use-after-free error
+ */
ret = PTR_ERR(sensor->tzd);
- of_node_put(child);
- break;
+
+ devm_kfree(&pdev->dev, sensor);
+
+ /*
+ * The thermal framework notifies us there is
+					 * The thermal framework notifies us that there is
+ * sensor id
+ */
+ if (ret == -ENODEV)
+ continue;
+
+ dev_err(&pdev->dev, "failed to register thermal zone\n");
+ return ret;
}
if (devm_thermal_add_hwmon_sysfs(sensor->tzd))
dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");
}
-put_node:
- of_node_put(sensor_np);
- of_node_put(np);
-
- return ret;
+ return 0;
}
static int imx_sc_thermal_remove(struct platform_device *pdev)
@@ -136,8 +132,10 @@ static int imx_sc_thermal_remove(struct platform_device *pdev)
return 0;
}
+static int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 };
+
static const struct of_device_id imx_sc_thermal_table[] = {
- { .compatible = "fsl,imx-sc-thermal", },
+ { .compatible = "fsl,imx-sc-thermal", .data = imx_sc_sensors },
{}
};
MODULE_DEVICE_TABLE(of, imx_sc_thermal_table);
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index bfd889422dd3..2c7f3f9a26eb 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config QCOM_TSENS
tristate "Qualcomm TSENS Temperature Alarm"
- depends on QCOM_QFPROM
+ depends on NVMEM_QCOM_QFPROM
depends on ARCH_QCOM || COMPILE_TEST
help
This enables the thermal sysfs driver for the TSENS device. It shows
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index af68adf720cc..1b2c43eab27d 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -829,7 +829,8 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
}
channel->adc_channel = args.args[0];
- channel->iio = devm_of_iio_channel_get_by_name(adc_tm->dev, node, NULL);
+ channel->iio = devm_fwnode_iio_channel_get_by_name(adc_tm->dev,
+ of_fwnode_handle(node), NULL);
if (IS_ERR(channel->iio)) {
ret = PTR_ERR(channel->iio);
if (ret != -EPROBE_DEFER)
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index f136cb350238..327f37202c69 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -604,7 +604,7 @@ static const struct tsens_ops ops_8939 = {
struct tsens_plat_data data_8939 = {
.num_sensors = 10,
.ops = &ops_8939,
- .hw_ids = (unsigned int []){ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10 },
+ .hw_ids = (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 },
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 4df42d70d867..61c2b8855cb8 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -316,7 +316,7 @@ static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
return 0;
}
-static struct thermal_zone_device_ops rcar_thermal_zone_of_ops = {
+static const struct thermal_zone_device_ops rcar_thermal_zone_of_ops = {
.get_temp = rcar_thermal_get_temp,
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 7e669b60a065..117eeaf7dd24 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1186,7 +1186,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (type && strlen(type) >= THERMAL_NAME_LENGTH) {
+ if (strlen(type) >= THERMAL_NAME_LENGTH) {
pr_err("Thermal zone name (%s) too long, should be under %d chars\n",
type, THERMAL_NAME_LENGTH);
return ERR_PTR(-EINVAL);
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index fd2fb84bf246..d4b6335ace15 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -130,50 +130,6 @@ static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
return -EINVAL;
}
-/**
- * thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone
- * @tz_np: a valid thermal zone device node.
- * @sensor_np: a sensor node of a valid sensor device.
- * @id: the sensor ID returned if success.
- *
- * This function will get sensor ID from a given thermal zone node and
- * the sensor node must match the temperature provider @sensor_np.
- *
- * Return: 0 on success, proper error code otherwise.
- */
-
-int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
- struct device_node *sensor_np,
- u32 *id)
-{
- struct of_phandle_args sensor_specs;
- int ret;
-
- ret = of_parse_phandle_with_args(tz_np,
- "thermal-sensors",
- "#thermal-sensor-cells",
- 0,
- &sensor_specs);
- if (ret)
- return ret;
-
- if (sensor_specs.np != sensor_np) {
- of_node_put(sensor_specs.np);
- return -ENODEV;
- }
-
- if (sensor_specs.args_count > 1)
- pr_warn("%pOFn: too many cells in sensor specifier %d\n",
- sensor_specs.np, sensor_specs.args_count);
-
- *id = sensor_specs.args_count ? sensor_specs.args[0] : 0;
-
- of_node_put(sensor_specs.np);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id);
-
/*** functions parsing device tree nodes ***/
static int of_find_trip_id(struct device_node *np, struct device_node *trip)
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 78c5841bdfae..ec495c7dff03 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -128,9 +128,11 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
- ret = tz->ops->set_trip_temp(tz, trip, temperature);
- if (ret)
- return ret;
+ if (tz->ops->set_trip_temp) {
+ ret = tz->ops->set_trip_temp(tz, trip, temperature);
+ if (ret)
+ return ret;
+ }
if (tz->trips)
tz->trips[trip].temperature = temperature;
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index f12d0a3ee3e2..448fd2ec8f6e 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -27,6 +27,16 @@ config USB4_DEBUGFS_WRITE
Only enable this if you know what you are doing! Never enable
this for production systems or distro kernels.
+config USB4_DEBUGFS_MARGINING
+ bool "Expose receiver lane margining operations under USB4 ports (DANGEROUS)"
+ depends on DEBUG_FS
+ depends on USB4_DEBUGFS_WRITE
+ help
+ Enables hardware and software based receiver lane margining support
+ under each USB4 port. Used for electrical quality and robustness
+ validation during manufacturing. Should not be enabled by distro
+ kernels.
+
config USB4_KUNIT_TEST
bool "KUnit tests" if !KUNIT_ALL_TESTS
depends on USB4 && KUNIT=y
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index c850b0ac098c..834bcad42e9f 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -12,6 +12,7 @@
#include <linux/uaccess.h>
#include "tb.h"
+#include "sb_regs.h"
#define PORT_CAP_PCIE_LEN 1
#define PORT_CAP_POWER_LEN 2
@@ -187,6 +188,828 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
#define DEBUGFS_MODE 0400
#endif
+#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
+/**
+ * struct tb_margining - Lane margining support
+ * @caps: Port lane margining capabilities
+ * @results: Last lane margining results
+ * @lanes: %0, %1 or %7 (all)
+ * @min_ber_level: Minimum supported BER level contour value
+ * @max_ber_level: Maximum supported BER level contour value
+ * @ber_level: Current BER level contour value
+ * @voltage_steps: Number of mandatory voltage steps
+ * @max_voltage_offset: Maximum mandatory voltage offset (in mV)
+ * @time_steps: Number of time margin steps
+ * @max_time_offset: Maximum time margin offset (in mUI)
+ * @software: %true if software margining is used instead of hardware
+ * @time: %true if time margining is used instead of voltage
+ * @right_high: %false if left/low margin test is performed, %true if
+ * right/high
+ */
+struct tb_margining {
+ u32 caps[2];
+ u32 results[2];
+ unsigned int lanes;
+ unsigned int min_ber_level;
+ unsigned int max_ber_level;
+ unsigned int ber_level;
+ unsigned int voltage_steps;
+ unsigned int max_voltage_offset;
+ unsigned int time_steps;
+ unsigned int max_time_offset;
+ bool software;
+ bool time;
+ bool right_high;
+};
+
+static bool supports_software(const struct usb4_port *usb4)
+{
+ return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
+}
+
+static bool supports_hardware(const struct usb4_port *usb4)
+{
+ return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
+}
+
+static bool both_lanes(const struct usb4_port *usb4)
+{
+ return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES;
+}
+
+static unsigned int independent_voltage_margins(const struct usb4_port *usb4)
+{
+ return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >>
+ USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT;
+}
+
+static bool supports_time(const struct usb4_port *usb4)
+{
+ return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
+}
+
+/* Only applicable if supports_time() returns true */
+static unsigned int independent_time_margins(const struct usb4_port *usb4)
+{
+ return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >>
+ USB4_MARGIN_CAP_1_TIME_INDP_SHIFT;
+}
+
+static ssize_t
+margining_ber_level_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ unsigned int val;
+ int ret = 0;
+ char *buf;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (usb4->margining->software) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto out_unlock;
+ }
+
+ buf[count - 1] = '\0';
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ goto out_free;
+
+ if (val < usb4->margining->min_ber_level ||
+ val > usb4->margining->max_ber_level) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ usb4->margining->ber_level = val;
+
+out_free:
+ free_page((unsigned long)buf);
+out_unlock:
+ mutex_unlock(&tb->lock);
+
+ return ret < 0 ? ret : count;
+}
+
+static void ber_level_show(struct seq_file *s, unsigned int val)
+{
+ if (val % 2)
+ seq_printf(s, "3 * 1e%d (%u)\n", -12 + (val + 1) / 2, val);
+ else
+ seq_printf(s, "1e%d (%u)\n", -12 + val / 2, val);
+}
+
+static int margining_ber_level_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+
+ if (usb4->margining->software)
+ return -EINVAL;
+ ber_level_show(s, usb4->margining->ber_level);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_ber_level);
+
+static int margining_caps_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ u32 cap0, cap1;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ /* Dump the raw caps first */
+ cap0 = usb4->margining->caps[0];
+ seq_printf(s, "0x%08x\n", cap0);
+ cap1 = usb4->margining->caps[1];
+ seq_printf(s, "0x%08x\n", cap1);
+
+ seq_printf(s, "# software margining: %s\n",
+ supports_software(usb4) ? "yes" : "no");
+ if (supports_hardware(usb4)) {
+ seq_puts(s, "# hardware margining: yes\n");
+ seq_puts(s, "# minimum BER level contour: ");
+ ber_level_show(s, usb4->margining->min_ber_level);
+ seq_puts(s, "# maximum BER level contour: ");
+ ber_level_show(s, usb4->margining->max_ber_level);
+ } else {
+ seq_puts(s, "# hardware margining: no\n");
+ }
+
+ seq_printf(s, "# both lanes simultaneously: %s\n",
+ both_lanes(usb4) ? "yes" : "no");
+ seq_printf(s, "# voltage margin steps: %u\n",
+ usb4->margining->voltage_steps);
+ seq_printf(s, "# maximum voltage offset: %u mV\n",
+ usb4->margining->max_voltage_offset);
+
+ switch (independent_voltage_margins(usb4)) {
+ case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
+ seq_puts(s, "# returns minimum between high and low voltage margins\n");
+ break;
+ case USB4_MARGIN_CAP_0_VOLTAGE_HL:
+ seq_puts(s, "# returns high or low voltage margin\n");
+ break;
+ case USB4_MARGIN_CAP_0_VOLTAGE_BOTH:
+ seq_puts(s, "# returns both high and low margins\n");
+ break;
+ }
+
+ if (supports_time(usb4)) {
+ seq_puts(s, "# time margining: yes\n");
+ seq_printf(s, "# time margining is destructive: %s\n",
+ cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? "yes" : "no");
+
+ switch (independent_time_margins(usb4)) {
+ case USB4_MARGIN_CAP_1_TIME_MIN:
+ seq_puts(s, "# returns minimum between left and right time margins\n");
+ break;
+ case USB4_MARGIN_CAP_1_TIME_LR:
+ seq_puts(s, "# returns left or right margin\n");
+ break;
+ case USB4_MARGIN_CAP_1_TIME_BOTH:
+ seq_puts(s, "# returns both left and right margins\n");
+ break;
+ }
+
+ seq_printf(s, "# time margin steps: %u\n",
+ usb4->margining->time_steps);
+ seq_printf(s, "# maximum time offset: %u mUI\n",
+ usb4->margining->max_time_offset);
+ } else {
+ seq_puts(s, "# time margining: no\n");
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RO(margining_caps);
+
+static ssize_t
+margining_lanes_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_free;
+ }
+
+ if (!strcmp(buf, "0")) {
+ usb4->margining->lanes = 0;
+ } else if (!strcmp(buf, "1")) {
+ usb4->margining->lanes = 1;
+ } else if (!strcmp(buf, "all")) {
+ /* Needs to be supported */
+ if (both_lanes(usb4))
+ usb4->margining->lanes = 7;
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out_free:
+ free_page((unsigned long)buf);
+ return ret < 0 ? ret : count;
+}
+
+static int margining_lanes_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ unsigned int lanes;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ lanes = usb4->margining->lanes;
+ if (both_lanes(usb4)) {
+ if (!lanes)
+ seq_puts(s, "[0] 1 all\n");
+ else if (lanes == 1)
+ seq_puts(s, "0 [1] all\n");
+ else
+ seq_puts(s, "0 1 [all]\n");
+ } else {
+ if (!lanes)
+ seq_puts(s, "[0] 1\n");
+ else
+ seq_puts(s, "0 [1]\n");
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_lanes);
+
+static ssize_t margining_mode_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_free;
+ }
+
+ if (!strcmp(buf, "software")) {
+ if (supports_software(usb4))
+ usb4->margining->software = true;
+ else
+ ret = -EINVAL;
+ } else if (!strcmp(buf, "hardware")) {
+ if (supports_hardware(usb4))
+ usb4->margining->software = false;
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out_free:
+ free_page((unsigned long)buf);
+ return ret ? ret : count;
+}
+
+static int margining_mode_show(struct seq_file *s, void *not_used)
+{
+ const struct tb_port *port = s->private;
+ const struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ const char *space = "";
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (supports_software(usb4)) {
+ if (usb4->margining->software)
+ seq_puts(s, "[software]");
+ else
+ seq_puts(s, "software");
+ space = " ";
+ }
+ if (supports_hardware(usb4)) {
+ if (usb4->margining->software)
+ seq_printf(s, "%shardware", space);
+ else
+ seq_printf(s, "%s[hardware]", space);
+ }
+
+ mutex_unlock(&tb->lock);
+
+ seq_puts(s, "\n");
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_mode);
+
+static int margining_run_write(void *data, u64 val)
+{
+ struct tb_port *port = data;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb_switch *sw = port->sw;
+ struct tb_margining *margining;
+ struct tb *tb = sw->tb;
+ int ret;
+
+ if (val != 1)
+ return -EINVAL;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ /*
+ * CL states may interfere with lane margining so let the user know
+ * and bail out.
+ */
+ if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2)) {
+ tb_port_warn(port,
+ "CL states are enabled, Disable them with clx=0 and re-connect\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ margining = usb4->margining;
+
+ if (margining->software) {
+ tb_port_dbg(port, "running software %s lane margining for lanes %u\n",
+ margining->time ? "time" : "voltage", margining->lanes);
+ ret = usb4_port_sw_margin(port, margining->lanes, margining->time,
+ margining->right_high,
+ USB4_MARGIN_SW_COUNTER_CLEAR);
+ if (ret)
+ goto out_unlock;
+
+ ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
+ } else {
+ tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n",
+ margining->time ? "time" : "voltage", margining->lanes);
+ /* Clear the results */
+ margining->results[0] = 0;
+ margining->results[1] = 0;
+ ret = usb4_port_hw_margin(port, margining->lanes,
+ margining->ber_level, margining->time,
+ margining->right_high, margining->results);
+ }
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(margining_run_fops, NULL, margining_run_write,
+ "%llu\n");
+
+static ssize_t margining_results_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ /* Just clear the results */
+ usb4->margining->results[0] = 0;
+ usb4->margining->results[1] = 0;
+
+ mutex_unlock(&tb->lock);
+ return count;
+}
+
+static void voltage_margin_show(struct seq_file *s,
+ const struct tb_margining *margining, u8 val)
+{
+ unsigned int tmp, voltage;
+
+ tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
+ voltage = tmp * margining->max_voltage_offset / margining->voltage_steps;
+ seq_printf(s, "%u mV (%u)", voltage, tmp);
+ if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
+ seq_puts(s, " exceeds maximum");
+ seq_puts(s, "\n");
+}
+
+static void time_margin_show(struct seq_file *s,
+ const struct tb_margining *margining, u8 val)
+{
+ unsigned int tmp, interval;
+
+ tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
+ interval = tmp * margining->max_time_offset / margining->time_steps;
+ seq_printf(s, "%u mUI (%u)", interval, tmp);
+ if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
+ seq_puts(s, " exceeds maximum");
+ seq_puts(s, "\n");
+}
+
+static int margining_results_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb_margining *margining;
+ struct tb *tb = port->sw->tb;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ margining = usb4->margining;
+ /* Dump the raw results first */
+ seq_printf(s, "0x%08x\n", margining->results[0]);
+ /* Only the hardware margining has two result dwords */
+ if (!margining->software) {
+ unsigned int val;
+
+ seq_printf(s, "0x%08x\n", margining->results[1]);
+
+ if (margining->time) {
+ if (!margining->lanes || margining->lanes == 7) {
+ val = margining->results[1];
+ seq_puts(s, "# lane 0 right time margin: ");
+ time_margin_show(s, margining, val);
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
+ seq_puts(s, "# lane 0 left time margin: ");
+ time_margin_show(s, margining, val);
+ }
+ if (margining->lanes == 1 || margining->lanes == 7) {
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
+ seq_puts(s, "# lane 1 right time margin: ");
+ time_margin_show(s, margining, val);
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
+ seq_puts(s, "# lane 1 left time margin: ");
+ time_margin_show(s, margining, val);
+ }
+ } else {
+ if (!margining->lanes || margining->lanes == 7) {
+ val = margining->results[1];
+ seq_puts(s, "# lane 0 high voltage margin: ");
+ voltage_margin_show(s, margining, val);
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
+ seq_puts(s, "# lane 0 low voltage margin: ");
+ voltage_margin_show(s, margining, val);
+ }
+ if (margining->lanes == 1 || margining->lanes == 7) {
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
+ seq_puts(s, "# lane 1 high voltage margin: ");
+ voltage_margin_show(s, margining, val);
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
+ seq_puts(s, "# lane 1 low voltage margin: ");
+ voltage_margin_show(s, margining, val);
+ }
+ }
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_results);
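As a worked illustration of the result decoding above, the second hardware result dword packs four 8-bit fields (margin value in bits 6:0, an "exceeds" flag in bit 7) at shifts 0, 8, 16 and 24. A small self-contained helper, using made-up capability and result values, that mirrors voltage_margin_show():

#include <stdio.h>

int main(void)
{
	unsigned int results1 = 0x110a140c;		/* hypothetical HW result dword */
	unsigned int voltage_steps = 100;		/* from the margining caps */
	unsigned int max_voltage_offset = 100;		/* mV, from the margining caps */
	const unsigned int shifts[] = { 0, 8, 16, 24 };
	const char *names[] = { "lane 0 high", "lane 0 low", "lane 1 high", "lane 1 low" };

	for (int i = 0; i < 4; i++) {
		unsigned int field = results1 >> shifts[i];
		unsigned int margin = field & 0x7f;	/* USB4_MARGIN_HW_RES_1_MARGIN_MASK */

		printf("%s voltage margin: %u mV (%u)%s\n", names[i],
		       margin * max_voltage_offset / voltage_steps, margin,
		       field & 0x80 ? " exceeds maximum" : "");
	}
	return 0;
}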
+
+static ssize_t margining_test_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_free;
+ }
+
+ if (!strcmp(buf, "time") && supports_time(usb4))
+ usb4->margining->time = true;
+ else if (!strcmp(buf, "voltage"))
+ usb4->margining->time = false;
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&tb->lock);
+
+out_free:
+ free_page((unsigned long)buf);
+ return ret ? ret : count;
+}
+
+static int margining_test_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (supports_time(usb4)) {
+ if (usb4->margining->time)
+ seq_puts(s, "voltage [time]\n");
+ else
+ seq_puts(s, "[voltage] time\n");
+ } else {
+ seq_puts(s, "[voltage]\n");
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_test);
+
+static ssize_t margining_margin_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_free;
+ }
+
+ if (usb4->margining->time) {
+ if (!strcmp(buf, "left"))
+ usb4->margining->right_high = false;
+ else if (!strcmp(buf, "right"))
+ usb4->margining->right_high = true;
+ else
+ ret = -EINVAL;
+ } else {
+ if (!strcmp(buf, "low"))
+ usb4->margining->right_high = false;
+ else if (!strcmp(buf, "high"))
+ usb4->margining->right_high = true;
+ else
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out_free:
+ free_page((unsigned long)buf);
+ return ret ? ret : count;
+}
+
+static int margining_margin_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (usb4->margining->time) {
+ if (usb4->margining->right_high)
+ seq_puts(s, "left [right]\n");
+ else
+ seq_puts(s, "[left] right\n");
+ } else {
+ if (usb4->margining->right_high)
+ seq_puts(s, "low [high]\n");
+ else
+ seq_puts(s, "[low] high\n");
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_margin);
+
+static void margining_port_init(struct tb_port *port)
+{
+ struct tb_margining *margining;
+ struct dentry *dir, *parent;
+ struct usb4_port *usb4;
+ char dir_name[10];
+ unsigned int val;
+ int ret;
+
+ usb4 = port->usb4;
+ if (!usb4)
+ return;
+
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+
+ margining = kzalloc(sizeof(*margining), GFP_KERNEL);
+ if (!margining)
+ return;
+
+ ret = usb4_port_margining_caps(port, margining->caps);
+ if (ret) {
+ kfree(margining);
+ return;
+ }
+
+ usb4->margining = margining;
+
+ /* Set the initial mode */
+ if (supports_software(usb4))
+ margining->software = true;
+
+ val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >>
+ USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT;
+ margining->voltage_steps = val;
+ val = (margining->caps[0] & USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK) >>
+ USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT;
+ margining->max_voltage_offset = 74 + val * 2;
+
+ if (supports_time(usb4)) {
+ val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >>
+ USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT;
+ margining->time_steps = val;
+ val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_OFFSET_MASK) >>
+ USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT;
+ /*
+ * Store it as mUI (milli Unit Interval) because we want
+ * to keep it as an integer.
+ */
+ margining->max_time_offset = 200 + 10 * val;
+ }
+
+ dir = debugfs_create_dir("margining", parent);
+ if (supports_hardware(usb4)) {
+ val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >>
+ USB4_MARGIN_CAP_1_MIN_BER_SHIFT;
+ margining->min_ber_level = val;
+ val = (margining->caps[1] & USB4_MARGIN_CAP_1_MAX_BER_MASK) >>
+ USB4_MARGIN_CAP_1_MAX_BER_SHIFT;
+ margining->max_ber_level = val;
+
+ /* Set the default to minimum */
+ margining->ber_level = margining->min_ber_level;
+
+ debugfs_create_file("ber_level_contour", 0400, dir, port,
+ &margining_ber_level_fops);
+ }
+ debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops);
+ debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops);
+ debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops);
+ debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
+ debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
+ debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
+ if (independent_voltage_margins(usb4) ||
+ (supports_time(usb4) && independent_time_margins(usb4)))
+ debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
+}
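As an illustrative worked example of the capability decoding above (the field values are made up): a CAP_0 maximum-voltage-offset field of 13 is reported as 74 + 13 * 2 = 100 mV, and a CAP_1 time-offset field of 3 as 200 + 10 * 3 = 230 mUI.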
+
+static void margining_port_remove(struct tb_port *port)
+{
+ struct dentry *parent;
+ char dir_name[10];
+
+ if (!port->usb4)
+ return;
+
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ debugfs_remove_recursive(debugfs_lookup("margining", parent));
+
+ kfree(port->usb4->margining);
+ port->usb4->margining = NULL;
+}
+
+static void margining_switch_init(struct tb_switch *sw)
+{
+ struct tb_port *upstream, *downstream;
+ struct tb_switch *parent_sw;
+ u64 route = tb_route(sw);
+
+ if (!route)
+ return;
+
+ upstream = tb_upstream_port(sw);
+ parent_sw = tb_switch_parent(sw);
+ downstream = tb_port_at(route, parent_sw);
+
+ margining_port_init(downstream);
+ margining_port_init(upstream);
+}
+
+static void margining_switch_remove(struct tb_switch *sw)
+{
+ struct tb_switch *parent_sw;
+ struct tb_port *downstream;
+ u64 route = tb_route(sw);
+
+ if (!route)
+ return;
+
+ /*
+ * Upstream is removed with the router itself but we need to
+ * remove the downstream port margining directory.
+ */
+ parent_sw = tb_switch_parent(sw);
+ downstream = tb_port_at(route, parent_sw);
+ margining_port_remove(downstream);
+}
+
+static void margining_xdomain_init(struct tb_xdomain *xd)
+{
+ struct tb_switch *parent_sw;
+ struct tb_port *downstream;
+
+ parent_sw = tb_xdomain_parent(xd);
+ downstream = tb_port_at(xd->route, parent_sw);
+
+ margining_port_init(downstream);
+}
+
+static void margining_xdomain_remove(struct tb_xdomain *xd)
+{
+ struct tb_switch *parent_sw;
+ struct tb_port *downstream;
+
+ parent_sw = tb_xdomain_parent(xd);
+ downstream = tb_port_at(xd->route, parent_sw);
+ margining_port_remove(downstream);
+}
+#else
+static inline void margining_switch_init(struct tb_switch *sw) { }
+static inline void margining_switch_remove(struct tb_switch *sw) { }
+static inline void margining_xdomain_init(struct tb_xdomain *xd) { }
+static inline void margining_xdomain_remove(struct tb_xdomain *xd) { }
+#endif
+
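For reference, the margining files added above are meant to be driven from userspace through debugfs. A hedged sketch of one run, assuming debugfs is mounted at /sys/kernel/debug and a router/port path such as thunderbolt/0-1/port1 (both illustrative), and assuming mode, lanes and test were configured beforehand:

#include <stdio.h>

int main(void)
{
	const char *base = "/sys/kernel/debug/thunderbolt/0-1/port1/margining";
	char path[256], line[128];
	FILE *f;

	/* trigger one margining run */
	snprintf(path, sizeof(path), "%s/run", base);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* dump the raw result dwords and the decoded margins */
	snprintf(path, sizeof(path), "%s/results", base);
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}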
static int port_clear_all_counters(struct tb_port *port)
{
u32 *buf;
@@ -689,6 +1512,8 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
debugfs_create_file("counters", 0600, debugfs_dir, port,
&counters_fops);
}
+
+ margining_switch_init(sw);
}
/**
@@ -699,9 +1524,20 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
*/
void tb_switch_debugfs_remove(struct tb_switch *sw)
{
+ margining_switch_remove(sw);
debugfs_remove_recursive(sw->debugfs_dir);
}
+void tb_xdomain_debugfs_init(struct tb_xdomain *xd)
+{
+ margining_xdomain_init(xd);
+}
+
+void tb_xdomain_debugfs_remove(struct tb_xdomain *xd)
+{
+ margining_xdomain_remove(xd);
+}
+
/**
* tb_service_debugfs_init() - Add debugfs directory for service
* @svc: Thunderbolt service pointer
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 99211f35a5cd..ec7b5f65804e 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -144,11 +144,9 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
if (!uuid_is_null(&uuids[i]))
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
- &uuids[i]);
+ ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]);
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
- i < tb->nboot_acl - 1 ? "," : "\n");
+ ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
}
out:
@@ -247,7 +245,7 @@ static ssize_t deauthorization_show(struct device *dev,
tb->security_level == TB_SECURITY_SECURE)
deauthorization = !!tb->cm_ops->disapprove_switch;
- return sprintf(buf, "%d\n", deauthorization);
+ return sysfs_emit(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);
@@ -270,7 +268,7 @@ static ssize_t security_show(struct device *dev, struct device_attribute *attr,
if (tb->security_level < ARRAY_SIZE(tb_security_names))
name = tb_security_names[tb->security_level];
- return sprintf(buf, "%s\n", name);
+ return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 572b5896caa3..86521ebb2579 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2518,6 +2518,9 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ADL_NHI1:
case PCI_DEVICE_ID_INTEL_RPL_NHI0:
case PCI_DEVICE_ID_INTEL_RPL_NHI1:
+ case PCI_DEVICE_ID_INTEL_MTL_M_NHI0:
+ case PCI_DEVICE_ID_INTEL_MTL_P_NHI0:
+ case PCI_DEVICE_ID_INTEL_MTL_P_NHI1:
icm->is_supported = icm_tgl_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index b5cd9673e15d..4dce2edd86ea 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1184,6 +1184,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
+ struct device *dev = &pdev->dev;
int res, irq, nvec;
/* In case someone left them on. */
@@ -1214,10 +1215,8 @@ static int nhi_init_msi(struct tb_nhi *nhi)
res = devm_request_irq(&pdev->dev, irq, nhi_msi,
IRQF_NO_SUSPEND, "thunderbolt", nhi);
- if (res) {
- dev_err(&pdev->dev, "request_irq failed, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "request_irq failed, aborting\n");
}
return 0;
@@ -1258,26 +1257,21 @@ static struct tb *nhi_select_cm(struct tb_nhi *nhi)
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct tb_nhi *nhi;
struct tb *tb;
int res;
- if (!nhi_imr_valid(pdev)) {
- dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
- return -ENODEV;
- }
+ if (!nhi_imr_valid(pdev))
+ return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n");
res = pcim_enable_device(pdev);
- if (res) {
- dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
- if (res) {
- dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
@@ -1288,7 +1282,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
- dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
+ dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -1301,18 +1295,14 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_iommu(nhi);
res = nhi_init_msi(nhi);
- if (res) {
- dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
spin_lock_init(&nhi->lock);
res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (res) {
- dev_err(&pdev->dev, "failed to set DMA mask\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "failed to set DMA mask\n");
pci_set_master(pdev);
@@ -1323,13 +1313,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
tb = nhi_select_cm(nhi);
- if (!tb) {
- dev_err(&nhi->pdev->dev,
+ if (!tb)
+ return dev_err_probe(dev, -ENODEV,
"failed to determine connection manager, aborting\n");
- return -ENODEV;
- }
- dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+ dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
res = tb_domain_add(tb);
if (res) {
@@ -1433,6 +1421,7 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ /* Thunderbolt 4 */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
@@ -1449,6 +1438,12 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 01190d9ced16..b0718020c6f5 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -75,6 +75,9 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
+#define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2
+#define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2
+#define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index b3f310389378..3dd5f81bd629 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -12,19 +12,315 @@
#include "tb.h"
+/* Intel specific NVM offsets */
+#define INTEL_NVM_DEVID 0x05
+#define INTEL_NVM_VERSION 0x08
+#define INTEL_NVM_CSS 0x10
+#define INTEL_NVM_FLASH_SIZE 0x45
+
+/* ASMedia specific NVM offsets */
+#define ASMEDIA_NVM_DATE 0x1c
+#define ASMEDIA_NVM_VERSION 0x28
+
static DEFINE_IDA(nvm_ida);
/**
+ * struct tb_nvm_vendor_ops - Vendor specific NVM operations
+ * @read_version: Reads out NVM version from the flash
+ * @validate: Validates the NVM image before update (optional)
+ * @write_headers: Writes headers before the rest of the image (optional)
+ */
+struct tb_nvm_vendor_ops {
+ int (*read_version)(struct tb_nvm *nvm);
+ int (*validate)(struct tb_nvm *nvm);
+ int (*write_headers)(struct tb_nvm *nvm);
+};
+
+/**
+ * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
+ * @vendor: Vendor ID
+ * @vops: Vendor specific NVM operations
+ *
+ * Maps vendor ID to NVM vendor operations. If there is no mapping then
+ * NVM firmware upgrade is disabled for the device.
+ */
+struct tb_nvm_vendor {
+ u16 vendor;
+ const struct tb_nvm_vendor_ops *vops;
+};
+
+static int intel_switch_nvm_version(struct tb_nvm *nvm)
+{
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+ u32 val, nvm_size, hdr_size;
+ int ret;
+
+ /*
+ * If the switch is in safe-mode the only accessible portion of
+ * the NVM is the non-active one where userspace is expected to
+ * write new functional NVM.
+ */
+ if (sw->safe_mode)
+ return 0;
+
+ ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+ nvm_size = (SZ_1M << (val & 7)) / 8;
+ nvm_size = (nvm_size - hdr_size) / 2;
+
+ ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm->major = (val >> 16) & 0xff;
+ nvm->minor = (val >> 8) & 0xff;
+ nvm->active_size = nvm_size;
+
+ return 0;
+}
+
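A self-contained illustration of the version and size decoding performed by intel_switch_nvm_version() above; the register values are made up:

#include <stdio.h>

int main(void)
{
	unsigned int version = 0x002e0100;	/* hypothetical INTEL_NVM_VERSION dword */
	unsigned int flash = 0x3;		/* hypothetical INTEL_NVM_FLASH_SIZE dword */
	unsigned int hdr_size = 16 * 1024;	/* generation >= 3 routers */
	unsigned int nvm_size;

	nvm_size = ((1024u * 1024u) << (flash & 7)) / 8;	/* (SZ_1M << (val & 7)) / 8 */
	nvm_size = (nvm_size - hdr_size) / 2;

	printf("NVM %x.%x, active region %u bytes\n",
	       (version >> 16) & 0xff, (version >> 8) & 0xff, nvm_size);
	return 0;
}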
+static int intel_switch_nvm_validate(struct tb_nvm *nvm)
+{
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+ unsigned int image_size, hdr_size;
+ u16 ds_size, device_id;
+ u8 *buf = nvm->buf;
+
+ image_size = nvm->buf_data_size;
+
+ /*
+ * FARB pointer must point inside the image and must at least
+ * contain parts of the digital section we will be reading here.
+ */
+ hdr_size = (*(u32 *)buf) & 0xffffff;
+ if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
+ return -EINVAL;
+
+ /* Digital section start should be aligned to 4k page */
+ if (!IS_ALIGNED(hdr_size, SZ_4K))
+ return -EINVAL;
+
+ /*
+ * Read digital section size and check that it also fits inside
+ * the image.
+ */
+ ds_size = *(u16 *)(buf + hdr_size);
+ if (ds_size >= image_size)
+ return -EINVAL;
+
+ if (sw->safe_mode)
+ return 0;
+
+ /*
+ * Make sure the device ID in the image matches the one
+ * we read from the switch config space.
+ */
+ device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
+ if (device_id != sw->config.device_id)
+ return -EINVAL;
+
+ /* Skip headers in the image */
+ nvm->buf_data_start = buf + hdr_size;
+ nvm->buf_data_size = image_size - hdr_size;
+
+ return 0;
+}
+
+static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
+{
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+
+ if (sw->generation < 3) {
+ int ret;
+
+ /* Write CSS headers first */
+ ret = dma_port_flash_write(sw->dma_port,
+ DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
+ DMA_PORT_CSS_MAX_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
+ .read_version = intel_switch_nvm_version,
+ .validate = intel_switch_nvm_validate,
+ .write_headers = intel_switch_nvm_write_headers,
+};
+
+static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
+{
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+ u32 val;
+ int ret;
+
+ ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm->major = (val << 16) & 0xff0000;
+ nvm->major |= val & 0x00ff00;
+ nvm->major |= (val >> 16) & 0x0000ff;
+
+ ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm->minor = (val << 16) & 0xff0000;
+ nvm->minor |= val & 0x00ff00;
+ nvm->minor |= (val >> 16) & 0x0000ff;
+
+ /* ASMedia NVM size is fixed to 512k */
+ nvm->active_size = SZ_512K;
+
+ return 0;
+}
+
+static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
+ .read_version = asmedia_switch_nvm_version,
+};
+
+/* Router vendor NVM support table */
+static const struct tb_nvm_vendor switch_nvm_vendors[] = {
+ { 0x174c, &asmedia_switch_nvm_ops },
+ { PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
+ { 0x8087, &intel_switch_nvm_ops },
+};
+
+static int intel_retimer_nvm_version(struct tb_nvm *nvm)
+{
+ struct tb_retimer *rt = tb_to_retimer(nvm->dev);
+ u32 val, nvm_size;
+ int ret;
+
+ ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm->major = (val >> 16) & 0xff;
+ nvm->minor = (val >> 8) & 0xff;
+
+ ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm_size = (SZ_1M << (val & 7)) / 8;
+ nvm_size = (nvm_size - SZ_16K) / 2;
+ nvm->active_size = nvm_size;
+
+ return 0;
+}
+
+static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
+{
+ struct tb_retimer *rt = tb_to_retimer(nvm->dev);
+ unsigned int image_size, hdr_size;
+ u8 *buf = nvm->buf;
+ u16 ds_size, device;
+
+ image_size = nvm->buf_data_size;
+
+ /*
+ * FARB pointer must point inside the image and must at least
+ * contain parts of the digital section we will be reading here.
+ */
+ hdr_size = (*(u32 *)buf) & 0xffffff;
+ if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
+ return -EINVAL;
+
+ /* Digital section start should be aligned to 4k page */
+ if (!IS_ALIGNED(hdr_size, SZ_4K))
+ return -EINVAL;
+
+ /*
+ * Read digital section size and check that it also fits inside
+ * the image.
+ */
+ ds_size = *(u16 *)(buf + hdr_size);
+ if (ds_size >= image_size)
+ return -EINVAL;
+
+ /*
+ * Make sure the device ID in the image matches the retimer
+ * hardware.
+ */
+ device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
+ if (device != rt->device)
+ return -EINVAL;
+
+ /* Skip headers in the image */
+ nvm->buf_data_start = buf + hdr_size;
+ nvm->buf_data_size = image_size - hdr_size;
+
+ return 0;
+}
+
+static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
+ .read_version = intel_retimer_nvm_version,
+ .validate = intel_retimer_nvm_validate,
+};
+
+/* Retimer vendor NVM support table */
+static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
+ { 0x8087, &intel_retimer_nvm_ops },
+};
+
+/**
* tb_nvm_alloc() - Allocate new NVM structure
* @dev: Device owning the NVM
*
* Allocates new NVM structure with unique @id and returns it. In case
- * of error returns ERR_PTR().
+ * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
+ * NVM format of the @dev is not known by the kernel.
*/
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
+ const struct tb_nvm_vendor_ops *vops = NULL;
struct tb_nvm *nvm;
- int ret;
+ int ret, i;
+
+ if (tb_is_switch(dev)) {
+ const struct tb_switch *sw = tb_to_switch(dev);
+
+ for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
+ const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];
+
+ if (v->vendor == sw->config.vendor_id) {
+ vops = v->vops;
+ break;
+ }
+ }
+
+ if (!vops) {
+ tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
+ sw->config.vendor_id);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ } else if (tb_is_retimer(dev)) {
+ const struct tb_retimer *rt = tb_to_retimer(dev);
+
+ for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
+ const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];
+
+ if (v->vendor == rt->vendor) {
+ vops = v->vops;
+ break;
+ }
+ }
+
+ if (!vops) {
+ dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
+ rt->vendor);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ } else {
+ return ERR_PTR(-EOPNOTSUPP);
+ }
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -38,14 +334,85 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
nvm->id = ret;
nvm->dev = dev;
+ nvm->vops = vops;
return nvm;
}
/**
+ * tb_nvm_read_version() - Read and populate NVM version
+ * @nvm: NVM structure
+ *
+ * Uses vendor specific means to read out and fill in the existing
+ * active NVM version. Returns %0 in case of success and negative errno
+ * otherwise.
+ */
+int tb_nvm_read_version(struct tb_nvm *nvm)
+{
+ const struct tb_nvm_vendor_ops *vops = nvm->vops;
+
+ if (vops && vops->read_version)
+ return vops->read_version(nvm);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * tb_nvm_validate() - Validate new NVM image
+ * @nvm: NVM structure
+ *
+ * Runs vendor specific validation over the new NVM image and if all
+ * checks pass returns %0. As side effect updates @nvm->buf_data_start
+ * and @nvm->buf_data_size fields to match the actual data to be written
+ * to the NVM.
+ *
+ * If the validation does not pass then returns negative errno.
+ */
+int tb_nvm_validate(struct tb_nvm *nvm)
+{
+ const struct tb_nvm_vendor_ops *vops = nvm->vops;
+ unsigned int image_size;
+ u8 *buf = nvm->buf;
+
+ if (!buf)
+ return -EINVAL;
+ if (!vops)
+ return -EOPNOTSUPP;
+
+ /* Just do basic image size checks */
+ image_size = nvm->buf_data_size;
+ if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+ return -EINVAL;
+
+ /*
+ * Set the default data start in the buffer. The validate method
+ * below can change this if needed.
+ */
+ nvm->buf_data_start = buf;
+
+ return vops->validate ? vops->validate(nvm) : 0;
+}
+
+/**
+ * tb_nvm_write_headers() - Write headers before the rest of the image
+ * @nvm: NVM structure
+ *
+ * If the vendor NVM format requires writing headers before the rest of
+ * the image, this function does that. Can be called even if the device
+ * does not need this.
+ *
+ * Returns %0 in case of success and negative errno otherwise.
+ */
+int tb_nvm_write_headers(struct tb_nvm *nvm)
+{
+ const struct tb_nvm_vendor_ops *vops = nvm->vops;
+
+ return vops->write_headers ? vops->write_headers(nvm) : 0;
+}
+
+/**
* tb_nvm_add_active() - Adds active NVMem device to NVM
* @nvm: NVM structure
- * @size: Size of the active NVM in bytes
* @reg_read: Pointer to the function to read the NVM (passed directly to the
* NVMem device)
*
@@ -54,7 +421,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
* needed. The first parameter passed to @reg_read is @nvm structure.
* Returns %0 on success and negative errno otherwise.
*/
-int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read)
+int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
struct nvmem_config config;
struct nvmem_device *nvmem;
@@ -67,7 +434,7 @@ int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read
config.id = nvm->id;
config.stride = 4;
config.word_size = 4;
- config.size = size;
+ config.size = nvm->active_size;
config.dev = nvm->dev;
config.owner = THIS_MODULE;
config.priv = nvm;
@@ -109,17 +476,17 @@ int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
/**
* tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
* @nvm: NVM structure
- * @size: Size of the non-active NVM in bytes
* @reg_write: Pointer to the function to write the NVM (passed directly
* to the NVMem device)
*
* Registers new non-active NVMem device for @nvm. The @reg_write is called
* directly from NVMem so it must handle possible concurrent access if
* needed. The first parameter passed to @reg_write is @nvm structure.
+ * The size of the NVMem device is set to %NVM_MAX_SIZE.
+ *
* Returns %0 on success and negative errno otherwise.
*/
-int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
- nvmem_reg_write_t reg_write)
+int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
struct nvmem_config config;
struct nvmem_device *nvmem;
@@ -132,7 +499,7 @@ int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
config.id = nvm->id;
config.stride = 4;
config.word_size = 4;
- config.size = size;
+ config.size = NVM_MAX_SIZE;
config.dev = nvm->dev;
config.owner = THIS_MODULE;
config.priv = nvm;
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 8c29bd556ae0..81252e31014a 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -16,8 +16,23 @@
#define TB_MAX_RETIMER_INDEX 6
-static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
- size_t bytes)
+/**
+ * tb_retimer_nvm_read() - Read contents of retimer NVM
+ * @rt: Retimer device
+ * @address: NVM address (in bytes) to start reading
+ * @buf: Data read from NVM is stored here
+ * @size: Number of bytes to read
+ *
+ * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
+ * read was successful and negative errno in case of failure.
+ */
+int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
+}
+
+static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
@@ -30,7 +45,7 @@ static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
- ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
+ ret = tb_retimer_nvm_read(rt, offset, val, bytes);
mutex_unlock(&rt->tb->lock);
out:
@@ -40,8 +55,7 @@ out:
return ret;
}
-static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
- size_t bytes)
+static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
@@ -59,34 +73,23 @@ static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
struct tb_nvm *nvm;
- u32 val, nvm_size;
int ret;
nvm = tb_nvm_alloc(&rt->dev);
- if (IS_ERR(nvm))
- return PTR_ERR(nvm);
-
- ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
- sizeof(val));
- if (ret)
+ if (IS_ERR(nvm)) {
+ ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
goto err_nvm;
+ }
- nvm->major = val >> 16;
- nvm->minor = val >> 8;
-
- ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
- &val, sizeof(val));
+ ret = tb_nvm_read_version(nvm);
if (ret)
goto err_nvm;
- nvm_size = (SZ_1M << (val & 7)) / 8;
- nvm_size = (nvm_size - SZ_16K) / 2;
-
- ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
+ ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
- ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
+ ret = tb_nvm_add_non_active(nvm, nvm_write);
if (ret)
goto err_nvm;
@@ -94,59 +97,33 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
return 0;
err_nvm:
- tb_nvm_free(nvm);
+ dev_dbg(&rt->dev, "NVM upgrade disabled\n");
+ if (!IS_ERR(nvm))
+ tb_nvm_free(nvm);
+
return ret;
}
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
- unsigned int image_size, hdr_size;
- const u8 *buf = rt->nvm->buf;
- u16 ds_size, device;
+ unsigned int image_size;
+ const u8 *buf;
int ret;
- image_size = rt->nvm->buf_data_size;
- if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
- return -EINVAL;
-
- /*
- * FARB pointer must point inside the image and must at least
- * contain parts of the digital section we will be reading here.
- */
- hdr_size = (*(u32 *)buf) & 0xffffff;
- if (hdr_size + NVM_DEVID + 2 >= image_size)
- return -EINVAL;
-
- /* Digital section start should be aligned to 4k page */
- if (!IS_ALIGNED(hdr_size, SZ_4K))
- return -EINVAL;
-
- /*
- * Read digital section size and check that it also fits inside
- * the image.
- */
- ds_size = *(u16 *)(buf + hdr_size);
- if (ds_size >= image_size)
- return -EINVAL;
-
- /*
- * Make sure the device ID in the image matches the retimer
- * hardware.
- */
- device = *(u16 *)(buf + hdr_size + NVM_DEVID);
- if (device != rt->device)
- return -EINVAL;
+ ret = tb_nvm_validate(rt->nvm);
+ if (ret)
+ return ret;
- /* Skip headers in the image */
- buf += hdr_size;
- image_size -= hdr_size;
+ buf = rt->nvm->buf_data_start;
+ image_size = rt->nvm->buf_data_size;
ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
image_size);
- if (!ret)
- rt->nvm->flushed = true;
+ if (ret)
+ return ret;
- return ret;
+ rt->nvm->flushed = true;
+ return 0;
}
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
@@ -185,7 +162,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_retimer *rt = tb_to_retimer(dev);
- return sprintf(buf, "%#x\n", rt->device);
+ return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);
@@ -200,8 +177,10 @@ static ssize_t nvm_authenticate_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
+ else if (rt->no_nvm_upgrade)
+ ret = -EOPNOTSUPP;
else
- ret = sprintf(buf, "%#x\n", rt->auth_status);
+ ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
mutex_unlock(&rt->tb->lock);
@@ -276,7 +255,7 @@ static ssize_t nvm_version_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
else
- ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
+ ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
mutex_unlock(&rt->tb->lock);
return ret;
@@ -288,7 +267,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_retimer *rt = tb_to_retimer(dev);
- return sprintf(buf, "%#x\n", rt->vendor);
+ return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index bda889ff3bda..5185cf3e4d97 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -26,10 +26,68 @@ enum usb4_sb_opcode {
USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */
USB4_SB_OPCODE_NVM_AUTH_WRITE = 0x48545541, /* "AUTH" */
USB4_SB_OPCODE_NVM_READ = 0x52524641, /* "AFRR" */
+ USB4_SB_OPCODE_READ_LANE_MARGINING_CAP = 0x50434452, /* "RDCP" */
+ USB4_SB_OPCODE_RUN_HW_LANE_MARGINING = 0x474d4852, /* "RHMG" */
+ USB4_SB_OPCODE_RUN_SW_LANE_MARGINING = 0x474d5352, /* "RSMG" */
+ USB4_SB_OPCODE_READ_SW_MARGIN_ERR = 0x57534452, /* "RDSW" */
};
#define USB4_SB_METADATA 0x09
#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0)
#define USB4_SB_DATA 0x12
+/* USB4_SB_OPCODE_READ_LANE_MARGINING_CAP */
+#define USB4_MARGIN_CAP_0_MODES_HW BIT(0)
+#define USB4_MARGIN_CAP_0_MODES_SW BIT(1)
+#define USB4_MARGIN_CAP_0_2_LANES BIT(2)
+#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK GENMASK(4, 3)
+#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT 3
+#define USB4_MARGIN_CAP_0_VOLTAGE_MIN 0x0
+#define USB4_MARGIN_CAP_0_VOLTAGE_HL 0x1
+#define USB4_MARGIN_CAP_0_VOLTAGE_BOTH 0x2
+#define USB4_MARGIN_CAP_0_TIME BIT(5)
+#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6)
+#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT 6
+#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13)
+#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT 13
+#define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8)
+#define USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9)
+#define USB4_MARGIN_CAP_1_TIME_INDP_SHIFT 9
+#define USB4_MARGIN_CAP_1_TIME_MIN 0x0
+#define USB4_MARGIN_CAP_1_TIME_LR 0x1
+#define USB4_MARGIN_CAP_1_TIME_BOTH 0x2
+#define USB4_MARGIN_CAP_1_TIME_STEPS_MASK GENMASK(15, 11)
+#define USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT 11
+#define USB4_MARGIN_CAP_1_TIME_OFFSET_MASK GENMASK(20, 16)
+#define USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT 16
+#define USB4_MARGIN_CAP_1_MIN_BER_MASK GENMASK(25, 21)
+#define USB4_MARGIN_CAP_1_MIN_BER_SHIFT 21
+#define USB4_MARGIN_CAP_1_MAX_BER_MASK GENMASK(30, 26)
+#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26
+
+/* USB4_SB_OPCODE_RUN_HW_LANE_MARGINING */
+#define USB4_MARGIN_HW_TIME BIT(3)
+#define USB4_MARGIN_HW_RH BIT(4)
+#define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5)
+#define USB4_MARGIN_HW_BER_SHIFT 5
+
+/* Applicable to all margin values */
+#define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0)
+#define USB4_MARGIN_HW_RES_1_EXCEEDS BIT(7)
+/* Different lane margin shifts */
+#define USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT 8
+#define USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT 16
+#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24
+
+/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */
+#define USB4_MARGIN_SW_TIME BIT(3)
+#define USB4_MARGIN_SW_RH BIT(4)
+#define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13)
+#define USB4_MARGIN_SW_COUNTER_SHIFT 13
+#define USB4_MARGIN_SW_COUNTER_NOP 0x0
+#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1
+#define USB4_MARGIN_SW_COUNTER_START 0x2
+#define USB4_MARGIN_SW_COUNTER_STOP 0x3
+
#endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 77d7f07ca075..60da5c23ccaf 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -19,8 +19,6 @@
/* Switch NVM support */
-#define NVM_CSS 0x10
-
struct nvm_auth_status {
struct list_head list;
uuid_t uuid;
@@ -102,70 +100,30 @@ static void nvm_clear_auth_status(const struct tb_switch *sw)
static int nvm_validate_and_write(struct tb_switch *sw)
{
- unsigned int image_size, hdr_size;
- const u8 *buf = sw->nvm->buf;
- u16 ds_size;
+ unsigned int image_size;
+ const u8 *buf;
int ret;
- if (!buf)
- return -EINVAL;
-
- image_size = sw->nvm->buf_data_size;
- if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
- return -EINVAL;
-
- /*
- * FARB pointer must point inside the image and must at least
- * contain parts of the digital section we will be reading here.
- */
- hdr_size = (*(u32 *)buf) & 0xffffff;
- if (hdr_size + NVM_DEVID + 2 >= image_size)
- return -EINVAL;
-
- /* Digital section start should be aligned to 4k page */
- if (!IS_ALIGNED(hdr_size, SZ_4K))
- return -EINVAL;
-
- /*
- * Read digital section size and check that it also fits inside
- * the image.
- */
- ds_size = *(u16 *)(buf + hdr_size);
- if (ds_size >= image_size)
- return -EINVAL;
-
- if (!sw->safe_mode) {
- u16 device_id;
+ ret = tb_nvm_validate(sw->nvm);
+ if (ret)
+ return ret;
- /*
- * Make sure the device ID in the image matches the one
- * we read from the switch config space.
- */
- device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
- if (device_id != sw->config.device_id)
- return -EINVAL;
-
- if (sw->generation < 3) {
- /* Write CSS headers first */
- ret = dma_port_flash_write(sw->dma_port,
- DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
- DMA_PORT_CSS_MAX_SIZE);
- if (ret)
- return ret;
- }
+ ret = tb_nvm_write_headers(sw->nvm);
+ if (ret)
+ return ret;
- /* Skip headers in the image */
- buf += hdr_size;
- image_size -= hdr_size;
- }
+ buf = sw->nvm->buf_data_start;
+ image_size = sw->nvm->buf_data_size;
if (tb_switch_is_usb4(sw))
ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
else
ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
- if (!ret)
- sw->nvm->flushed = true;
- return ret;
+ if (ret)
+ return ret;
+
+ sw->nvm->flushed = true;
+ return 0;
}
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
@@ -300,14 +258,6 @@ static inline bool nvm_upgradeable(struct tb_switch *sw)
return nvm_readable(sw);
}
-static inline int nvm_read(struct tb_switch *sw, unsigned int address,
- void *buf, size_t size)
-{
- if (tb_switch_is_usb4(sw))
- return usb4_switch_nvm_read(sw, address, buf, size);
- return dma_port_flash_read(sw->dma_port, address, buf, size);
-}
-
static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
int ret;
@@ -335,8 +285,26 @@ static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
return ret;
}
-static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
- size_t bytes)
+/**
+ * tb_switch_nvm_read() - Read router NVM
+ * @sw: Router whose NVM to read
+ * @address: Start address on the NVM
+ * @buf: Buffer where the read data is copied
+ * @size: Size of the buffer in bytes
+ *
+ * Reads from router NVM and returns the requested data in @buf. Locking
+ * is up to the caller. Returns %0 on success and negative errno in case
+ * of failure.
+ */
+int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_read(sw, address, buf, size);
+ return dma_port_flash_read(sw->dma_port, address, buf, size);
+}
+
+static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_switch *sw = tb_to_switch(nvm->dev);
@@ -349,7 +317,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
- ret = nvm_read(sw, offset, val, bytes);
+ ret = tb_switch_nvm_read(sw, offset, val, bytes);
mutex_unlock(&sw->tb->lock);
out:
@@ -359,8 +327,7 @@ out:
return ret;
}
-static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
- size_t bytes)
+static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_switch *sw = tb_to_switch(nvm->dev);
@@ -384,28 +351,20 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
static int tb_switch_nvm_add(struct tb_switch *sw)
{
struct tb_nvm *nvm;
- u32 val;
int ret;
if (!nvm_readable(sw))
return 0;
- /*
- * The NVM format of non-Intel hardware is not known so
- * currently restrict NVM upgrade for Intel hardware. We may
- * relax this in the future when we learn other NVM formats.
- */
- if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
- sw->config.vendor_id != 0x8087) {
- dev_info(&sw->dev,
- "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
- sw->config.vendor_id);
- return 0;
+ nvm = tb_nvm_alloc(&sw->dev);
+ if (IS_ERR(nvm)) {
+ ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
+ goto err_nvm;
}
- nvm = tb_nvm_alloc(&sw->dev);
- if (IS_ERR(nvm))
- return PTR_ERR(nvm);
+ ret = tb_nvm_read_version(nvm);
+ if (ret)
+ goto err_nvm;
/*
* If the switch is in safe-mode the only accessible portion of
@@ -413,31 +372,13 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
* write new functional NVM.
*/
if (!sw->safe_mode) {
- u32 nvm_size, hdr_size;
-
- ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
- if (ret)
- goto err_nvm;
-
- hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
- nvm_size = (SZ_1M << (val & 7)) / 8;
- nvm_size = (nvm_size - hdr_size) / 2;
-
- ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
- if (ret)
- goto err_nvm;
-
- nvm->major = val >> 16;
- nvm->minor = val >> 8;
-
- ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
+ ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
}
if (!sw->no_nvm_upgrade) {
- ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
- tb_switch_nvm_write);
+ ret = tb_nvm_add_non_active(nvm, nvm_write);
if (ret)
goto err_nvm;
}
@@ -446,7 +387,11 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
return 0;
err_nvm:
- tb_nvm_free(nvm);
+ tb_sw_dbg(sw, "NVM upgrade disabled\n");
+ sw->no_nvm_upgrade = true;
+ if (!IS_ERR(nvm))
+ tb_nvm_free(nvm);
+
return ret;
}
@@ -1229,6 +1174,135 @@ int tb_port_update_credits(struct tb_port *port)
return tb_port_do_update_credits(port->dual_link_port);
}
+static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
+{
+ u32 phy;
+ int ret;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (secondary)
+ phy |= LANE_ADP_CS_1_PMS;
+ else
+ phy &= ~LANE_ADP_CS_1_PMS;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_pm_secondary_enable(struct tb_port *port)
+{
+ return __tb_port_pm_secondary_set(port, true);
+}
+
+static int tb_port_pm_secondary_disable(struct tb_port *port)
+{
+ return __tb_port_pm_secondary_set(port, false);
+}
+
+/* Called for USB4 or Titan Ridge routers only */
+static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
+{
+ u32 val, mask = 0;
+ bool ret;
+
+ /* Don't enable CLx in case of two single-lane links */
+ if (!port->bonded && port->dual_link_port)
+ return false;
+
+ /* Don't enable CLx in case of inter-domain link */
+ if (port->xdomain)
+ return false;
+
+ if (tb_switch_is_usb4(port->sw)) {
+ if (!usb4_port_clx_supported(port))
+ return false;
+ } else if (!tb_lc_is_clx_supported(port)) {
+ return false;
+ }
+
+ if (clx_mask & TB_CL1) {
+ /* CL0s and CL1 are enabled and supported together */
+ mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
+ }
+ if (clx_mask & TB_CL2)
+ mask |= LANE_ADP_CS_0_CL2_SUPPORT;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & mask);
+}
+
+static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
+{
+ u32 phy, mask;
+ int ret;
+
+ /* CL0s and CL1 are enabled and supported together */
+ if (clx == TB_CL1)
+ mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+ else
+ /* For now we support only CL0s and CL1. Not CL2 */
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ phy |= mask;
+ else
+ phy &= ~mask;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
+{
+ return __tb_port_clx_set(port, clx, false);
+}
+
+static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
+{
+ return __tb_port_clx_set(port, clx, true);
+}
+
+/**
+ * tb_port_is_clx_enabled() - Is given CL state enabled
+ * @port: USB4 port to check
+ * @clx_mask: Mask of CL states to check
+ *
+ * Returns true if any of the given CL states is enabled for @port.
+ */
+bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
+{
+ u32 val, mask = 0;
+ int ret;
+
+ if (!tb_port_clx_supported(port, clx_mask))
+ return false;
+
+ if (clx_mask & TB_CL1)
+ mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+ if (clx_mask & TB_CL2)
+ mask |= LANE_ADP_CS_1_CL2_ENABLE;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return false;
+
+ return !!(val & mask);
+}
+
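Because TB_CL1 and TB_CL2 become bit flags in this series (see the tb.h hunk further down), a caller can check several CL states with one call. A small hypothetical sketch, not part of the patch:

	/* Hypothetical check: is any low-power CL state active on this port? */
	static void example_warn_if_clx(struct tb_port *port)
	{
		if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2))
			tb_port_dbg(port, "CL states active, expect higher exit latency\n");
	}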
static int tb_port_start_lane_initialization(struct tb_port *port)
{
int ret;
@@ -1620,7 +1694,7 @@ static ssize_t authorized_show(struct device *dev,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u\n", sw->authorized);
+ return sysfs_emit(buf, "%u\n", sw->authorized);
}
static int disapprove_switch(struct device *dev, void *not_used)
@@ -1730,7 +1804,7 @@ static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u\n", sw->boot);
+ return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);
@@ -1739,7 +1813,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%#x\n", sw->device);
+ return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);
@@ -1748,7 +1822,7 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
+ return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);
@@ -1757,7 +1831,7 @@ generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u\n", sw->generation);
+ return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);
@@ -1771,9 +1845,9 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
return restart_syscall();
if (sw->key)
- ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
+ ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
else
- ret = sprintf(buf, "\n");
+ ret = sysfs_emit(buf, "\n");
mutex_unlock(&sw->tb->lock);
return ret;
@@ -1818,7 +1892,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
+ return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}
/*
@@ -1833,7 +1907,7 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u\n", sw->link_width);
+ return sysfs_emit(buf, "%u\n", sw->link_width);
}
/*
@@ -1850,7 +1924,7 @@ static ssize_t nvm_authenticate_show(struct device *dev,
u32 status;
nvm_get_auth_status(sw, &status);
- return sprintf(buf, "%#x\n", status);
+ return sysfs_emit(buf, "%#x\n", status);
}
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
@@ -1866,6 +1940,11 @@ static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
goto exit_rpm;
}
+ if (sw->no_nvm_upgrade) {
+ ret = -EOPNOTSUPP;
+ goto exit_unlock;
+ }
+
/* If NVMem devices are not yet added */
if (!sw->nvm) {
ret = -EAGAIN;
@@ -1954,7 +2033,7 @@ static ssize_t nvm_version_show(struct device *dev,
else if (!sw->nvm)
ret = -EAGAIN;
else
- ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+ ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
mutex_unlock(&sw->tb->lock);
@@ -1967,7 +2046,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%#x\n", sw->vendor);
+ return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);
@@ -1976,7 +2055,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
+ return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);
@@ -1985,7 +2064,7 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%pUb\n", sw->uuid);
+ return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);
@@ -2822,6 +2901,26 @@ static void tb_switch_credits_init(struct tb_switch *sw)
tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}
+static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ tb_switch_for_each_port(sw, port) {
+ int res;
+
+ if (!port->cap_usb4)
+ continue;
+
+ res = usb4_port_hotplug_enable(port);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -2891,6 +2990,10 @@ int tb_switch_add(struct tb_switch *sw)
return ret;
}
+ ret = tb_switch_port_hotplug_enable(sw);
+ if (ret)
+ return ret;
+
ret = device_add(&sw->dev);
if (ret) {
dev_err(&sw->dev, "failed to add device: %d\n", ret);
@@ -3362,35 +3465,6 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
return NULL;
}
-static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
-{
- u32 phy;
- int ret;
-
- ret = tb_port_read(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
- if (ret)
- return ret;
-
- if (secondary)
- phy |= LANE_ADP_CS_1_PMS;
- else
- phy &= ~LANE_ADP_CS_1_PMS;
-
- return tb_port_write(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
-}
-
-static int tb_port_pm_secondary_enable(struct tb_port *port)
-{
- return __tb_port_pm_secondary_set(port, true);
-}
-
-static int tb_port_pm_secondary_disable(struct tb_port *port)
-{
- return __tb_port_pm_secondary_set(port, false);
-}
-
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
@@ -3409,83 +3483,6 @@ static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
return tb_port_pm_secondary_disable(down);
}
-/* Called for USB4 or Titan Ridge routers only */
-static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
-{
- u32 mask, val;
- bool ret;
-
- /* Don't enable CLx in case of two single-lane links */
- if (!port->bonded && port->dual_link_port)
- return false;
-
- /* Don't enable CLx in case of inter-domain link */
- if (port->xdomain)
- return false;
-
- if (tb_switch_is_usb4(port->sw)) {
- if (!usb4_port_clx_supported(port))
- return false;
- } else if (!tb_lc_is_clx_supported(port)) {
- return false;
- }
-
- switch (clx) {
- case TB_CL1:
- /* CL0s and CL1 are enabled and supported together */
- mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
- break;
-
- /* For now we support only CL0s and CL1. Not CL2 */
- case TB_CL2:
- default:
- return false;
- }
-
- ret = tb_port_read(port, &val, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_0, 1);
- if (ret)
- return false;
-
- return !!(val & mask);
-}
-
-static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
-{
- u32 phy, mask;
- int ret;
-
- /* CL0s and CL1 are enabled and supported together */
- if (clx == TB_CL1)
- mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
- else
- /* For now we support only CL0s and CL1. Not CL2 */
- return -EOPNOTSUPP;
-
- ret = tb_port_read(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
- if (ret)
- return ret;
-
- if (enable)
- phy |= mask;
- else
- phy &= ~mask;
-
- return tb_port_write(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
-}
-
-static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
-{
- return __tb_port_clx_set(port, clx, false);
-}
-
-static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
-{
- return __tb_port_clx_set(port, clx, true);
-}
-
static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 9a277078338c..462845804427 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -105,6 +105,32 @@ static void tb_remove_dp_resources(struct tb_switch *sw)
}
}
+static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *p;
+
+ list_for_each_entry(p, &tcm->dp_resources, list) {
+ if (p == port)
+ return;
+ }
+
+ tb_port_dbg(port, "DP %s resource available discovered\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_add_tail(&port->list, &tcm->dp_resources);
+}
+
+static void tb_discover_dp_resources(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_dp(tunnel))
+ tb_discover_dp_resource(tb, tunnel->dst_port);
+ }
+}
+
static void tb_switch_discover_tunnels(struct tb_switch *sw,
struct list_head *list,
bool alloc_hopids)
@@ -1416,8 +1442,11 @@ static int tb_start(struct tb *tb)
* ICM firmware upgrade needs running firmware and in native
* mode that is not available so disable firmware upgrade of the
* root switch.
+ *
+ * However, USB4 routers support NVM firmware upgrade if they
+ * implement the necessary router operations.
*/
- tb->root_switch->no_nvm_upgrade = true;
+ tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
/* All USB4 routers support runtime PM */
tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
@@ -1446,6 +1475,8 @@ static int tb_start(struct tb *tb)
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb);
+ /* Add DP resources from the DP tunnels created by the boot firmware */
+ tb_discover_dp_resources(tb);
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 0f067c06cba6..f9786976f5ec 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,11 +23,6 @@
#define NVM_MAX_SIZE SZ_512K
#define NVM_DATA_DWORDS 16
-/* Intel specific NVM offsets */
-#define NVM_DEVID 0x05
-#define NVM_VERSION 0x08
-#define NVM_FLASH_SIZE 0x45
-
/**
* struct tb_nvm - Structure holding NVM information
* @dev: Owner of the NVM
@@ -35,28 +30,35 @@
* @minor: Minor version number of the active NVM portion
* @id: Identifier used with both NVM portions
* @active: Active portion NVMem device
+ * @active_size: Size in bytes of the active NVM
* @non_active: Non-active portion NVMem device
* @buf: Buffer where the NVM image is stored before it is written to
* the actual NVM flash device
+ * @buf_data_start: Where the actual image starts after skipping
+ * possible headers
* @buf_data_size: Number of bytes actually consumed by the new NVM
* image
* @authenticating: The device is authenticating the new NVM
* @flushed: The image has been flushed to the storage area
+ * @vops: Router vendor specific NVM operations (optional)
*
* The user of this structure needs to handle serialization of possible
* concurrent access.
*/
struct tb_nvm {
struct device *dev;
- u8 major;
- u8 minor;
+ u32 major;
+ u32 minor;
int id;
struct nvmem_device *active;
+ size_t active_size;
struct nvmem_device *non_active;
void *buf;
+ void *buf_data_start;
size_t buf_data_size;
bool authenticating;
bool flushed;
+ const struct tb_nvm_vendor_ops *vops;
};
enum tb_nvm_write_ops {
@@ -113,8 +115,8 @@ struct tb_switch_tmu {
enum tb_clx {
TB_CLX_DISABLE,
/* CL0s and CL1 are enabled and supported together */
- TB_CL1,
- TB_CL2,
+ TB_CL1 = BIT(0),
+ TB_CL2 = BIT(1),
};
/**
@@ -279,12 +281,16 @@ struct tb_port {
* @can_offline: Does the port have necessary platform support to move
* it into offline mode and back
* @offline: The port is currently in offline mode
+ * @margining: Pointer to margining structure if enabled
*/
struct usb4_port {
struct device dev;
struct tb_port *port;
bool can_offline;
bool offline;
+#ifdef CONFIG_USB4_DEBUGFS_MARGINING
+ struct tb_margining *margining;
+#endif
};
/**
@@ -296,6 +302,7 @@ struct usb4_port {
* @device: Device ID of the retimer
* @port: Pointer to the lane 0 adapter
* @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
+ * @no_nvm_upgrade: Prevent NVM upgrade of this retimer
* @auth_status: Status of last NVM authentication
*/
struct tb_retimer {
@@ -306,6 +313,7 @@ struct tb_retimer {
u32 device;
struct tb_port *port;
struct tb_nvm *nvm;
+ bool no_nvm_upgrade;
u32 auth_status;
};
@@ -737,11 +745,13 @@ static inline void tb_domain_put(struct tb *tb)
}
struct tb_nvm *tb_nvm_alloc(struct device *dev);
-int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read);
+int tb_nvm_read_version(struct tb_nvm *nvm);
+int tb_nvm_validate(struct tb_nvm *nvm);
+int tb_nvm_write_headers(struct tb_nvm *nvm);
+int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read);
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
size_t bytes);
-int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
- nvmem_reg_write_t reg_write);
+int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write);
void tb_nvm_free(struct tb_nvm *nvm);
void tb_nvm_exit(void);
@@ -755,6 +765,8 @@ int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
unsigned int retries, write_block_fn write_next_block,
void *write_block_data);
+int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
@@ -1035,6 +1047,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
+bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@@ -1132,6 +1145,13 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
+static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
+{
+ return tb_to_switch(xd->dev.parent);
+}
+
+int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
+ size_t size);
int tb_retimer_scan(struct tb_port *port, bool add);
void tb_retimer_remove_all(struct tb_port *port);
@@ -1174,6 +1194,7 @@ int usb4_switch_add_ports(struct tb_switch *sw);
void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
+int usb4_port_hotplug_enable(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
@@ -1182,6 +1203,13 @@ int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
+int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
+int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
+ unsigned int ber_level, bool timing, bool right_high,
+ u32 *results);
+int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
+ bool right_high, u32 counter);
+int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
@@ -1264,6 +1292,8 @@ void tb_debugfs_init(void);
void tb_debugfs_exit(void);
void tb_switch_debugfs_init(struct tb_switch *sw);
void tb_switch_debugfs_remove(struct tb_switch *sw);
+void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
+void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
#else
@@ -1271,6 +1301,8 @@ static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
+static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
+static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 166054110388..86319dca0f8c 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -308,6 +308,7 @@ struct tb_regs_port_header {
#define ADP_CS_5 0x05
#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
#define ADP_CS_5_LCA_SHIFT 22
+#define ADP_CS_5_DHP BIT(31)
/* TMU adapter registers */
#define TMU_ADP_CS_3 0x03
@@ -324,6 +325,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL 0x2
#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26)
#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27)
+#define LANE_ADP_CS_0_CL2_SUPPORT BIT(28)
#define LANE_ADP_CS_1 0x01
#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
@@ -333,6 +335,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10)
#define LANE_ADP_CS_1_CL1_ENABLE BIT(11)
+#define LANE_ADP_CS_1_CL2_ENABLE BIT(12)
#define LANE_ADP_CS_1_LD BIT(14)
#define LANE_ADP_CS_1_LB BIT(15)
#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index a386228a44ee..f986854aa207 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1046,6 +1046,26 @@ int usb4_port_unlock(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
+/**
+ * usb4_port_hotplug_enable() - Enables hotplug for a port
+ * @port: USB4 port to operate on
+ *
+ * Enables hot plug events on a given port. This is only intended
+ * to be used on lane, DP-IN, and DP-OUT adapters.
+ */
+int usb4_port_hotplug_enable(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_CS_5_DHP;
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+}
+
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
@@ -1386,6 +1406,126 @@ bool usb4_port_clx_supported(struct tb_port *port)
return !!(val & PORT_CS_18_CPS);
}
+/**
+ * usb4_port_margining_caps() - Read USB4 port margining capabilities
+ * @port: USB4 port
+ * @caps: Array with at least two elements to hold the results
+ *
+ * Reads the USB4 port lane margining capabilities into @caps.
+ */
+int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
+{
+ int ret;
+
+ ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_DATA, caps, sizeof(*caps) * 2);
+}
+
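One way a caller might consult the capabilities before running a timing margin test, sketched with hypothetical names; interpreting USB4_MARGIN_CAP_1_TIME_DESTR as "timing margining is destructive" follows the field name and is an assumption here:

	/* Hypothetical guard: skip timing margining if the router flags it as
	 * destructive to the link (assumption based on the _TIME_DESTR name). */
	static bool example_timing_margining_allowed(struct tb_port *port)
	{
		u32 caps[2];

		if (usb4_port_margining_caps(port, caps))
			return false;
		return !(caps[1] & USB4_MARGIN_CAP_1_TIME_DESTR);
	}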
+/**
+ * usb4_port_hw_margin() - Run hardware lane margining on port
+ * @port: USB4 port
+ * @lanes: Which lanes to run (must match the port capabilities). Can be
+ * %0, %1 or %7.
+ * @ber_level: BER level contour value
+ * @timing: Perform timing margining instead of voltage
+ * @right_high: Use right/high margin instead of left/low
+ * @results: Array with at least two elements to hold the results
+ *
+ * Runs hardware lane margining on USB4 port and returns the result in
+ * @results.
+ */
+int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
+ unsigned int ber_level, bool timing, bool right_high,
+ u32 *results)
+{
+ u32 val;
+ int ret;
+
+ val = lanes;
+ if (timing)
+ val |= USB4_MARGIN_HW_TIME;
+ if (right_high)
+ val |= USB4_MARGIN_HW_RH;
+ if (ber_level)
+ val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
+ USB4_MARGIN_HW_BER_MASK;
+
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_METADATA, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_DATA, results, sizeof(*results) * 2);
+}
+
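The two result dwords can then be unpacked with the USB4_MARGIN_HW_RES_1_* fields from the sb_regs.h hunk. A hypothetical sketch, assuming (as the field names suggest) that the per-lane margins live in the second dword:

	/* Hypothetical decode of the lane 0 low/left margin from results[1]. */
	static void example_decode_hw_margin(const u32 results[2])
	{
		u32 field = results[1] >> USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
		unsigned int margin = field & USB4_MARGIN_HW_RES_1_MARGIN_MASK;

		pr_debug("lane 0 low/left margin %u%s\n", margin,
			 (field & USB4_MARGIN_HW_RES_1_EXCEEDS) ? " (exceeds)" : "");
	}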
+/**
+ * usb4_port_sw_margin() - Run software lane margining on port
+ * @port: USB4 port
+ * @lanes: Which lanes to run (must match the port capabilities). Can be
+ * %0, %1 or %7.
+ * @timing: Perform timing margining instead of voltage
+ * @right_high: Use right/high margin instead of left/low
+ * @counter: What to do with the error counter
+ *
+ * Runs software lane margining on USB4 port. Read back the error
+ * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
+ * success and negative errno otherwise.
+ */
+int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
+ bool right_high, u32 counter)
+{
+ u32 val;
+ int ret;
+
+ val = lanes;
+ if (timing)
+ val |= USB4_MARGIN_SW_TIME;
+ if (right_high)
+ val |= USB4_MARGIN_SW_RH;
+ val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
+ USB4_MARGIN_SW_COUNTER_MASK;
+
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_METADATA, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
+}
+
+/**
+ * usb4_port_sw_margin_errors() - Read the software margining error counters
+ * @port: USB4 port
+ * @errors: Error metadata is copied here.
+ *
+ * This reads back the software margining error counters from the port.
+ * Returns %0 on success and negative errno otherwise.
+ */
+int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
+{
+ int ret;
+
+ ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_METADATA, errors, sizeof(*errors));
+}
+
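A plausible calling sequence for the two software margining helpers above, sketched with hypothetical names and abbreviated error handling:

	/* Hypothetical single software margining pass on lane 0. */
	static int example_sw_margin_lane0(struct tb_port *port, bool timing,
					   bool right_high, u32 *errors)
	{
		int ret;

		/* Clear the error counter and run one pass */
		ret = usb4_port_sw_margin(port, 0, timing, right_high,
					  USB4_MARGIN_SW_COUNTER_CLEAR);
		if (ret)
			return ret;

		/* Read back the accumulated error counter metadata */
		return usb4_port_sw_margin_errors(port, errors);
	}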
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
enum usb4_sb_opcode opcode,
int timeout_msec)
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index c31c0d94d8b3..f00b2f62d8e3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -877,7 +877,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
* It should be null terminated but anything else is pretty much
* allowed.
*/
- return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
+ return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
@@ -903,7 +903,7 @@ static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcid);
+ return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);
@@ -912,7 +912,7 @@ static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcvers);
+ return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);
@@ -921,7 +921,7 @@ static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcrevs);
+ return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);
@@ -930,7 +930,7 @@ static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "0x%08x\n", svc->prtcstns);
+ return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
@@ -1131,11 +1131,6 @@ static int populate_properties(struct tb_xdomain *xd,
return 0;
}
-static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
-{
- return tb_to_switch(xd->dev.parent);
-}
-
static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
bool change = false;
@@ -1440,6 +1435,8 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
if (xd->vendor_name && xd->device_name)
dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
xd->device_name);
+
+ tb_xdomain_debugfs_init(xd);
} else {
kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
}
@@ -1664,7 +1661,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%#x\n", xd->device);
+ return sysfs_emit(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);
@@ -1676,7 +1673,7 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
- ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
+ ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
mutex_unlock(&xd->lock);
return ret;
@@ -1688,7 +1685,7 @@ static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%d\n", xd->remote_max_hopid);
+ return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
}
static DEVICE_ATTR_RO(maxhopid);
@@ -1697,7 +1694,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%#x\n", xd->vendor);
+ return sysfs_emit(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);
@@ -1709,7 +1706,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
- ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
+ ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
mutex_unlock(&xd->lock);
return ret;
@@ -1721,7 +1718,7 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%pUb\n", xd->remote_uuid);
+ return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);
@@ -1730,7 +1727,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
+ return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
}
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
@@ -1741,7 +1738,7 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%u\n", xd->link_width);
+ return sysfs_emit(buf, "%u\n", xd->link_width);
}
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
@@ -1940,6 +1937,8 @@ static int unregister_service(struct device *dev, void *data)
*/
void tb_xdomain_remove(struct tb_xdomain *xd)
{
+ tb_xdomain_debugfs_remove(xd);
+
stop_handshake(xd);
device_for_each_child_reverse(&xd->dev, xd, unregister_service);
@@ -2438,7 +2437,7 @@ int tb_xdomain_init(void)
tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
- xdomain_property_block_gen = prandom_u32();
+ xdomain_property_block_gen = get_random_u32();
return 0;
}
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 81e7f64c1739..f52266766df9 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -94,7 +94,7 @@ static struct tty_driver *serial_driver;
static unsigned char current_ctl_bits;
static void change_speed(struct tty_struct *tty, struct serial_state *info,
- struct ktermios *old);
+ const struct ktermios *old);
static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -566,7 +566,7 @@ static void shutdown(struct tty_struct *tty, struct serial_state *info)
* the specified baud rate for a serial port.
*/
static void change_speed(struct tty_struct *tty, struct serial_state *info,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct tty_port *port = &info->tport;
int quot = 0, baud_base, baud;
@@ -1169,7 +1169,7 @@ static int rs_ioctl(struct tty_struct *tty,
return 0;
}
-static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void rs_set_termios(struct tty_struct *tty, const struct ktermios *old_termios)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index 32366caca662..7d49a872de48 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -29,7 +29,6 @@
/* General device driver settings */
-#define HVC_IUCV_MAGIC 0xc9e4c3e5
#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
@@ -131,9 +130,9 @@ static struct iucv_handler hvc_iucv_handler = {
*/
static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
- if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
+ if (num > hvc_iucv_devices)
return NULL;
- return hvc_iucv_table[num - HVC_IUCV_MAGIC];
+ return hvc_iucv_table[num];
}
/**
@@ -1072,8 +1071,8 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->is_console = is_console;
/* allocate hvc device */
- priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
- HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
+ priv->hvc = hvc_alloc(id, /* PAGE_SIZE */
+ id, &hvc_iucv_ops, 256);
if (IS_ERR(priv->hvc)) {
rc = PTR_ERR(priv->hvc);
goto out_error_hvc;
@@ -1371,7 +1370,7 @@ static int __init hvc_iucv_init(void)
/* register the first terminal device as console
* (must be done before allocating hvc terminal devices) */
- rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
+ rc = hvc_instantiate(0, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
if (rc) {
pr_err("Registering HVC terminal device as "
"Linux console failed\n");
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 9b7e8246a464..4ba24963685e 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -69,6 +69,7 @@
#include <asm/hvconsole.h>
#include <asm/hvcserver.h>
#include <linux/uaccess.h>
+#include <linux/termios_internal.h>
#include <asm/vio.h>
/*
@@ -839,7 +840,7 @@ static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
hvcsd->p_partition_ID = pi->partition_ID;
/* copy the null-term char too */
- strlcpy(hvcsd->p_location_code, pi->location_code,
+ strscpy(hvcsd->p_location_code, pi->location_code,
sizeof(hvcsd->p_location_code));
}
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index f3c72ab1476c..35b6fddf0341 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -491,7 +491,7 @@ static int moxa_write(struct tty_struct *, const unsigned char *, int);
static unsigned int moxa_write_room(struct tty_struct *);
static void moxa_flush_buffer(struct tty_struct *);
static unsigned int moxa_chars_in_buffer(struct tty_struct *);
-static void moxa_set_termios(struct tty_struct *, struct ktermios *);
+static void moxa_set_termios(struct tty_struct *, const struct ktermios *);
static void moxa_stop(struct tty_struct *);
static void moxa_start(struct tty_struct *);
static void moxa_hangup(struct tty_struct *);
@@ -499,7 +499,7 @@ static int moxa_tiocmget(struct tty_struct *tty);
static int moxa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static void moxa_poll(struct timer_list *);
-static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
+static void moxa_set_tty_param(struct tty_struct *, const struct ktermios *);
static void moxa_shutdown(struct tty_port *);
static int moxa_carrier_raised(struct tty_port *);
static void moxa_dtr_rts(struct tty_port *, int);
@@ -1602,7 +1602,7 @@ static int moxa_tiocmset(struct tty_struct *tty,
}
static void moxa_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct moxa_port *ch = tty->driver_data;
@@ -1761,7 +1761,8 @@ static void moxa_poll(struct timer_list *unused)
/******************************************************************************/
-static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios)
+static void moxa_set_tty_param(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
register struct ktermios *ts = &tty->termios;
struct moxa_port *ch = tty->driver_data;
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 70b982b2c6b2..2436e0b10f9a 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -398,7 +398,7 @@ static enum mxser_must_hwid mxser_must_get_hwid(unsigned long io)
oldmcr = inb(io + UART_MCR);
outb(0, io + UART_MCR);
mxser_set_must_xon1_value(io, 0x11);
- if ((hwid = inb(io + UART_MCR)) != 0) {
+ if (inb(io + UART_MCR) != 0) {
outb(oldmcr, io + UART_MCR);
return MOXA_OTHER_UART;
}
@@ -571,7 +571,8 @@ static void mxser_handle_cts(struct tty_struct *tty, struct mxser_port *info,
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
-static void mxser_change_speed(struct tty_struct *tty, struct ktermios *old_termios)
+static void mxser_change_speed(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
struct mxser_port *info = tty->driver_data;
unsigned cflag, cval;
@@ -1348,7 +1349,8 @@ static void mxser_start(struct tty_struct *tty)
spin_unlock_irqrestore(&info->slock, flags);
}
-static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void mxser_set_termios(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 01c112e2e214..5e516f5cac5a 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -63,6 +63,14 @@
static int debug;
module_param(debug, int, 0600);
+/* Module debug bits */
+#define DBG_DUMP BIT(0) /* Data transmission dump. */
+#define DBG_CD_ON BIT(1) /* Always assume CD line on. */
+#define DBG_DATA BIT(2) /* Data transmission details. */
+#define DBG_ERRORS BIT(3) /* Details for fail conditions. */
+#define DBG_TTY BIT(4) /* Transmission statistics for DLCI TTYs. */
+#define DBG_PAYLOAD BIT(5) /* Include user payload frames in DBG_DUMP. */
+
/* Defaults: these are from the specification */
#define T1 10 /* 100mS */
@@ -164,6 +172,9 @@ struct gsm_dlci {
struct net_device *net; /* network interface, if created */
};
+/* Total number of supported devices */
+#define GSM_TTY_MINORS 256
+
/* DLCI 0, 62/63 are special or reserved see gsmtty_open */
#define NUM_DLCI 64
@@ -184,6 +195,11 @@ struct gsm_control {
int error; /* Error if any */
};
+enum gsm_encoding {
+ GSM_BASIC_OPT,
+ GSM_ADV_OPT,
+};
+
enum gsm_mux_state {
GSM_SEARCH,
GSM_START,
@@ -230,7 +246,7 @@ struct gsm_mux {
unsigned int address;
unsigned int count;
bool escape;
- int encoding;
+ enum gsm_encoding encoding;
u8 control;
u8 fcs;
u8 *txframe; /* TX framing buffer */
@@ -527,7 +543,7 @@ static int gsm_register_devices(struct tty_driver *driver, unsigned int index)
*/
dev = tty_register_device(gsm_tty_driver, base + i, NULL);
if (IS_ERR(dev)) {
- if (debug & 8)
+ if (debug & DBG_ERRORS)
pr_info("%s failed to register device minor %u",
__func__, base + i);
for (i--; i >= 1; i--)
@@ -581,8 +597,12 @@ static void gsm_unregister_devices(struct tty_driver *driver,
static void gsm_print_packet(const char *hdr, int addr, int cr,
u8 control, const u8 *data, int dlen)
{
- if (!(debug & 1))
+ if (!(debug & DBG_DUMP))
return;
+ /* Only show user payload frames if debug & DBG_PAYLOAD */
+ if (!(debug & DBG_PAYLOAD) && addr != 0)
+ if ((control & ~PF) == UI || (control & ~PF) == UIH)
+ return;
pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]);
@@ -693,7 +713,7 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
*dp++ = (addr << 2) | (ocr << 1) | EA;
*dp++ = control;
- if (gsm->encoding == 0)
+ if (gsm->encoding == GSM_BASIC_OPT)
*dp++ = EA; /* Length of data = 0 */
*dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data);
@@ -812,7 +832,7 @@ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
int len, ret;
- if (gsm->encoding == 0) {
+ if (gsm->encoding == GSM_BASIC_OPT) {
gsm->txframe[0] = GSM0_SOF;
memcpy(gsm->txframe + 1, msg->data, msg->len);
gsm->txframe[msg->len + 1] = GSM0_SOF;
@@ -824,7 +844,7 @@ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
len += 2;
}
- if (debug & 4)
+ if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, gsm->txframe, len);
gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data,
msg->len);
@@ -964,7 +984,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
u8 *fcs = dp + msg->len;
/* Fill in the header */
- if (gsm->encoding == 0) {
+ if (gsm->encoding == GSM_BASIC_OPT) {
if (msg->len < 128)
*--dp = (msg->len << 1) | EA;
else {
@@ -1306,6 +1326,31 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
/**
+ * gsm_control_command - send a command frame to a control
+ * @gsm: gsm channel
+ * @cmd: the command to use
+ * @data: data to follow encoded info
+ * @dlen: length of data
+ *
+ * Encode up and queue a UI/UIH frame containing our command.
+ */
+static int gsm_control_command(struct gsm_mux *gsm, int cmd, const u8 *data,
+ int dlen)
+{
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+
+ if (msg == NULL)
+ return -ENOMEM;
+
+ msg->data[0] = (cmd << 1) | CR | EA; /* Set C/R */
+ msg->data[1] = (dlen << 1) | EA;
+ memcpy(msg->data + 2, data, dlen);
+ gsm_data_queue(gsm->dlci[0], msg);
+
+ return 0;
+}
+
+/**
* gsm_control_reply - send a response frame to a control
* @gsm: gsm channel
* @cmd: the command to use
@@ -1407,18 +1452,12 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
unsigned int modem = 0;
struct gsm_dlci *dlci;
int len = clen;
- int slen;
+ int cl = clen;
const u8 *dp = data;
struct tty_struct *tty;
- while (gsm_read_ea(&addr, *dp++) == 0) {
- len--;
- if (len == 0)
- return;
- }
- /* Must be at least one byte following the EA */
- len--;
- if (len <= 0)
+ len = gsm_read_ea_val(&addr, data, cl);
+ if (len < 1)
return;
addr >>= 1;
@@ -1427,15 +1466,20 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
return;
dlci = gsm->dlci[addr];
- slen = len;
- while (gsm_read_ea(&modem, *dp++) == 0) {
- len--;
- if (len == 0)
- return;
- }
- len--;
+ /* Must be at least one byte following the EA */
+ if ((cl - len) < 1)
+ return;
+
+ dp += len;
+ cl -= len;
+
+ /* get the modem status */
+ len = gsm_read_ea_val(&modem, dp, cl);
+ if (len < 1)
+ return;
+
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem, slen - len);
+ gsm_process_modem(tty, dlci, modem, cl);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
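gsm_read_ea_val(), used throughout these hunks, is not shown in this excerpt; it walks octets whose low bit is the 3GPP TS 27.010 EA (extension) flag. A hedged sketch of that decoding with a hypothetical name — the in-tree helper may differ in detail:

	/* Hypothetical EA-coded value reader: bit 0 of each octet is the EA flag
	 * (1 = last octet), the upper seven bits carry value data. Returns the
	 * number of octets consumed, or 0 if the field is not terminated. */
	static unsigned int example_read_ea_val(unsigned int *val, const u8 *data, int dlen)
	{
		unsigned int used = 0;

		*val = 0;
		while (dlen-- > 0) {
			used++;
			*val = (*val << 7) | (*data >> 1);
			if (*data++ & 1)
				return used;
		}
		return 0;
	}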
@@ -1611,13 +1655,7 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
{
- struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype);
- if (msg == NULL)
- return;
- msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */
- msg->data[1] = (ctrl->len << 1) | EA;
- memcpy(msg->data + 2, ctrl->data, ctrl->len);
- gsm_data_queue(gsm->dlci[0], msg);
+ gsm_control_command(gsm, ctrl->cmd, ctrl->data, ctrl->len);
}
/**
@@ -1737,7 +1775,7 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
del_timer(&dlci->t1);
- if (debug & 8)
+ if (debug & DBG_ERRORS)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
/* Prevent us from sending data before the link is up again */
@@ -1771,7 +1809,7 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
/* This will let a tty open continue */
dlci->state = DLCI_OPEN;
dlci->constipated = false;
- if (debug & 8)
+ if (debug & DBG_ERRORS)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
if (dlci->addr)
@@ -1807,7 +1845,7 @@ static void gsm_dlci_t1(struct timer_list *t)
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
- if (debug & 8)
+ if (debug & DBG_ERRORS)
pr_info("DLCI %d opening in ADM mode.\n",
dlci->addr);
dlci->mode = DLCI_MODE_ADM;
@@ -1910,11 +1948,10 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
struct tty_port *port = &dlci->port;
struct tty_struct *tty;
unsigned int modem = 0;
- int len = clen;
- int slen = 0;
+ int len;
- if (debug & 16)
- pr_debug("%d bytes for tty\n", len);
+ if (debug & DBG_TTY)
+ pr_debug("%d bytes for tty\n", clen);
switch (dlci->adaption) {
/* Unsupported types */
case 4: /* Packetised interruptible data */
@@ -1922,24 +1959,22 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
case 3: /* Packetised uninterruptible voice/data */
break;
case 2: /* Asynchronous serial with line state in each frame */
- while (gsm_read_ea(&modem, *data++) == 0) {
- len--;
- slen++;
- if (len == 0)
- return;
- }
- len--;
- slen++;
+ len = gsm_read_ea_val(&modem, data, clen);
+ if (len < 1)
+ return;
tty = tty_port_tty_get(port);
if (tty) {
- gsm_process_modem(tty, dlci, modem, slen);
+ gsm_process_modem(tty, dlci, modem, len);
tty_wakeup(tty);
tty_kref_put(tty);
}
+ /* Skip processed modem data */
+ data += len;
+ clen -= len;
fallthrough;
case 1: /* Line state will go via DLCI 0 controls only */
default:
- tty_insert_flip_string(port, data, len);
+ tty_insert_flip_string(port, data, clen);
tty_flip_buffer_push(port);
}
}
@@ -1960,24 +1995,27 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
{
/* See what command is involved */
unsigned int command = 0;
- while (len-- > 0) {
- if (gsm_read_ea(&command, *data++) == 1) {
- int clen = *data++;
- len--;
- /* FIXME: this is properly an EA */
- clen >>= 1;
- /* Malformed command ? */
- if (clen > len)
- return;
- if (command & 1)
- gsm_control_message(dlci->gsm, command,
- data, clen);
- else
- gsm_control_response(dlci->gsm, command,
- data, clen);
- return;
- }
- }
+ unsigned int clen = 0;
+ unsigned int dlen;
+
+ /* read the command */
+ dlen = gsm_read_ea_val(&command, data, len);
+ len -= dlen;
+ data += dlen;
+
+ /* read any control data */
+ dlen = gsm_read_ea_val(&clen, data, len);
+ len -= dlen;
+ data += dlen;
+
+ /* Malformed command? */
+ if (clen > len)
+ return;
+
+ if (command & 1)
+ gsm_control_message(dlci->gsm, command, data, clen);
+ else
+ gsm_control_response(dlci->gsm, command, data, clen);
}
/**
@@ -1999,7 +2037,7 @@ static void gsm_kick_timeout(struct work_struct *work)
sent = gsm_dlci_data_sweep(gsm);
mutex_unlock(&gsm->tx_mutex);
- if (sent && debug & 4)
+ if (sent && debug & DBG_DATA)
pr_info("%s TX queue stalled\n", __func__);
}
@@ -2133,7 +2171,7 @@ static void gsm_queue(struct gsm_mux *gsm)
if (gsm->fcs != GOOD_FCS) {
gsm->bad_fcs++;
- if (debug & 4)
+ if (debug & DBG_DATA)
pr_debug("BAD FCS %02x\n", gsm->fcs);
return;
}
@@ -2497,7 +2535,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
if (dlci == NULL)
return -ENOMEM;
- if (gsm->encoding == 0)
+ if (gsm->encoding == GSM_BASIC_OPT)
gsm->receive = gsm0_receive;
else
gsm->receive = gsm1_receive;
@@ -2614,7 +2652,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
gsm->n2 = N2;
gsm->ftype = UIH;
gsm->adaption = 1;
- gsm->encoding = 1;
+ gsm->encoding = GSM_ADV_OPT;
gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
gsm->mtu = 64;
gsm->dead = true; /* Avoid early tty opens */
@@ -2716,7 +2754,7 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
gsm->initiator = c->initiator;
gsm->mru = c->mru;
gsm->mtu = c->mtu;
- gsm->encoding = c->encapsulation;
+ gsm->encoding = c->encapsulation ? GSM_ADV_OPT : GSM_BASIC_OPT;
gsm->adaption = c->adaption;
gsm->n2 = c->n2;
@@ -2760,7 +2798,7 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
return -ENOSPC;
}
- if (debug & 4)
+ if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, data, len);
return gsm->tty->ops->write(gsm->tty, data, len);
}
@@ -2846,7 +2884,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
struct gsm_mux *gsm = tty->disc_data;
char flags = TTY_NORMAL;
- if (debug & 4)
+ if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, cp, count);
for (; count; count--, cp++) {
@@ -2939,8 +2977,7 @@ static int gsmld_open(struct tty_struct *tty)
tty->receive_room = 65536;
/* Attach the initial passive connection */
- gsm->encoding = 1;
-
+ gsm->encoding = GSM_ADV_OPT;
gsmld_attach_gsm(tty, gsm);
return 0;
@@ -3336,7 +3373,7 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
struct gsm_control *ctrl;
int len = 2;
- if (dlci->gsm->encoding != 0)
+ if (dlci->gsm->encoding != GSM_BASIC_OPT)
return 0;
modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
@@ -3365,7 +3402,7 @@ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
/* Send convergence layer type 2 empty data frame. */
gsm_modem_upd_via_data(dlci, brk);
return 0;
- } else if (dlci->gsm->encoding == 0) {
+ } else if (dlci->gsm->encoding == GSM_BASIC_OPT) {
/* Send as MSC control message. */
return gsm_modem_upd_via_msc(dlci, brk);
}
@@ -3382,15 +3419,15 @@ static int gsm_carrier_raised(struct tty_port *port)
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return 0;
- if (debug & 2)
+ if (debug & DBG_CD_ON)
return 1;
/*
* Basic mode with control channel in ADM mode may not respond
* to CMD_MSC at all and modem_rx is empty.
*/
- if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
- !dlci->modem_rx)
+ if (gsm->encoding == GSM_BASIC_OPT &&
+ gsm->dlci[0]->mode == DLCI_MODE_ADM && !dlci->modem_rx)
return 1;
return dlci->modem_rx & TIOCM_CD;
@@ -3638,7 +3675,8 @@ static int gsmtty_ioctl(struct tty_struct *tty,
}
}
-static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void gsmtty_set_termios(struct tty_struct *tty,
+ const struct ktermios *old)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
@@ -3736,7 +3774,7 @@ static int __init gsm_init(void)
return status;
}
- gsm_tty_driver = tty_alloc_driver(256, TTY_DRIVER_REAL_RAW |
+ gsm_tty_driver = tty_alloc_driver(GSM_TTY_MINORS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
if (IS_ERR(gsm_tty_driver)) {
pr_err("gsm_init: tty allocation failed.\n");
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 94c1ec2dd754..46b09bfb6f3a 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -76,8 +76,6 @@
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define HDLC_MAGIC 0x239e
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -98,7 +96,6 @@
#include <linux/if.h>
#include <linux/bitops.h>
-#include <asm/termios.h>
#include <linux/uaccess.h>
#include "tty.h"
@@ -124,7 +121,6 @@ struct n_hdlc_buf_list {
/**
* struct n_hdlc - per device instance data structure
- * @magic: magic value for structure
* @tbusy: reentrancy flag for tx wakeup code
* @woke_up: tx wakeup needs to be run again as it was called while @tbusy
* @tx_buf_list: list of pending transmit frame buffers
@@ -133,7 +129,6 @@ struct n_hdlc_buf_list {
* @rx_free_buf_list: list unused received frame buffers
*/
struct n_hdlc {
- int magic;
bool tbusy;
bool woke_up;
struct n_hdlc_buf_list tx_buf_list;
@@ -200,10 +195,6 @@ static void n_hdlc_tty_close(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
- if (n_hdlc->magic != HDLC_MAGIC) {
- pr_warn("n_hdlc: trying to close unopened tty!\n");
- return;
- }
#if defined(TTY_NO_WRITE_SPLIT)
clear_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
#endif
@@ -386,12 +377,6 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
pr_debug("%s() called count=%d\n", __func__, count);
- /* verify line is using HDLC discipline */
- if (n_hdlc->magic != HDLC_MAGIC) {
- pr_err("line not using HDLC discipline\n");
- return;
- }
-
if (count > maxframe) {
pr_debug("rx count>maxframesize, data discarded\n");
return;
@@ -542,9 +527,6 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
pr_debug("%s() called count=%zd\n", __func__, count);
- if (n_hdlc->magic != HDLC_MAGIC)
- return -EIO;
-
/* verify frame size */
if (count > maxframe) {
pr_debug("%s: truncating user packet from %zu to %d\n",
@@ -609,10 +591,6 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
pr_debug("%s() called %d\n", __func__, cmd);
- /* Verify the status of the device */
- if (n_hdlc->magic != HDLC_MAGIC)
- return -EBADF;
-
switch (cmd) {
case FIONREAD:
/* report count of read data available */
@@ -673,9 +651,6 @@ static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
struct n_hdlc *n_hdlc = tty->disc_data;
__poll_t mask = 0;
- if (n_hdlc->magic != HDLC_MAGIC)
- return 0;
-
/*
* queue the current process into any wait queue that may awaken in the
* future (read and write)
@@ -739,9 +714,6 @@ static struct n_hdlc *n_hdlc_alloc(void)
n_hdlc_alloc_buf(&n_hdlc->rx_free_buf_list, DEFAULT_RX_BUF_COUNT, "rx");
n_hdlc_alloc_buf(&n_hdlc->tx_free_buf_list, DEFAULT_TX_BUF_COUNT, "tx");
- /* Initialize the control block */
- n_hdlc->magic = HDLC_MAGIC;
-
return n_hdlc;
} /* end of n_hdlc_alloc() */
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 3afdd9033a9c..597019690ae6 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1758,7 +1758,7 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
*
* Locking: Caller holds @tty->termios_rwsem
*/
-static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void n_tty_set_termios(struct tty_struct *tty, const struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 752dab3356d7..07394fdaf522 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -240,7 +240,7 @@ out:
}
static void pty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
/* See if packet mode change of state. */
if (tty->link && tty->link->ctrl.packet) {
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 65d6af755567..c7d34823f715 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -243,7 +243,7 @@ static void serial21285_shutdown(struct uart_port *port)
static void
serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, quot, h_lcr, b;
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 8efdc271eb75..fa8ccf204d86 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -755,7 +755,7 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
static void brcmstb_set_termios(struct uart_port *up,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_8250_port *p8250 = up_to_u8250p(up);
struct brcmuart_priv *priv = up->private_data;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 2e83e7367441..94fbf0add2ce 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -298,10 +298,9 @@ static void serial8250_backup_timeout(struct timer_list *t)
jiffies + uart_poll_timeout(&up->port) + HZ / 5);
}
-static int univ8250_setup_irq(struct uart_8250_port *up)
+static void univ8250_setup_timer(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
- int retval = 0;
/*
* The above check will only give an accurate result the first time
@@ -322,10 +321,16 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
*/
if (!port->irq)
mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
- else
- retval = serial_link_irq_chain(up);
+}
- return retval;
+static int univ8250_setup_irq(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+
+ if (port->irq)
+ return serial_link_irq_chain(up);
+
+ return 0;
}
static void univ8250_release_irq(struct uart_8250_port *up)
@@ -381,6 +386,7 @@ static struct uart_ops univ8250_port_ops;
static const struct uart_8250_ops univ8250_driver_ops = {
.setup_irq = univ8250_setup_irq,
.release_irq = univ8250_release_irq,
+ .setup_timer = univ8250_setup_timer,
};
static struct uart_8250_port serial8250_ports[UART_NR];
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index a8dba4a0a8fb..b85c82616e8c 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -26,9 +26,7 @@ static void __dma_tx_complete(void *param)
dma->tx_running = 0;
- xmit->tail += dma->tx_size;
- xmit->tail &= UART_XMIT_SIZE - 1;
- p->port.icount.tx += dma->tx_size;
+ uart_xmit_advance(&p->port, dma->tx_size);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&p->port);
@@ -107,8 +105,7 @@ int serial8250_tx_dma(struct uart_8250_port *p)
dma_async_issue_pending(dma->txchan);
serial8250_clear_THRI(p);
- if (dma->tx_err)
- dma->tx_err = 0;
+ dma->tx_err = 0;
return 0;
err:
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a604b42e4458..7db51781289e 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -350,7 +350,7 @@ dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
}
static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long newrate = tty_termios_baud_rate(termios) * 16;
struct dw8250_data *d = to_dw8250_data(p->private_data);
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index dbe4d44f60d4..75f32f054ebb 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -92,7 +92,8 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
serial8250_do_set_divisor(p, baud, quot, quot_frac);
}
-void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct ktermios *old)
+void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios,
+ const struct ktermios *old)
{
p->status &= ~UPSTAT_AUTOCTS;
if (termios->c_cflag & CRTSCTS)
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index 055bfdc87985..f13e91f2cace 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -47,7 +47,7 @@ struct dw8250_data {
unsigned int uart_16550_compatible:1;
};
-void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct ktermios *old);
+void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old);
void dw8250_setup_port(struct uart_port *p);
static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 65b6b3cbaff6..e2aa2a1a02dd 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -278,7 +278,7 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
static void fintek_8250_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct fintek_8250 *pdata = port->private_data;
unsigned int baud = tty_termios_baud_rate(termios);
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 4ba43bef9933..44cc755b1a29 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -70,7 +70,7 @@ static inline struct lpss8250 *to_lpss8250(struct dw8250_port_data *data)
}
static void byt_set_termios(struct uart_port *p, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
struct lpss8250 *lpss = to_lpss8250(p->private_data);
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index 737c4c31e8a0..f46ca13ff4aa 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -7,7 +7,6 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
-#include <uapi/linux/serial_core.h>
#define MEN_UART_ID_Z025 0x19
#define MEN_UART_ID_Z057 0x39
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index a2a03acb04ad..2cc78a4bf7a1 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -206,9 +206,8 @@ static void dnv_exit(struct mid8250 *mid)
/*****************************************************************************/
-static void mid8250_set_termios(struct uart_port *p,
- struct ktermios *termios,
- struct ktermios *old)
+static void mid8250_set_termios(struct uart_port *p, struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
struct mid8250 *mid = p->private_data;
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 54051ec7b499..fb1d5ec0940e 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -291,7 +291,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
static void
mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
static const unsigned short fraction_L_mapping[] = {
0, 1, 0x5, 0x15, 0x55, 0x57, 0x57, 0x77, 0x7F, 0xFF, 0xFF
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index f7fbef83583c..41b8c6b27136 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -342,6 +342,9 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
omap8250_update_mdr1(up, priv);
up->port.ops->set_mctrl(&up->port, up->port.mctrl);
+
+ if (up->port.rs485.flags & SER_RS485_ENABLED)
+ serial8250_em485_stop_tx(up);
}
/*
@@ -350,7 +353,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
*/
static void omap_8250_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
@@ -984,9 +987,7 @@ static void omap_8250_dma_tx_complete(void *param)
dma->tx_running = 0;
- xmit->tail += dma->tx_size;
- xmit->tail &= UART_XMIT_SIZE - 1;
- p->port.icount.tx += dma->tx_size;
+ uart_xmit_advance(&p->port, dma->tx_size);
if (priv->delayed_restore) {
priv->delayed_restore = 0;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 6f66dc2ebacc..8e9f247590bd 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1232,6 +1232,10 @@ static void pci_oxsemi_tornado_set_mctrl(struct uart_port *port,
serial8250_do_set_mctrl(port, mctrl);
}
+/*
+ * We require EFR features for clock programming, so set UPF_FULL_PROBE
+ * for full probing regardless of CONFIG_SERIAL_8250_16550A_VARIANTS setting.
+ */
static int pci_oxsemi_tornado_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *up, int idx)
@@ -1239,6 +1243,7 @@ static int pci_oxsemi_tornado_setup(struct serial_private *priv,
struct pci_dev *dev = priv->dev;
if (pci_oxsemi_tornado_p(dev)) {
+ up->port.flags |= UPF_FULL_PROBE;
up->port.get_divisor = pci_oxsemi_tornado_get_divisor;
up->port.set_divisor = pci_oxsemi_tornado_set_divisor;
up->port.set_mctrl = pci_oxsemi_tornado_set_mctrl;
@@ -1627,7 +1632,6 @@ static int pci_fintek_init(struct pci_dev *dev)
resource_size_t bar_data[3];
u8 config_base;
struct serial_private *priv = pci_get_drvdata(dev);
- struct uart_8250_port *port;
if (!(pci_resource_flags(dev, 5) & IORESOURCE_IO) ||
!(pci_resource_flags(dev, 4) & IORESOURCE_IO) ||
@@ -1674,13 +1678,7 @@ static int pci_fintek_init(struct pci_dev *dev)
pci_write_config_byte(dev, config_base + 0x06, dev->irq);
- if (priv) {
- /* re-apply RS232/485 mode when
- * pciserial_resume_ports()
- */
- port = serial8250_get_port(priv->line[i]);
- uart_rs485_config(&port->port);
- } else {
+ if (!priv) {
/* First init without port data
* force init to RS232 Mode
*/
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 39b35a61958c..fe8662cd9402 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -600,7 +600,7 @@ EXPORT_SYMBOL_GPL(serial8250_rpm_put);
static int serial8250_em485_init(struct uart_8250_port *p)
{
if (p->em485)
- return 0;
+ goto deassert_rts;
p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC);
if (!p->em485)
@@ -616,7 +616,9 @@ static int serial8250_em485_init(struct uart_8250_port *p)
p->em485->active_timer = NULL;
p->em485->tx_stopped = true;
- p->rs485_stop_tx(p);
+deassert_rts:
+ if (p->em485->tx_stopped)
+ p->rs485_stop_tx(p);
return 0;
}
@@ -752,6 +754,14 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
serial8250_rpm_put(p);
}
+static void serial8250_clear_IER(struct uart_8250_port *up)
+{
+ if (up->capabilities & UART_CAP_UUE)
+ serial_out(up, UART_IER, UART_IER_UUE);
+ else
+ serial_out(up, UART_IER, 0);
+}
+
#ifdef CONFIG_SERIAL_8250_RSA
/*
* Attempts to turn on the RSA FIFO. Returns zero on failure.
@@ -1021,7 +1031,8 @@ static void autoconfig_16550a(struct uart_8250_port *up)
up->port.type = PORT_16550A;
up->capabilities |= UART_CAP_FIFO;
- if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS))
+ if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS) &&
+ !(up->port.flags & UPF_FULL_PROBE))
return;
/*
@@ -1133,7 +1144,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
* internal UARTs.
* We're going to explicitly set the UUE bit to 0 before
* trying to write and read a 1 just to make sure it's not
- * already a 1 and maybe locked there before we even start start.
+ * already a 1 and maybe locked there before we even start.
*/
iersave = serial_in(up, UART_IER);
serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
@@ -1329,10 +1340,7 @@ static void autoconfig(struct uart_8250_port *up)
serial8250_out_MCR(up, save_mcr);
serial8250_clear_fifos(up);
serial_in(up, UART_RX);
- if (up->capabilities & UART_CAP_UUE)
- serial_out(up, UART_IER, UART_IER_UUE);
- else
- serial_out(up, UART_IER, 0);
+ serial8250_clear_IER(up);
out_unlock:
spin_unlock_irqrestore(&port->lock, flags);
@@ -2042,6 +2050,9 @@ EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ if (port->rs485.flags & SER_RS485_ENABLED)
+ return;
+
if (port->set_mctrl)
port->set_mctrl(port, mctrl);
else
@@ -2142,10 +2153,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
- if (up->capabilities & UART_CAP_UUE)
- serial_port_out(port, UART_IER, UART_IER_UUE);
- else
- serial_port_out(port, UART_IER, 0);
+ serial8250_clear_IER(up);
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
/*
@@ -2294,6 +2302,10 @@ int serial8250_do_startup(struct uart_port *port)
if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
up->port.irqflags |= IRQF_SHARED;
+ retval = up->ops->setup_irq(up);
+ if (retval)
+ goto out;
+
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
@@ -2336,9 +2348,7 @@ int serial8250_do_startup(struct uart_port *port)
}
}
- retval = up->ops->setup_irq(up);
- if (retval)
- goto out;
+ up->ops->setup_timer(up);
/*
* Now, initialize the UART
@@ -2651,7 +2661,7 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int tolerance = port->uartclk / 100;
unsigned int min;
@@ -2737,7 +2747,7 @@ EXPORT_SYMBOL_GPL(serial8250_update_uartclk);
void
serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned char cval;
@@ -2875,7 +2885,7 @@ EXPORT_SYMBOL(serial8250_do_set_termios);
static void
serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
if (port->set_termios)
port->set_termios(port, termios, old);
@@ -3187,9 +3197,6 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (flags & UART_CONFIG_TYPE)
autoconfig(up);
- if (port->rs485.flags & SER_RS485_ENABLED)
- uart_rs485_config(port);
-
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
up->bugs |= UART_BUG_NOMSR;
@@ -3314,8 +3321,13 @@ static void serial8250_console_restore(struct uart_8250_port *up)
unsigned int baud, quot, frac = 0;
termios.c_cflag = port->cons->cflag;
- if (port->state->port.tty && termios.c_cflag == 0)
+ termios.c_ispeed = port->cons->ispeed;
+ termios.c_ospeed = port->cons->ospeed;
+ if (port->state->port.tty && termios.c_cflag == 0) {
termios.c_cflag = port->state->port.tty->termios.c_cflag;
+ termios.c_ispeed = port->state->port.tty->termios.c_ispeed;
+ termios.c_ospeed = port->state->port.tty->termios.c_ospeed;
+ }
baud = serial8250_get_baud_rate(port, &termios, NULL);
quot = serial8250_get_divisor(port, baud, &frac);
@@ -3383,11 +3395,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
-
- if (up->capabilities & UART_CAP_UUE)
- serial_port_out(port, UART_IER, UART_IER_UUE);
- else
- serial_port_out(port, UART_IER, 0);
+ serial8250_clear_IER(up);
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index ba4b63fd511e..434f83168546 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -127,6 +127,7 @@ config SERIAL_SB1250_DUART_CONSOLE
config SERIAL_ATMEL
bool "AT91 on-chip serial port support"
+ depends on COMMON_CLK
depends on ARCH_AT91 || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
@@ -427,7 +428,7 @@ config SERIAL_PXA
config SERIAL_PXA_NON8250
bool
- depends on !SERIAL_8250
+ depends on !SERIAL_8250 || COMPILE_TEST
config SERIAL_PXA_CONSOLE
bool "Console on PXA serial port (DEPRECATED)"
@@ -602,21 +603,6 @@ config SERIAL_MUX_CONSOLE
select SERIAL_CORE_CONSOLE
default y
-config PDC_CONSOLE
- bool "PDC software console support"
- depends on PARISC && !SERIAL_MUX && VT
- help
- Saying Y here will enable the software based PDC console to be
- used as the system console. This is useful for machines in
- which the hardware based console has not been written yet. The
- following steps must be completed to use the PDC console:
-
- 1. create the device entry (mknod /dev/ttyB0 c 11 0)
- 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0
- 3. Add device ttyB0 to /etc/securetty (if you want to log on as
- root on this console.)
- 4. Change the kernel command console parameter to: console=ttyB0
-
config SERIAL_SUNSAB
tristate "Sun Siemens SAB82532 serial support"
depends on SPARC && PCI
@@ -1325,7 +1311,7 @@ config SERIAL_FSL_LPUART
config SERIAL_FSL_LPUART_CONSOLE
bool "Console on Freescale lpuart serial port"
- depends on SERIAL_FSL_LPUART=y
+ depends on SERIAL_FSL_LPUART
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index cb791c5149a3..c2d154d78e54 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -9,6 +9,7 @@
* (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -48,7 +49,6 @@
#define ALTERA_JTAGUART_CONTROL_WI_MSK 0x00000200
#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400
#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000
-#define ALTERA_JTAGUART_CONTROL_WSPACE_OFF 16
/*
* Local per-uart structure.
@@ -59,10 +59,19 @@ struct altera_jtaguart {
unsigned long imr; /* Local IMR mirror */
};
+static unsigned int altera_jtaguart_tx_space(struct uart_port *port, u32 *ctlp)
+{
+ u32 ctl = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+ if (ctlp)
+ *ctlp = ctl;
+
+ return FIELD_GET(ALTERA_JTAGUART_CONTROL_WSPACE_MSK, ctl);
+}
+
static unsigned int altera_jtaguart_tx_empty(struct uart_port *port)
{
- return (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
- ALTERA_JTAGUART_CONTROL_WSPACE_MSK) ? TIOCSER_TEMT : 0;
+ return altera_jtaguart_tx_space(port, NULL) ? TIOCSER_TEMT : 0;
}
static unsigned int altera_jtaguart_get_mctrl(struct uart_port *port)
@@ -106,8 +115,8 @@ static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state)
}
static void altera_jtaguart_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
/* Just copy the old termios settings back */
if (old)
@@ -150,9 +159,7 @@ static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
pending = uart_circ_chars_pending(xmit);
if (pending > 0) {
- count = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
- ALTERA_JTAGUART_CONTROL_WSPACE_MSK) >>
- ALTERA_JTAGUART_CONTROL_WSPACE_OFF;
+ count = altera_jtaguart_tx_space(port, NULL);
if (count > pending)
count = pending;
if (count > 0) {
@@ -298,17 +305,17 @@ static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS];
#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
{
- unsigned long status;
unsigned long flags;
+ u32 status;
spin_lock_irqsave(&port->lock, flags);
- while (((status = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG)) &
- ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ while (!altera_jtaguart_tx_space(port, &status)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+
if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
- spin_unlock_irqrestore(&port->lock, flags);
return; /* no connection activity */
}
- spin_unlock_irqrestore(&port->lock, flags);
+
cpu_relax();
spin_lock_irqsave(&port->lock, flags);
}
@@ -321,8 +328,7 @@ static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- while ((readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
- ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ while (!altera_jtaguart_tx_space(port, NULL)) {
spin_unlock_irqrestore(&port->lock, flags);
cpu_relax();
spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 8b749ed557c6..82f2790de28d 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -175,7 +175,7 @@ static void altera_uart_break_ctl(struct uart_port *port, int break_state)
static void altera_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, baudclk;
@@ -199,9 +199,8 @@ static void altera_uart_set_termios(struct uart_port *port,
*/
}
-static void altera_uart_rx_chars(struct altera_uart *pp)
+static void altera_uart_rx_chars(struct uart_port *port)
{
- struct uart_port *port = &pp->port;
unsigned char ch, flag;
unsigned short status;
@@ -246,9 +245,8 @@ static void altera_uart_rx_chars(struct altera_uart *pp)
tty_flip_buffer_push(&port->state->port);
}
-static void altera_uart_tx_chars(struct altera_uart *pp)
+static void altera_uart_tx_chars(struct uart_port *port)
{
- struct uart_port *port = &pp->port;
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
@@ -272,10 +270,8 @@ static void altera_uart_tx_chars(struct altera_uart *pp)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (xmit->head == xmit->tail) {
- pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
- altera_uart_update_ctrl_reg(pp);
- }
+ if (uart_circ_empty(xmit))
+ altera_uart_stop_tx(port);
}
static irqreturn_t altera_uart_interrupt(int irq, void *data)
@@ -288,9 +284,9 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
spin_lock(&port->lock);
if (isr & ALTERA_UART_STATUS_RRDY_MSK)
- altera_uart_rx_chars(pp);
+ altera_uart_rx_chars(port);
if (isr & ALTERA_UART_STATUS_TRDY_MSK)
- altera_uart_tx_chars(pp);
+ altera_uart_tx_chars(port);
spin_unlock(&port->lock);
return IRQ_RETVAL(isr);
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index fae0b581ff42..af27fb8ec145 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -370,7 +370,7 @@ static void pl010_shutdown(struct uart_port *port)
static void
pl010_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int lcr_h, old_cr;
unsigned long flags;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 15f0e4d88c5a..5cdced39eafd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2030,7 +2030,7 @@ pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
@@ -2162,7 +2162,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
@@ -2777,6 +2777,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
int portnr, ret;
+ u32 val;
portnr = pl011_find_free_port();
if (portnr < 0)
@@ -2801,6 +2802,21 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.rs485_supported = pl011_rs485_supported;
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
+ if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) {
+ switch (val) {
+ case 1:
+ uap->port.iotype = UPIO_MEM;
+ break;
+ case 4:
+ uap->port.iotype = UPIO_MEM32;
+ break;
+ default:
+ dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
return ret;
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 9ef82d870ff2..450f4edfda0f 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -228,7 +228,7 @@ static void apbuart_shutdown(struct uart_port *port)
}
static void apbuart_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios, const struct ktermios *old)
{
unsigned int cr;
unsigned long flags;
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 32caeac12985..925484a42c82 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -283,7 +283,7 @@ static void ar933x_uart_get_scale_step(unsigned int clk,
static void ar933x_uart_set_termios(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
@@ -583,6 +583,13 @@ static const struct uart_ops ar933x_uart_ops = {
static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
+
+ if (port->rs485.flags & SER_RS485_ENABLED)
+ gpiod_set_value(up->rts_gpiod,
+ !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
+
return 0;
}
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 2a09e92ef9ed..2a65ea2660e1 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -351,7 +351,7 @@ static void arc_serial_shutdown(struct uart_port *port)
static void
arc_serial_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct arc_uart_port *uart = to_arc_port(port);
unsigned int baud, uartl, uarth, hw_val;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 7450d3853031..bd07f79a2df9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
@@ -110,6 +111,7 @@ struct atmel_uart_char {
struct atmel_uart_port {
struct uart_port uart; /* uart */
struct clk *clk; /* uart clock */
+ struct clk *gclk; /* uart generic clock */
int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
u32 backup_imr; /* IMR saved during suspend */
int break_active; /* break being received */
@@ -150,6 +152,7 @@ struct atmel_uart_port {
u32 rts_low;
bool ms_irq_enabled;
u32 rtor; /* address of receiver timeout register if it exists */
+ bool is_usart;
bool has_frac_baudrate;
bool has_hw_timer;
struct timer_list uart_timer;
@@ -228,6 +231,11 @@ static inline int atmel_uart_is_half_duplex(struct uart_port *port)
(port->iso7816.flags & SER_ISO7816_ENABLED);
}
+static inline int atmel_error_rate(int desired_value, int actual_value)
+{
+ return 100 - (desired_value * 100) / actual_value;
+}
+
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
@@ -1825,6 +1833,7 @@ static void atmel_get_ip_name(struct uart_port *port)
*/
atmel_port->has_frac_baudrate = false;
atmel_port->has_hw_timer = false;
+ atmel_port->is_usart = false;
if (name == new_uart) {
dev_dbg(port->dev, "Uart with hw timer");
@@ -1834,6 +1843,7 @@ static void atmel_get_ip_name(struct uart_port *port)
dev_dbg(port->dev, "Usart\n");
atmel_port->has_frac_baudrate = true;
atmel_port->has_hw_timer = true;
+ atmel_port->is_usart = true;
atmel_port->rtor = ATMEL_US_RTOR;
version = atmel_uart_readl(port, ATMEL_US_VERSION);
switch (version) {
@@ -1863,6 +1873,7 @@ static void atmel_get_ip_name(struct uart_port *port)
dev_dbg(port->dev, "This version is usart\n");
atmel_port->has_frac_baudrate = true;
atmel_port->has_hw_timer = true;
+ atmel_port->is_usart = true;
atmel_port->rtor = ATMEL_US_RTOR;
break;
case 0x203:
@@ -2113,6 +2124,8 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
* This is called on uart_close() or a suspend event.
*/
clk_disable_unprepare(atmel_port->clk);
+ if (__clk_is_enabled(atmel_port->gclk))
+ clk_disable_unprepare(atmel_port->gclk);
break;
default:
dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
@@ -2122,19 +2135,25 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
/*
* Change the port parameters
*/
-static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+static void atmel_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned long flags;
- unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
+ unsigned int old_mode, mode, imr, quot, div, cd, fp = 0;
+ unsigned int baud, actual_baud, gclk_rate;
+ int ret;
/* save the current mode register */
mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
/* reset the mode, clock divisor, parity, stop bits and data size */
- mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
- ATMEL_US_PAR | ATMEL_US_USMODE);
+ if (atmel_port->is_usart)
+ mode &= ~(ATMEL_US_NBSTOP | ATMEL_US_PAR | ATMEL_US_CHRL |
+ ATMEL_US_USCLKS | ATMEL_US_USMODE);
+ else
+ mode &= ~(ATMEL_UA_BRSRCCK | ATMEL_US_PAR | ATMEL_UA_FILTER);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
@@ -2282,10 +2301,60 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
cd = uart_get_divisor(port, baud);
}
- if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */
+ /*
+ * If the current value of the Clock Divisor surpasses the 16 bit
+ * ATMEL_US_CD mask and the IP is USART, switch to the Peripheral
+ * Clock implicitly divided by 8.
+ * If the IP is UART however, keep the highest possible value for
+ * the CD and avoid needless division of CD, since UART IP's do not
+ * support implicit division of the Peripheral Clock.
+ */
+ if (atmel_port->is_usart && cd > ATMEL_US_CD) {
cd /= 8;
mode |= ATMEL_US_USCLKS_MCK_DIV8;
+ } else {
+ cd = min_t(unsigned int, cd, ATMEL_US_CD);
+ }
+
+ /*
+ * If there is no Fractional Part, there is a high chance that
+ * we may be able to generate a baudrate closer to the desired one
+ * if we use the GCLK as the clock source driving the baudrate
+ * generator.
+ */
+ if (!atmel_port->has_frac_baudrate) {
+ if (__clk_is_enabled(atmel_port->gclk))
+ clk_disable_unprepare(atmel_port->gclk);
+ gclk_rate = clk_round_rate(atmel_port->gclk, 16 * baud);
+ actual_baud = clk_get_rate(atmel_port->clk) / (16 * cd);
+ if (gclk_rate && abs(atmel_error_rate(baud, actual_baud)) >
+ abs(atmel_error_rate(baud, gclk_rate / 16))) {
+ clk_set_rate(atmel_port->gclk, 16 * baud);
+ ret = clk_prepare_enable(atmel_port->gclk);
+ if (ret)
+ goto gclk_fail;
+
+ if (atmel_port->is_usart) {
+ mode &= ~ATMEL_US_USCLKS;
+ mode |= ATMEL_US_USCLKS_GCLK;
+ } else {
+ mode |= ATMEL_UA_BRSRCCK;
+ }
+
+ /*
+ * Set the Clock Divisor for GCLK to 1.
+ * Since we were able to generate the smallest
+ * multiple of the desired baudrate times 16,
+ * then we surely can generate a bigger multiple
+ * with the exact error rate for an equally increased
+ * CD. Thus no need to take into account
+ * a higher value for CD.
+ */
+ cd = 1;
+ }
}
+
+gclk_fail:
quot = cd | fp << ATMEL_US_FP_OFFSET;
if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
@@ -2881,6 +2950,12 @@ static int atmel_serial_probe(struct platform_device *pdev)
if (ret)
goto err;
+ atmel_port->gclk = devm_clk_get_optional(&pdev->dev, "gclk");
+ if (IS_ERR(atmel_port->gclk)) {
+ ret = PTR_ERR(atmel_port->gclk);
+ goto err_clk_disable_unprepare;
+ }
+
ret = atmel_init_port(atmel_port, pdev);
if (ret)
goto err_clk_disable_unprepare;
diff --git a/drivers/tty/serial/atmel_serial.h b/drivers/tty/serial/atmel_serial.h
index 0d8a0f9cc5c3..87f8f7996307 100644
--- a/drivers/tty/serial/atmel_serial.h
+++ b/drivers/tty/serial/atmel_serial.h
@@ -9,6 +9,8 @@
* Based on AT91RM9200 datasheet revision E.
*/
+#include <linux/bitfield.h>
+
#ifndef ATMEL_SERIAL_H
#define ATMEL_SERIAL_H
@@ -39,39 +41,42 @@
#define ATMEL_US_MR 0x04 /* Mode Register */
#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
-#define ATMEL_US_USMODE_NORMAL 0
-#define ATMEL_US_USMODE_RS485 1
-#define ATMEL_US_USMODE_HWHS 2
-#define ATMEL_US_USMODE_MODEM 3
-#define ATMEL_US_USMODE_ISO7816_T0 4
-#define ATMEL_US_USMODE_ISO7816_T1 6
-#define ATMEL_US_USMODE_IRDA 8
+#define ATMEL_US_USMODE_NORMAL FIELD_PREP(ATMEL_US_USMODE, 0)
+#define ATMEL_US_USMODE_RS485 FIELD_PREP(ATMEL_US_USMODE, 1)
+#define ATMEL_US_USMODE_HWHS FIELD_PREP(ATMEL_US_USMODE, 2)
+#define ATMEL_US_USMODE_MODEM FIELD_PREP(ATMEL_US_USMODE, 3)
+#define ATMEL_US_USMODE_ISO7816_T0 FIELD_PREP(ATMEL_US_USMODE, 4)
+#define ATMEL_US_USMODE_ISO7816_T1 FIELD_PREP(ATMEL_US_USMODE, 6)
+#define ATMEL_US_USMODE_IRDA FIELD_PREP(ATMEL_US_USMODE, 8)
#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
-#define ATMEL_US_USCLKS_MCK (0 << 4)
-#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
-#define ATMEL_US_USCLKS_SCK (3 << 4)
+#define ATMEL_US_USCLKS_MCK FIELD_PREP(ATMEL_US_USCLKS, 0)
+#define ATMEL_US_USCLKS_MCK_DIV8 FIELD_PREP(ATMEL_US_USCLKS, 1)
+#define ATMEL_US_USCLKS_GCLK FIELD_PREP(ATMEL_US_USCLKS, 2)
+#define ATMEL_US_USCLKS_SCK FIELD_PREP(ATMEL_US_USCLKS, 3)
+#define ATMEL_UA_FILTER BIT(4)
#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
-#define ATMEL_US_CHRL_5 (0 << 6)
-#define ATMEL_US_CHRL_6 (1 << 6)
-#define ATMEL_US_CHRL_7 (2 << 6)
-#define ATMEL_US_CHRL_8 (3 << 6)
+#define ATMEL_US_CHRL_5 FIELD_PREP(ATMEL_US_CHRL, 0)
+#define ATMEL_US_CHRL_6 FIELD_PREP(ATMEL_US_CHRL, 1)
+#define ATMEL_US_CHRL_7 FIELD_PREP(ATMEL_US_CHRL, 2)
+#define ATMEL_US_CHRL_8 FIELD_PREP(ATMEL_US_CHRL, 3)
#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
-#define ATMEL_US_PAR_EVEN (0 << 9)
-#define ATMEL_US_PAR_ODD (1 << 9)
-#define ATMEL_US_PAR_SPACE (2 << 9)
-#define ATMEL_US_PAR_MARK (3 << 9)
-#define ATMEL_US_PAR_NONE (4 << 9)
-#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
+#define ATMEL_US_PAR_EVEN FIELD_PREP(ATMEL_US_PAR, 0)
+#define ATMEL_US_PAR_ODD FIELD_PREP(ATMEL_US_PAR, 1)
+#define ATMEL_US_PAR_SPACE FIELD_PREP(ATMEL_US_PAR, 2)
+#define ATMEL_US_PAR_MARK FIELD_PREP(ATMEL_US_PAR, 3)
+#define ATMEL_US_PAR_NONE FIELD_PREP(ATMEL_US_PAR, 4)
+#define ATMEL_US_PAR_MULTI_DROP FIELD_PREP(ATMEL_US_PAR, 6)
#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
-#define ATMEL_US_NBSTOP_1 (0 << 12)
-#define ATMEL_US_NBSTOP_1_5 (1 << 12)
-#define ATMEL_US_NBSTOP_2 (2 << 12)
+#define ATMEL_US_NBSTOP_1 FIELD_PREP(ATMEL_US_NBSTOP, 0)
+#define ATMEL_US_NBSTOP_1_5 FIELD_PREP(ATMEL_US_NBSTOP, 1)
+#define ATMEL_US_NBSTOP_2 FIELD_PREP(ATMEL_US_NBSTOP, 2)
+#define ATMEL_UA_BRSRCCK BIT(12) /* Clock Selection for UART */
#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
-#define ATMEL_US_CHMODE_NORMAL (0 << 14)
-#define ATMEL_US_CHMODE_ECHO (1 << 14)
-#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
-#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
+#define ATMEL_US_CHMODE_NORMAL FIELD_PREP(ATMEL_US_CHMODE, 0)
+#define ATMEL_US_CHMODE_ECHO FIELD_PREP(ATMEL_US_CHMODE, 1)
+#define ATMEL_US_CHMODE_LOC_LOOP FIELD_PREP(ATMEL_US_CHMODE, 2)
+#define ATMEL_US_CHMODE_REM_LOOP FIELD_PREP(ATMEL_US_CHMODE, 3)
#define ATMEL_US_MSBF BIT(16) /* Bit Order */
#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
@@ -79,7 +84,7 @@
#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
#define ATMEL_US_MAX_ITER_MASK GENMASK(26, 24) /* Max Iterations */
-#define ATMEL_US_MAX_ITER(n) (((n) << 24) & ATMEL_US_MAX_ITER_MASK)
+#define ATMEL_US_MAX_ITER(n) FIELD_PREP(ATMEL_US_MAX_ITER_MASK, (n))
#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
@@ -131,19 +136,19 @@
#define ATMEL_US_CMPR 0x90 /* Comparison Register */
#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
-#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
-#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
+#define ATMEL_US_TXRDYM(data) FIELD_PREP(GENMASK(1, 0), (data)) /* TX Ready Mode */
+#define ATMEL_US_RXRDYM(data) FIELD_PREP(GENMASK(5, 4), (data)) /* RX Ready Mode */
#define ATMEL_US_ONE_DATA 0x0
#define ATMEL_US_TWO_DATA 0x1
#define ATMEL_US_FOUR_DATA 0x2
#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
-#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
-#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
-#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
+#define ATMEL_US_TXFTHRES(thr) FIELD_PREP(GENMASK(13, 8), (thr)) /* TX FIFO Threshold */
+#define ATMEL_US_RXFTHRES(thr) FIELD_PREP(GENMASK(21, 16), (thr)) /* RX FIFO Threshold */
+#define ATMEL_US_RXFTHRES2(thr) FIELD_PREP(GENMASK(29, 24), (thr)) /* RX FIFO Threshold2 */
#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
-#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
-#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
+#define ATMEL_US_TXFL(reg) FIELD_GET(GENMASK(5, 0), (reg)) /* TX FIFO Level */
+#define ATMEL_US_RXFL(reg) FIELD_GET(GENMASK(21, 16), (reg)) /* RX FIFO Level */
#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 53b43174aa40..5d9737c2d1f2 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -492,9 +492,8 @@ static void bcm_uart_shutdown(struct uart_port *port)
/*
* serial core request to change current uart setting
*/
-static void bcm_uart_set_termios(struct uart_port *port,
- struct ktermios *new,
- struct ktermios *old)
+static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,
+ const struct ktermios *old)
{
unsigned int ctl, baud, quot, ier;
unsigned long flags;
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index b9b66ad31a08..404b43a5ae33 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -251,7 +251,7 @@ static void uart_clps711x_shutdown(struct uart_port *port)
static void uart_clps711x_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
u32 ubrlcr;
unsigned int baud, quot;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index 8c582779cf22..0577618e78c0 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -87,7 +87,6 @@ struct uart_cpm_port {
struct gpio_desc *gpios[NUM_GPIOS];
};
-extern int cpm_uart_nr;
extern struct uart_cpm_port cpm_uart_ports[UART_NR];
/* these are located in their respective files */
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index db07d6a5d764..b4369ed45ae2 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -484,12 +484,11 @@ static void cpm_uart_shutdown(struct uart_port *port)
static void cpm_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
int baud;
unsigned long flags;
u16 cval, scval, prev_mode;
- int bits, sbits;
struct uart_cpm_port *pinfo =
container_of(port, struct uart_cpm_port, port);
smc_t __iomem *smcp = pinfo->smcp;
@@ -515,28 +514,17 @@ static void cpm_uart_set_termios(struct uart_port *port,
if (maxidl > 0x10)
maxidl = 0x10;
- /* Character length programmed into the mode register is the
- * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
- * 1 or 2 stop bits, minus 1.
- * The value 'bits' counts this for us.
- */
cval = 0;
scval = 0;
- /* byte size */
- bits = tty_get_char_size(termios->c_cflag);
- sbits = bits - 5;
-
if (termios->c_cflag & CSTOPB) {
cval |= SMCMR_SL; /* Two stops */
scval |= SCU_PSMR_SL;
- bits++;
}
if (termios->c_cflag & PARENB) {
cval |= SMCMR_PEN;
scval |= SCU_PSMR_PEN;
- bits++;
if (!(termios->c_cflag & PARODD)) {
cval |= SMCMR_PM_EVEN;
scval |= (SCU_PSMR_REVP | SCU_PSMR_TEVP);
@@ -580,12 +568,9 @@ static void cpm_uart_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
- /* Start bit has not been added (so don't, because we would just
- * subtract it later), and we need to add one for the number of
- * stops bits (there is always at least one).
- */
- bits++;
if (IS_SMC(pinfo)) {
+ unsigned int bits = tty_get_frame_size(termios->c_cflag);
+
/*
* MRBLR can be changed while an SMC/SCC is operating only
* if it is done in a single bus cycle with one 16-bit move
@@ -604,13 +589,17 @@ static void cpm_uart_set_termios(struct uart_port *port,
*/
prev_mode = in_be16(&smcp->smc_smcmr) & (SMCMR_REN | SMCMR_TEN);
/* Output in *one* operation, so we don't interrupt RX/TX if they
- * were already enabled. */
- out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval |
- SMCMR_SM_UART | prev_mode);
+ * were already enabled.
+ * Character length programmed into the register is frame bits minus 1.
+ */
+ out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits - 1) | cval |
+ SMCMR_SM_UART | prev_mode);
} else {
+ unsigned int bits = tty_get_char_size(termios->c_cflag);
+
out_be16(&pinfo->sccup->scc_genscc.scc_mrblr, pinfo->rx_fifosize);
out_be16(&pinfo->sccup->scc_maxidl, maxidl);
- out_be16(&sccp->scc_psmr, (sbits << 12) | scval);
+ out_be16(&sccp->scc_psmr, (UART_LCR_WLEN(bits) << 12) | scval);
}
if (pinfo->clk)
@@ -1214,12 +1203,6 @@ static int cpm_uart_init_port(struct device_node *np,
pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
spin_lock_init(&pinfo->port.lock);
- pinfo->port.irq = irq_of_parse_and_map(np, 0);
- if (pinfo->port.irq == NO_IRQ) {
- ret = -EINVAL;
- goto out_pram;
- }
-
for (i = 0; i < NUM_GPIOS; i++) {
struct gpio_desc *gpiod;
@@ -1229,7 +1212,7 @@ static int cpm_uart_init_port(struct device_node *np,
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
- goto out_irq;
+ goto out_pram;
}
if (gpiod) {
@@ -1255,8 +1238,6 @@ static int cpm_uart_init_port(struct device_node *np,
return cpm_uart_request_port(&pinfo->port);
-out_irq:
- irq_dispose_mapping(pinfo->port.irq);
out_pram:
cpm_uart_unmap_pram(pinfo, pram);
out_mem:
@@ -1436,11 +1417,17 @@ static int cpm_uart_probe(struct platform_device *ofdev)
/* initialize the device pointer for the port */
pinfo->port.dev = &ofdev->dev;
+ pinfo->port.irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (!pinfo->port.irq)
+ return -EINVAL;
+
ret = cpm_uart_init_port(ofdev->dev.of_node, pinfo);
- if (ret)
- return ret;
+ if (!ret)
+ return uart_add_one_port(&cpm_reg, &pinfo->port);
+
+ irq_dispose_mapping(pinfo->port.irq);
- return uart_add_one_port(&cpm_reg, &pinfo->port);
+ return ret;
}
static int cpm_uart_remove(struct platform_device *ofdev)
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index af951e6a2ef4..0c0a62346f23 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -287,7 +287,7 @@ static void digicolor_uart_shutdown(struct uart_port *port)
static void digicolor_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud, divisor;
u8 config = 0;
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 2e21acf39720..829b452daee9 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -559,7 +559,7 @@ static void dz_reset(struct dz_port *dport)
}
static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct dz_port *dport = to_dport(uport);
unsigned long flags;
@@ -592,9 +592,12 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
baud = uart_get_baud_rate(uport, termios, old_termios, 50, 9600);
bflag = dz_encode_baud_rate(baud);
- if (bflag < 0) { /* Try to keep unchanged. */
- baud = uart_get_baud_rate(uport, old_termios, NULL, 50, 9600);
- bflag = dz_encode_baud_rate(baud);
+ if (bflag < 0) {
+ if (old_termios) {
+ /* Keep unchanged. */
+ baud = tty_termios_baud_rate(old_termios);
+ bflag = dz_encode_baud_rate(baud);
+ }
if (bflag < 0) { /* Resort to 9600. */
baud = 9600;
bflag = DZ_B9600;
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 88d08ba1ca83..a5f380584cda 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -67,7 +67,7 @@ static void __init earlycon_init(struct earlycon_device *device,
if (*s)
earlycon->index = simple_strtoul(s, NULL, 10);
len = s - name;
- strlcpy(earlycon->name, name, min(len + 1, sizeof(earlycon->name)));
+ strscpy(earlycon->name, name, min(len + 1, sizeof(earlycon->name)));
earlycon->data = &early_console_dev;
}
@@ -123,7 +123,7 @@ static int __init parse_options(struct earlycon_device *device, char *options)
device->baud = simple_strtoul(options, NULL, 0);
length = min(strcspn(options, " ") + 1,
(size_t)(sizeof(device->options)));
- strlcpy(device->options, options, length);
+ strscpy(device->options, options, length);
}
return 0;
@@ -304,7 +304,7 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
if (options) {
early_console_dev.baud = simple_strtoul(options, NULL, 0);
- strlcpy(early_console_dev.options, options,
+ strscpy(early_console_dev.options, options,
sizeof(early_console_dev.options));
}
earlycon_init(&early_console_dev, match->name);
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 98bb0c315e13..84e8153e5420 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -401,7 +401,7 @@ static void linflex_shutdown(struct uart_port *port)
static void
linflex_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned long cr, old_cr, cr1;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index fbc4b071b330..67fa113f77d4 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1284,17 +1284,12 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
struct dma_slave_config dma_rx_sconfig = {};
struct circ_buf *ring = &sport->rx_ring;
int ret, nent;
- int bits, baud;
struct tty_port *port = &sport->port.state->port;
struct tty_struct *tty = port->tty;
struct ktermios *termios = &tty->termios;
struct dma_chan *chan = sport->dma_rx_chan;
-
- baud = tty_get_baud_rate(tty);
-
- bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10;
- if (termios->c_cflag & PARENB)
- bits++;
+ unsigned int bits = tty_get_frame_size(termios->c_cflag);
+ unsigned int baud = tty_get_baud_rate(tty);
/*
* Calculate length of one DMA buffer size to keep latency below
@@ -1776,6 +1771,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
if (sport->lpuart_dma_rx_use) {
del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
+ sport->lpuart_dma_rx_use = false;
}
if (sport->lpuart_dma_tx_use) {
@@ -1784,6 +1780,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
sport->dma_tx_in_progress = false;
dmaengine_terminate_all(sport->dma_tx_chan);
}
+ sport->lpuart_dma_tx_use = false;
}
if (sport->dma_tx_chan)
@@ -1833,7 +1830,7 @@ static void lpuart32_shutdown(struct uart_port *port)
static void
lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
@@ -2073,7 +2070,7 @@ static void lpuart32_serial_setbrg(struct lpuart_port *sport,
static void
lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
@@ -2729,15 +2726,13 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_reset;
- ret = uart_add_one_port(&lpuart_reg, &sport->port);
- if (ret)
- goto failed_attach_port;
-
ret = uart_get_rs485_mode(&sport->port);
if (ret)
goto failed_get_rs485;
- uart_rs485_config(&sport->port);
+ ret = uart_add_one_port(&lpuart_reg, &sport->port);
+ if (ret)
+ goto failed_attach_port;
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
DRIVER_NAME, sport);
@@ -2747,9 +2742,9 @@ static int lpuart_probe(struct platform_device *pdev)
return 0;
failed_irq_request:
-failed_get_rs485:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
+failed_get_rs485:
failed_reset:
lpuart_disable_clks(sport);
return ret;
@@ -2800,7 +2795,7 @@ static int __maybe_unused lpuart_suspend(struct device *dev)
* EDMA driver during suspend will forcefully release any
* non-idle DMA channels. If port wakeup is enabled or if port
* is console port or 'no_console_suspend' is set the Rx DMA
- * cannot resume as as expected, hence gracefully release the
+ * cannot resume as expected, hence gracefully release the
* Rx DMA path before suspend and start Rx DMA path on resume.
*/
if (irq_wake) {
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 45df29947fe8..819f957b6b84 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1351,9 +1351,8 @@ static void icom_close(struct uart_port *port)
kref_put(&icom_port->adapter->kref, icom_kref_release);
}
-static void icom_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old_termios)
+static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old_termios)
{
struct icom_port *icom_port = to_icom_port(port);
int baud;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 522445a8f666..05b432dc7a85 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -380,8 +380,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
{
*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
- sport->port.mctrl |= TIOCM_RTS;
- mctrl_gpio_set(sport->gpios, sport->port.mctrl);
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS);
}
/* called with port.lock taken and irqs caller dependent */
@@ -390,8 +389,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
*ucr2 &= ~UCR2_CTSC;
*ucr2 |= UCR2_CTS;
- sport->port.mctrl &= ~TIOCM_RTS;
- mctrl_gpio_set(sport->gpios, sport->port.mctrl);
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS);
}
static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
@@ -1620,7 +1618,7 @@ static void imx_uart_flush_buffer(struct uart_port *port)
static void
imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
@@ -2347,8 +2345,6 @@ static int imx_uart_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"low-active RTS not possible when receiver is off, enabling receiver\n");
- uart_rs485_config(&sport->port);
-
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 655e64b26852..dd0a8915ce4f 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -873,7 +873,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
/* The port lock is not held. */
static void
ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_ip22zilog_port *up =
container_of(port, struct uart_ip22zilog_port, port);
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 0ea799bf8dbb..417a5b6bffc3 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -211,7 +211,8 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
default:
- return -ENXIO;
+ rc = -ENXIO;
+ goto out_kfree_brd;
}
rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd);
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index cb58bdec2f43..222afc270c88 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -300,8 +300,8 @@ static void jsm_tty_close(struct uart_port *port)
}
static void jsm_tty_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old_termios)
+ struct ktermios *termios,
+ const struct ktermios *old_termios)
{
unsigned long lock_flags;
struct jsm_channel *channel =
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index a3120c3347dd..c892f3c7d1ab 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -8,6 +8,7 @@
* Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/device.h>
@@ -93,7 +94,6 @@
#define ASCFSTAT_RXFFLMASK 0x003F
#define ASCFSTAT_TXFFLMASK 0x3F00
#define ASCFSTAT_TXFREEMASK 0x3F000000
-#define ASCFSTAT_TXFREEOFF 24
static void lqasc_tx_chars(struct uart_port *port);
static struct ltq_uart_port *lqasc_port[MAXPORTS];
@@ -139,6 +139,13 @@ lqasc_stop_tx(struct uart_port *port)
return;
}
+static bool lqasc_tx_ready(struct uart_port *port)
+{
+ u32 fstat = __raw_readl(port->membase + LTQ_ASC_FSTAT);
+
+ return FIELD_GET(ASCFSTAT_TXFREEMASK, fstat);
+}
+
static void
lqasc_start_tx(struct uart_port *port)
{
@@ -228,8 +235,7 @@ lqasc_tx_chars(struct uart_port *port)
return;
}
- while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) &
- ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
+ while (lqasc_tx_ready(port)) {
if (port->x_char) {
writeb(port->x_char, port->membase + LTQ_ASC_TBUF);
port->icount.tx++;
@@ -405,8 +411,8 @@ lqasc_shutdown(struct uart_port *port)
}
static void
-lqasc_set_termios(struct uart_port *port,
- struct ktermios *new, struct ktermios *old)
+lqasc_set_termios(struct uart_port *port, struct ktermios *new,
+ const struct ktermios *old)
{
unsigned int cflag;
unsigned int iflag;
@@ -600,15 +606,12 @@ static const struct uart_ops lqasc_pops = {
static void
lqasc_console_putchar(struct uart_port *port, unsigned char ch)
{
- int fifofree;
-
if (!port->membase)
return;
- do {
- fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT)
- & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
- } while (fifofree == 0);
+ while (!lqasc_tx_ready(port))
+ ;
+
writeb(ch, port->membase + LTQ_ASC_TBUF);
}
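The new lqasc_tx_ready() helper above relies on FIELD_GET() from <linux/bitfield.h>, which extracts a register field using only its mask, so the separate ASCFSTAT_TXFREEOFF shift constant can be dropped. The following stand-alone sketch models that extraction for the TX-free field; field_get() here is a simplified user-space stand-in for the kernel macro, and the FSTAT value is made up.

#include <stdint.h>
#include <stdio.h>

#define ASCFSTAT_TXFREEMASK 0x3F000000u	/* TX FIFO free-space field */

/* Simplified model of FIELD_GET(): shift by the mask's trailing zeros, then mask. */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	unsigned int shift = 0;

	while (!((mask >> shift) & 1))
		shift++;
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t fstat = 0x0A000000;	/* hypothetical FSTAT: 10 free TX FIFO entries */

	printf("tx free entries: %u\n", field_get(ASCFSTAT_TXFREEMASK, fstat));
	return 0;
}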
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 328b50521f14..4c0604325ee9 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -178,7 +178,7 @@ static void liteuart_shutdown(struct uart_port *port)
}
static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned long flags;
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index 93140cac1ca1..ed47f4768338 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -278,6 +278,13 @@ static void __serial_lpc32xx_rx(struct uart_port *port)
static void serial_lpc32xx_stop_tx(struct uart_port *port);
+static bool serial_lpc32xx_tx_ready(struct uart_port *port)
+{
+ u32 level = readl(LPC32XX_HSUART_LEVEL(port->membase));
+
+ return LPC32XX_HSU_TX_LEV(level) < 64;
+}
+
static void __serial_lpc32xx_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -293,8 +300,7 @@ static void __serial_lpc32xx_tx(struct uart_port *port)
goto exit_tx;
/* Transfer data */
- while (LPC32XX_HSU_TX_LEV(readl(
- LPC32XX_HSUART_LEVEL(port->membase))) < 64) {
+ while (serial_lpc32xx_tx_ready(port)) {
writel((u32) xmit->buf[xmit->tail],
LPC32XX_HSUART_FIFO(port->membase));
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -493,7 +499,7 @@ static void serial_lpc32xx_shutdown(struct uart_port *port)
/* port->lock is not held. */
static void serial_lpc32xx_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, quot;
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 0b5f21fbb53d..c69602f356fd 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -418,7 +418,7 @@ static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void
max3100_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 7cf81f692ac4..fbf6e2b3161c 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -906,7 +906,7 @@ static void max310x_break_ctl(struct uart_port *port, int break_state)
static void max310x_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int lcr = 0, flow = 0;
int baud;
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index f4aaaadd0742..b1cd9a76dd93 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -192,7 +192,7 @@ static void mcf_shutdown(struct uart_port *port)
/****************************************************************************/
static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, baudclk;
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 12117b596e73..3690f5cf0f43 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -646,8 +646,8 @@ static void men_z135_shutdown(struct uart_port *port)
}
static void men_z135_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
struct men_z135_port *uart = to_men_z135(port);
unsigned int baud;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 6c8db19fd572..056243c12836 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -335,7 +335,7 @@ static void meson_uart_change_speed(struct uart_port *port, unsigned long baud)
static void meson_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int cflags, iflags, baud;
unsigned long flags;
@@ -667,29 +667,6 @@ static struct uart_driver meson_uart_driver = {
.cons = MESON_SERIAL_CONSOLE,
};
-static inline struct clk *meson_uart_probe_clock(struct device *dev,
- const char *id)
-{
- struct clk *clk = NULL;
- int ret;
-
- clk = devm_clk_get(dev, id);
- if (IS_ERR(clk))
- return clk;
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "couldn't enable clk\n");
- return ERR_PTR(ret);
- }
-
- devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
-
- return clk;
-}
-
static int meson_uart_probe_clocks(struct platform_device *pdev,
struct uart_port *port)
{
@@ -697,15 +674,15 @@ static int meson_uart_probe_clocks(struct platform_device *pdev,
struct clk *clk_pclk = NULL;
struct clk *clk_baud = NULL;
- clk_pclk = meson_uart_probe_clock(&pdev->dev, "pclk");
+ clk_pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
if (IS_ERR(clk_pclk))
return PTR_ERR(clk_pclk);
- clk_xtal = meson_uart_probe_clock(&pdev->dev, "xtal");
+ clk_xtal = devm_clk_get_enabled(&pdev->dev, "xtal");
if (IS_ERR(clk_xtal))
return PTR_ERR(clk_xtal);
- clk_baud = meson_uart_probe_clock(&pdev->dev, "baud");
+ clk_baud = devm_clk_get_enabled(&pdev->dev, "baud");
if (IS_ERR(clk_baud))
return PTR_ERR(clk_baud);
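devm_clk_get_enabled(), used above in place of the removed meson_uart_probe_clock() helper, looks up the clock, prepares and enables it, and registers the matching disable/unprepare as a managed action, so no explicit cleanup is needed. A minimal sketch of how a hypothetical probe function would use it, with driver-specific details omitted:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *pclk;

	pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	/* The clock is already prepared and enabled at this point and will be
	 * disabled, unprepared and put automatically on driver unbind.
	 */
	return 0;
}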
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 347088bb380e..c15e0d84dc7e 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -298,7 +298,8 @@ static void mlb_usio_shutdown(struct uart_port *port)
}
static void mlb_usio_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int escr, smr = MLB_USIO_SMR_SOE;
unsigned long flags, baud, quot;
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 3f1986c89694..73362d4bc45d 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -101,7 +101,7 @@ struct psc_ops {
void (*cw_restore_ints)(struct uart_port *port);
unsigned int (*set_baudrate)(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
int (*clock_alloc)(struct uart_port *port);
void (*clock_relse)(struct uart_port *port);
int (*clock)(struct uart_port *port, int enable);
@@ -287,7 +287,7 @@ static void mpc52xx_psc_cw_restore_ints(struct uart_port *port)
static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
@@ -305,7 +305,7 @@ static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port,
static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
@@ -533,7 +533,7 @@ static void mpc512x_psc_cw_restore_ints(struct uart_port *port)
static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
@@ -880,7 +880,7 @@ static inline void mpc5125_set_divisor(struct mpc5125_psc __iomem *psc,
static unsigned int mpc5125_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
@@ -1167,7 +1167,7 @@ mpc52xx_uart_shutdown(struct uart_port *port)
static void
mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned char mr1, mr2;
@@ -1364,7 +1364,7 @@ static const struct uart_ops mpc52xx_uart_ops = {
/* Interrupt handling */
/* ======================================================================== */
-static inline unsigned int
+static inline bool
mpc52xx_uart_int_rx_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
@@ -1425,7 +1425,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
return psc_ops->raw_rx_rdy(port);
}
-static inline int
+static inline bool
mpc52xx_uart_int_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -1435,13 +1435,13 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
psc_ops->write_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
- return 1;
+ return true;
}
/* Nothing to do ? */
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
mpc52xx_uart_stop_tx(port);
- return 0;
+ return false;
}
/* Send chars */
@@ -1460,23 +1460,23 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
/* Maybe we're done after all */
if (uart_circ_empty(xmit)) {
mpc52xx_uart_stop_tx(port);
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static irqreturn_t
mpc5xxx_uart_process_int(struct uart_port *port)
{
unsigned long pass = ISR_PASS_LIMIT;
- unsigned int keepgoing;
+ bool keepgoing;
u8 status;
/* While we have stuff to do, we continue */
do {
/* If we don't find anything to do, we stop */
- keepgoing = 0;
+ keepgoing = false;
psc_ops->rx_clr_irq(port);
if (psc_ops->rx_rdy(port))
@@ -1495,7 +1495,7 @@ mpc5xxx_uart_process_int(struct uart_port *port)
/* Limit number of iteration */
if (!(--pass))
- keepgoing = 0;
+ keepgoing = false;
} while (keepgoing);
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index 5e9429dcc51f..2e3e6cf16817 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -358,7 +358,7 @@ static void mps2_uart_shutdown(struct uart_port *port)
static void
mps2_uart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, bauddiv;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 3159889ddae1..7dd19a281579 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1263,7 +1263,7 @@ static void msm_shutdown(struct uart_port *port)
}
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 0ba0f4d9459d..ed0e763f622a 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -289,7 +289,7 @@ static void mux_shutdown(struct uart_port *port)
*/
static void
mux_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
}
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 65eaecd10b7c..ba16e1da6bd3 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -564,7 +564,7 @@ static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned in
static void mvebu_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, min_baud, max_baud;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 1944daf8593a..d21a4f3ef2fe 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -959,7 +959,7 @@ err_out:
#define CTS_AT_AUART() !mctrl_gpio_to_gpiod(s->gpios, UART_GPIO_CTS)
static void mxs_auart_settermios(struct uart_port *u,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct mxs_auart_port *s = to_auart_port(u);
u32 ctrl, ctrl2, div;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 0aa666e247d5..7d0d2718ef59 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -300,8 +300,7 @@ static void serial_omap_stop_tx(struct uart_port *port)
serial_out(up, UART_OMAP_SCR, up->scr);
res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
1 : 0;
- if (up->rts_gpiod &&
- gpiod_get_value(up->rts_gpiod) != res) {
+ if (gpiod_get_value(up->rts_gpiod) != res) {
if (port->rs485.delay_rts_after_send > 0)
mdelay(
port->rs485.delay_rts_after_send);
@@ -337,19 +336,24 @@ static void serial_omap_stop_rx(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
}
+static void serial_omap_put_char(struct uart_omap_port *up, unsigned char ch)
+{
+ serial_out(up, UART_TX, ch);
+
+ if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
+ !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
+ up->rs485_tx_filter_count++;
+}
+
static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
if (up->port.x_char) {
- serial_out(up, UART_TX, up->port.x_char);
+ serial_omap_put_char(up, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
- if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
- !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
- up->rs485_tx_filter_count++;
-
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
@@ -358,12 +362,9 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
}
count = up->port.fifosize / 4;
do {
- serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ serial_omap_put_char(up, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
up->port.icount.tx++;
- if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
- !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
- up->rs485_tx_filter_count++;
if (uart_circ_empty(xmit))
break;
@@ -397,7 +398,7 @@ static void serial_omap_start_tx(struct uart_port *port)
/* if rts not already enabled */
res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
- if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
+ if (gpiod_get_value(up->rts_gpiod) != res) {
gpiod_set_value(up->rts_gpiod, res);
if (port->rs485.delay_rts_before_send > 0)
mdelay(port->rs485.delay_rts_before_send);
@@ -802,7 +803,7 @@ static void serial_omap_uart_qos_work(struct work_struct *work)
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char cval = 0;
@@ -1336,13 +1337,11 @@ serial_omap_config_rs485(struct uart_port *port, struct ktermios *termios,
up->ier = 0;
serial_out(up, UART_IER, 0);
- if (up->rts_gpiod) {
- /* enable / disable rts */
- val = (rs485->flags & SER_RS485_ENABLED) ?
- SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
- val = (rs485->flags & val) ? 1 : 0;
- gpiod_set_value(up->rts_gpiod, val);
- }
+ /* enable / disable rts */
+ val = (rs485->flags & SER_RS485_ENABLED) ?
+ SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
+ val = (rs485->flags & val) ? 1 : 0;
+ gpiod_set_value(up->rts_gpiod, val);
/* Enable interrupts */
up->ier = mode;
@@ -1547,11 +1546,13 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
ret = PTR_ERR(up->rts_gpiod);
if (ret == -EPROBE_DEFER)
return ret;
- /*
- * FIXME: the code historically ignored any other error than
- * -EPROBE_DEFER and just went on without GPIO.
- */
+
up->rts_gpiod = NULL;
+ up->port.rs485_supported = (const struct serial_rs485) { };
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ dev_err(dev, "disabling RS-485 (rts-gpio missing in device tree)\n");
+ memset(rs485conf, 0, sizeof(*rs485conf));
+ }
} else {
gpiod_set_consumer_name(up->rts_gpiod, "omap-serial");
}
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 888e17e3f25f..fde39cc1145d 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -328,7 +328,7 @@ static void owl_uart_change_baudrate(struct owl_uart_port *owl_port,
static void owl_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct owl_uart_port *owl_port = to_owl_uart_port(port);
unsigned int baud;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 8a9065e4a903..c59ce7886579 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -898,9 +898,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
fifo_size--;
}
- bytes = min((int)CIRC_CNT(xmit->head, xmit->tail,
- UART_XMIT_SIZE), CIRC_CNT_TO_END(xmit->head,
- xmit->tail, UART_XMIT_SIZE));
+ bytes = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (!bytes) {
dev_dbg(priv->port.dev, "%s 0 bytes return\n", __func__);
pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
@@ -1301,7 +1299,8 @@ static void pch_uart_shutdown(struct uart_port *port)
*bits. Update read_status_mask and ignore_status_mask to indicate
*the types of events we are interested in receiving. */
static void pch_uart_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
int rtn;
unsigned int baud, parity, bits, stb;
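The dma_handle_tx() change above (and the matching sci_dma_tx_work_fn() change in sh-sci.c further down) drops the min() of CIRC_CNT() and CIRC_CNT_TO_END(): for a power-of-two circular buffer, the count to the end of the buffer can never exceed the total count, so the min() was redundant. The sketch below checks that identity exhaustively for a small buffer, using the macro definitions as they appear in include/linux/circ_buf.h; the statement-expression form requires GCC or Clang.

#include <assert.h>
#include <stdio.h>

#define CIRC_CNT(head, tail, size)  (((head) - (tail)) & ((size) - 1))
#define CIRC_CNT_TO_END(head, tail, size) \
	({ int end = (size) - (tail); \
	   int n = ((head) + end) & ((size) - 1); \
	   n < end ? n : end; })

int main(void)
{
	const int size = 16;	/* small stand-in for UART_XMIT_SIZE */

	for (int head = 0; head < size; head++)
		for (int tail = 0; tail < size; tail++)
			assert(CIRC_CNT_TO_END(head, tail, size) <=
			       (int)CIRC_CNT(head, tail, size));

	puts("CIRC_CNT_TO_END() never exceeds CIRC_CNT()");
	return 0;
}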
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index f418f1de66b3..2beada66c824 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -50,7 +50,7 @@
* @irq_rx_name: irq rx name
* @irq_tx: virtual tx interrupt number
* @irq_tx_name: irq tx name
- * @cts_gpio: clear to send gpio
+ * @cts_gpiod: clear to send GPIO
* @dev: device descriptor
**/
struct pic32_sport {
@@ -65,8 +65,7 @@ struct pic32_sport {
const char *irq_tx_name;
bool enable_tx_irq;
- bool hw_flow_ctrl;
- int cts_gpio;
+ struct gpio_desc *cts_gpiod;
struct clk *clk;
@@ -158,25 +157,16 @@ static void pic32_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
PIC32_UART_MODE_LPBK);
}
-/* get the state of CTS input pin for this port */
-static unsigned int get_cts_state(struct pic32_sport *sport)
-{
- /* read and invert UxCTS */
- if (gpio_is_valid(sport->cts_gpio))
- return !gpio_get_value(sport->cts_gpio);
-
- return 1;
-}
-
/* serial core request to return the state of misc UART input pins */
static unsigned int pic32_uart_get_mctrl(struct uart_port *port)
{
struct pic32_sport *sport = to_pic32_sport(port);
unsigned int mctrl = 0;
- if (!sport->hw_flow_ctrl)
+ /* get the state of CTS input pin for this port */
+ if (!sport->cts_gpiod)
mctrl |= TIOCM_CTS;
- else if (get_cts_state(sport))
+ else if (gpiod_get_value(sport->cts_gpiod))
mctrl |= TIOCM_CTS;
/* DSR and CD are not supported in PIC32, so return 1
@@ -609,7 +599,7 @@ static void pic32_uart_shutdown(struct uart_port *port)
/* serial core request to change current uart setting */
static void pic32_uart_set_termios(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct pic32_sport *sport = to_pic32_sport(port);
unsigned int baud;
@@ -648,7 +638,7 @@ static void pic32_uart_set_termios(struct uart_port *port,
PIC32_UART_MODE_PDSEL0);
}
/* if hw flow ctrl, then the pins must be specified in device tree */
- if ((new->c_cflag & CRTSCTS) && sport->hw_flow_ctrl) {
+ if ((new->c_cflag & CRTSCTS) && sport->cts_gpiod) {
/* enable hardware flow control */
pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
PIC32_UART_MODE_UEN1);
@@ -875,7 +865,8 @@ static struct uart_driver pic32_uart_driver = {
static int pic32_uart_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct pic32_sport *sport;
int uart_idx = 0;
struct resource *res_mem;
@@ -904,25 +895,10 @@ static int pic32_uart_probe(struct platform_device *pdev)
/* Hardware flow control: gpios
* !Note: Basically, CTS is needed for reading the status.
*/
- sport->hw_flow_ctrl = false;
- sport->cts_gpio = of_get_named_gpio(np, "cts-gpios", 0);
- if (gpio_is_valid(sport->cts_gpio)) {
- sport->hw_flow_ctrl = true;
-
- ret = devm_gpio_request(sport->dev,
- sport->cts_gpio, "CTS");
- if (ret) {
- dev_err(&pdev->dev,
- "error requesting CTS GPIO\n");
- goto err;
- }
-
- ret = gpio_direction_input(sport->cts_gpio);
- if (ret) {
- dev_err(&pdev->dev, "error setting CTS GPIO\n");
- goto err;
- }
- }
+ sport->cts_gpiod = devm_gpiod_get_optional(dev, "cts", GPIOD_IN);
+ if (IS_ERR(sport->cts_gpiod))
+ return dev_err_probe(dev, PTR_ERR(sport->cts_gpiod), "error requesting CTS GPIO\n");
+ gpiod_set_consumer_name(sport->cts_gpiod, "CTS");
pic32_sports[uart_idx] = sport;
port = &sport->port;
@@ -943,7 +919,7 @@ static int pic32_uart_probe(struct platform_device *pdev)
}
#ifdef CONFIG_SERIAL_PIC32_CONSOLE
- if (uart_console(port) && (pic32_console.flags & CON_ENABLED)) {
+ if (uart_console_enabled(port)) {
/* The peripheral clock has been enabled by console_setup,
* so disable it till the port is used.
*/
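The pic32 probe rework above moves from the legacy integer GPIO calls (of_get_named_gpio(), devm_gpio_request(), gpio_direction_input()) to a single descriptor lookup. devm_gpiod_get_optional() returns NULL when the "cts-gpios" property is absent, an ERR_PTR on real failures, and a ready-to-use input descriptor otherwise, which is why the separate hw_flow_ctrl flag can go away. A minimal sketch of that pattern, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_get_cts(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *cts;

	cts = devm_gpiod_get_optional(dev, "cts", GPIOD_IN);
	if (IS_ERR(cts))
		return dev_err_probe(dev, PTR_ERR(cts),
				     "error requesting CTS GPIO\n");

	/* NULL simply means "no CTS line wired up": no hardware flow control. */
	*out = cts;
	return 0;
}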
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index f63257b8e872..fe2e4ec423f7 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1202,7 +1202,7 @@ static void pmz_irda_setup(struct uart_pmac_port *uap, unsigned long *baud)
static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_pmac_port *uap = to_pmz(port);
unsigned long baud;
@@ -1244,7 +1244,7 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
/* The port lock is not held. */
static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_pmac_port *uap = to_pmz(port);
unsigned long flags;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 9309ffd87c8e..2d25231fad84 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -423,7 +423,7 @@ static void serial_pxa_shutdown(struct uart_port *port)
static void
serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned char cval, fcr = 0;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index f4698a064a4d..83b66b73303a 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
/* UART specific GENI registers */
#define SE_UART_LOOPBACK_CFG 0x22c
@@ -1005,7 +1006,8 @@ static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
}
static void qcom_geni_serial_set_termios(struct uart_port *uport,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int baud;
u32 bits_per_char;
@@ -1524,7 +1526,7 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
* even with no_console_suspend
*/
if (uart_console(uport)) {
- geni_icc_set_tag(&port->se, 0x3);
+ geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ACTIVE_ONLY);
geni_icc_set_bw(&port->se);
}
return uart_suspend_port(private_data->drv, uport);
@@ -1539,7 +1541,7 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
ret = uart_resume_port(private_data->drv, uport);
if (uart_console(uport)) {
- geni_icc_set_tag(&port->se, 0x7);
+ geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS);
geni_icc_set_bw(&port->se);
}
return ret;
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index feb2054aba37..0e387e2144fa 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -238,7 +238,7 @@ static void rda_uart_change_baudrate(struct rda_uart_port *rda_port,
static void rda_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct rda_uart_port *rda_port = to_rda_uart_port(port);
unsigned long flags;
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 6689d8add8f7..b81afb06f1f4 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -370,9 +370,8 @@ static void __rp2_uart_set_termios(struct rp2_uart_port *up,
up->ucode + RP2_RX_SWFLOW);
}
-static void rp2_uart_set_termios(struct uart_port *port,
- struct ktermios *new,
- struct ktermios *old)
+static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new,
+ const struct ktermios *old)
{
struct rp2_uart_port *up = port_to_up(port);
unsigned long flags;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index e64e42a19d1a..dd9e3253cab4 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -409,7 +409,7 @@ static void sa1100_shutdown(struct uart_port *port)
static void
sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index b7a4b47ce74e..77d1363029f5 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -1530,7 +1530,7 @@ static const u16 udivslot_table[16] = {
static void s3c24xx_serial_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
const struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_port *ourport = to_ourport(port);
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 2cf8533ef760..c5d2b6cdcb4a 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -531,7 +531,7 @@ static void sbd_init_port(struct sbd_port *sport)
}
static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct sbd_port *sport = to_sport(uport);
unsigned int mode1 = 0, mode2 = 0, aux = 0;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index d983692c59e0..524921360ca7 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1015,7 +1015,7 @@ static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
static void sc16is7xx_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index c56de2e104d4..dd98509f52e5 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -636,7 +636,8 @@ static void sccnxp_break_ctl(struct uart_port *port, int break_state)
}
static void sccnxp_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index a5748e41483b..b7170cb9a544 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1270,14 +1270,14 @@ static void tegra_uart_enable_ms(struct uart_port *u)
}
static void tegra_uart_set_termios(struct uart_port *u,
- struct ktermios *termios, struct ktermios *oldtermios)
+ struct ktermios *termios,
+ const struct ktermios *oldtermios)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
unsigned int baud;
unsigned long flags;
unsigned int lcr;
unsigned char char_bits;
- int symb_bit = 1;
struct clk *parent_clk = clk_get_parent(tup->uart_clk);
unsigned long parent_clk_rate = clk_get_rate(parent_clk);
int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
@@ -1304,7 +1304,6 @@ static void tegra_uart_set_termios(struct uart_port *u,
termios->c_cflag &= ~CMSPAR;
if ((termios->c_cflag & PARENB) == PARENB) {
- symb_bit++;
if (termios->c_cflag & PARODD) {
lcr |= UART_LCR_PARITY;
lcr &= ~UART_LCR_EPAR;
@@ -1317,22 +1316,18 @@ static void tegra_uart_set_termios(struct uart_port *u,
}
char_bits = tty_get_char_size(termios->c_cflag);
- symb_bit += char_bits;
lcr &= ~UART_LCR_WLEN8;
lcr |= UART_LCR_WLEN(char_bits);
/* Stop bits */
- if (termios->c_cflag & CSTOPB) {
+ if (termios->c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
- symb_bit += 2;
- } else {
+ else
lcr &= ~UART_LCR_STOP;
- symb_bit++;
- }
tegra_uart_write(tup, lcr, UART_LCR);
tup->lcr_shadow = lcr;
- tup->symb_bit = symb_bit;
+ tup->symb_bit = tty_get_frame_size(termios->c_cflag);
/* Baud rate. */
baud = uart_get_baud_rate(u, termios, oldtermios,
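The Tegra change above stops hand-counting symb_bit and instead asks tty_get_frame_size() for the full on-wire frame length (the same replacement appears later for ucc_uart.c). Judging from the code it removes, that length is one start bit plus the data bits, an optional parity bit, and one or two stop bits. The stand-alone sketch below computes the same value; the CSTOPB/PARENB constants mirror the common asm-generic termbits values and are an assumption here.

#include <stdio.h>

#define CSTOPB 0000100	/* two stop bits requested */
#define PARENB 0000400	/* parity enabled */

static unsigned int frame_size(unsigned int data_bits, unsigned int c_cflag)
{
	unsigned int bits = 1 + data_bits;	/* start bit + data bits */

	if (c_cflag & PARENB)
		bits++;				/* parity bit */
	bits += (c_cflag & CSTOPB) ? 2 : 1;	/* stop bit(s) */
	return bits;
}

int main(void)
{
	printf("8N1 frame: %u bits\n", frame_size(8, 0));		/* 10 */
	printf("7E2 frame: %u bits\n", frame_size(7, PARENB | CSTOPB));	/* 11 */
	return 0;
}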
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 12c87cd201a7..179ee199df34 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -49,7 +49,7 @@ static struct lock_class_key port_lock_key;
#define RS485_MAX_RTS_DELAY 100 /* msecs */
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
- struct ktermios *old_termios);
+ const struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
static void uart_change_pm(struct uart_state *state,
enum uart_pm_state pm_state);
@@ -158,15 +158,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
unsigned long flags;
unsigned int old;
- if (port->rs485.flags & SER_RS485_ENABLED) {
- set &= ~TIOCM_RTS;
- clear &= ~TIOCM_RTS;
- }
-
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
- if (old != port->mctrl)
+ if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -380,7 +375,7 @@ EXPORT_SYMBOL(uart_update_timeout);
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old, unsigned int min, unsigned int max)
+ const struct ktermios *old, unsigned int min, unsigned int max)
{
unsigned int try;
unsigned int baud;
@@ -492,7 +487,7 @@ EXPORT_SYMBOL(uart_get_divisor);
/* Caller holds port mutex */
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct uart_port *uport = uart_port_check(state);
struct ktermios *termios;
@@ -1391,7 +1386,7 @@ static void uart_set_rs485_termination(struct uart_port *port,
!!(rs485->flags & SER_RS485_TERMINATE_BUS));
}
-int uart_rs485_config(struct uart_port *port)
+static int uart_rs485_config(struct uart_port *port)
{
struct serial_rs485 *rs485 = &port->rs485;
int ret;
@@ -1405,7 +1400,6 @@ int uart_rs485_config(struct uart_port *port)
return ret;
}
-EXPORT_SYMBOL_GPL(uart_rs485_config);
static int uart_get_rs485_config(struct uart_port *port,
struct serial_rs485 __user *rs485)
@@ -1444,8 +1438,13 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
ret = port->rs485_config(port, &tty->termios, &rs485);
- if (!ret)
+ if (!ret) {
port->rs485 = rs485;
+
+ /* Reset RTS and other mctrl lines when disabling RS485 */
+ if (!(rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+ }
spin_unlock_irqrestore(&port->lock, flags);
if (ret)
return ret;
@@ -1619,7 +1618,7 @@ static void uart_set_ldisc(struct tty_struct *tty)
}
static void uart_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct uart_state *state = tty->driver_data;
struct uart_port *uport;
@@ -2352,7 +2351,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
- ops->set_mctrl(uport, 0);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, 0);
/* save mctrl so it can be restored on resume */
mctrl = uport->mctrl;
uport->mctrl = 0;
@@ -2440,7 +2440,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
- ops->set_mctrl(uport, 0);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
if (console_suspend_enabled || !uart_console(uport)) {
/* Protected by port mutex for now */
@@ -2451,7 +2452,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
if (tty)
uart_change_speed(tty, state, NULL);
spin_lock_irq(&uport->lock);
- ops->set_mctrl(uport, uport->mctrl);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, uport->mctrl);
+ else
+ uart_rs485_config(uport);
ops->start_tx(uport);
spin_unlock_irq(&uport->lock);
tty_port_set_initialized(port, 1);
@@ -2497,7 +2501,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
"MMIO 0x%llx", (unsigned long long)port->mapbase);
break;
default:
- strlcpy(address, "*unknown*", sizeof(address));
+ strscpy(address, "*unknown*", sizeof(address));
break;
}
@@ -2558,10 +2562,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
*/
spin_lock_irqsave(&port->lock, flags);
port->mctrl &= TIOCM_DTR;
- if (port->rs485.flags & SER_RS485_ENABLED &&
- !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
- port->mctrl |= TIOCM_RTS;
- port->ops->set_mctrl(port, port->mctrl);
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+ else
+ uart_rs485_config(port);
spin_unlock_irqrestore(&port->lock, flags);
/*
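The serial_core changes above all follow one rule: once SER_RS485_ENABLED is set, RTS is owned by the RS-485 configuration (now applied through the static uart_rs485_config()), so the generic code no longer pushes modem-control state into the driver. A minimal sketch of that guard, assuming the usual struct uart_port layout:

#include <linux/serial_core.h>

static void example_apply_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* With RS-485 active, RTS timing is driven by rs485_config(); writing
	 * mctrl here would fight it, so the call is skipped entirely.
	 */
	if (!(port->rs485.flags & SER_RS485_ENABLED))
		port->ops->set_mctrl(port, mctrl);
}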
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 228e380db080..e12f1dc18c38 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -594,7 +594,7 @@ static void serial_txx9_shutdown(struct uart_port *up)
static void
serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int cval, fcr = 0;
unsigned long flags;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 0075a1420005..62f773286d44 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1407,10 +1407,8 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
spin_lock_irq(&port->lock);
head = xmit->head;
tail = xmit->tail;
- buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
- s->tx_dma_len = min_t(unsigned int,
- CIRC_CNT(head, tail, UART_XMIT_SIZE),
- CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+ buf = s->tx_dma_addr + tail;
+ s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
if (!s->tx_dma_len) {
/* Transmit buffer has been flushed */
spin_unlock_irq(&port->lock);
@@ -2367,7 +2365,7 @@ static void sci_reset(struct uart_port *port)
}
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i, bits;
unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0;
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 4b1d4fe8458e..7fb6760b5c37 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -646,7 +646,7 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb,
static void sifive_serial_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
unsigned long flags;
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 4329b9c9cbf0..342a87967631 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -771,9 +771,8 @@ static void sprd_shutdown(struct uart_port *port)
devm_free_irq(port->dev, port->irq, port);
}
-static void sprd_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
+static void sprd_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int baud, quot;
unsigned int lcr = 0, fc;
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index cce42f4c9bc2..fcecea689a0d 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -500,7 +500,7 @@ static void asc_pm(struct uart_port *port, unsigned int state,
}
static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct asc_port *ascport = to_asc_port(port);
struct gpio_desc *gpiod;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 2c85dbf165c4..dfdbcf092fac 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -37,7 +37,7 @@
/* Register offsets */
-static struct stm32_usart_info stm32f4_info = {
+static struct stm32_usart_info __maybe_unused stm32f4_info = {
.ofs = {
.isr = 0x00,
.rdr = 0x04,
@@ -58,7 +58,7 @@ static struct stm32_usart_info stm32f4_info = {
}
};
-static struct stm32_usart_info stm32f7_info = {
+static struct stm32_usart_info __maybe_unused stm32f7_info = {
.ofs = {
.cr1 = 0x00,
.cr2 = 0x04,
@@ -80,7 +80,7 @@ static struct stm32_usart_info stm32f7_info = {
}
};
-static struct stm32_usart_info stm32h7_info = {
+static struct stm32_usart_info __maybe_unused stm32h7_info = {
.ofs = {
.cr1 = 0x00,
.cr2 = 0x04,
@@ -131,6 +131,53 @@ static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
writel_relaxed(val, port->membase + reg);
}
+static unsigned int stm32_usart_tx_empty(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
+ return TIOCSER_TEMT;
+
+ return 0;
+}
+
+static void stm32_usart_rs485_rts_enable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
+ if (stm32_port->hw_flow_control ||
+ !(rs485conf->flags & SER_RS485_ENABLED))
+ return;
+
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ }
+}
+
+static void stm32_usart_rs485_rts_disable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
+ if (stm32_port->hw_flow_control ||
+ !(rs485conf->flags & SER_RS485_ENABLED))
+ return;
+
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ }
+}
+
static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
u32 delay_DDE, u32 baud)
{
@@ -214,6 +261,12 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ /* Adjust RTS polarity in case it's driven in software */
+ if (stm32_usart_tx_empty(port))
+ stm32_usart_rs485_rts_disable(port);
+ else
+ stm32_usart_rs485_rts_enable(port);
+
return 0;
}
@@ -529,42 +582,6 @@ static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
}
-static void stm32_usart_rs485_rts_enable(struct uart_port *port)
-{
- struct stm32_port *stm32_port = to_stm32_port(port);
- struct serial_rs485 *rs485conf = &port->rs485;
-
- if (stm32_port->hw_flow_control ||
- !(rs485conf->flags & SER_RS485_ENABLED))
- return;
-
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
- } else {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
- }
-}
-
-static void stm32_usart_rs485_rts_disable(struct uart_port *port)
-{
- struct stm32_port *stm32_port = to_stm32_port(port);
- struct serial_rs485 *rs485conf = &port->rs485;
-
- if (stm32_port->hw_flow_control ||
- !(rs485conf->flags & SER_RS485_ENABLED))
- return;
-
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
- } else {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
- }
-}
-
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -807,17 +824,6 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
return IRQ_HANDLED;
}
-static unsigned int stm32_usart_tx_empty(struct uart_port *port)
-{
- struct stm32_port *stm32_port = to_stm32_port(port);
- const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
-
- if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
- return TIOCSER_TEMT;
-
- return 0;
-}
-
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1089,7 +1095,7 @@ static void stm32_usart_shutdown(struct uart_port *port)
static void stm32_usart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index eafada8fb6fa..1938ba5e98c0 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -323,7 +323,7 @@ static void sunhv_shutdown(struct uart_port *port)
/* port->lock is not held. */
static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
unsigned int quot = uart_get_divisor(port, baud);
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 60c73662f955..7afe61a0e72e 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -333,7 +333,7 @@ static void sunplus_shutdown(struct uart_port *port)
static void sunplus_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *oldtermios)
+ const struct ktermios *oldtermios)
{
u32 ext, div, div_l, div_h, baud, lcr;
u32 clk = port->uartclk;
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 6ea52293d9f3..99608b2a2b74 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -681,27 +681,23 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
unsigned int quot)
{
unsigned char dafo;
- int bits, n, m;
+ int n, m;
/* Byte size and parity */
switch (cflag & CSIZE) {
- case CS5: dafo = SAB82532_DAFO_CHL5; bits = 7; break;
- case CS6: dafo = SAB82532_DAFO_CHL6; bits = 8; break;
- case CS7: dafo = SAB82532_DAFO_CHL7; bits = 9; break;
- case CS8: dafo = SAB82532_DAFO_CHL8; bits = 10; break;
+ case CS5: dafo = SAB82532_DAFO_CHL5; break;
+ case CS6: dafo = SAB82532_DAFO_CHL6; break;
+ case CS7: dafo = SAB82532_DAFO_CHL7; break;
+ case CS8: dafo = SAB82532_DAFO_CHL8; break;
/* Never happens, but GCC is too dumb to figure it out */
- default: dafo = SAB82532_DAFO_CHL5; bits = 7; break;
+ default: dafo = SAB82532_DAFO_CHL5; break;
}
- if (cflag & CSTOPB) {
+ if (cflag & CSTOPB)
dafo |= SAB82532_DAFO_STOP;
- bits++;
- }
- if (cflag & PARENB) {
+ if (cflag & PARENB)
dafo |= SAB82532_DAFO_PARE;
- bits++;
- }
if (cflag & PARODD) {
dafo |= SAB82532_DAFO_PAR_ODD;
@@ -776,7 +772,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
/* port->lock is not held. */
static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_sunsab_port *up =
container_of(port, struct uart_sunsab_port, port);
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 84d545e5a8c7..9ea7e567540d 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -897,7 +897,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
static void
sunsu_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud, quot;
@@ -1217,13 +1217,13 @@ static int sunsu_kbd_ms_init(struct uart_sunsu_port *up)
serio->id.type = SERIO_RS232;
if (up->su_type == SU_PORT_KBD) {
serio->id.proto = SERIO_SUNKBD;
- strlcpy(serio->name, "sukbd", sizeof(serio->name));
+ strscpy(serio->name, "sukbd", sizeof(serio->name));
} else {
serio->id.proto = SERIO_SUN;
serio->id.extra = 1;
- strlcpy(serio->name, "sums", sizeof(serio->name));
+ strscpy(serio->name, "sums", sizeof(serio->name));
}
- strlcpy(serio->phys,
+ strscpy(serio->phys,
(!(up->port.line & 1) ? "su/serio0" : "su/serio1"),
sizeof(serio->phys));
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index c14275d83b0b..87425290687d 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -938,7 +938,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
/* The port lock is not held. */
static void
sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_sunzilog_port *up =
container_of(port, struct uart_sunzilog_port, port);
@@ -1307,13 +1307,13 @@ static void sunzilog_register_serio(struct uart_sunzilog_port *up)
serio->id.type = SERIO_RS232;
if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
serio->id.proto = SERIO_SUNKBD;
- strlcpy(serio->name, "zskbd", sizeof(serio->name));
+ strscpy(serio->name, "zskbd", sizeof(serio->name));
} else {
serio->id.proto = SERIO_SUN;
serio->id.extra = 1;
- strlcpy(serio->name, "zsms", sizeof(serio->name));
+ strscpy(serio->name, "zsms", sizeof(serio->name));
}
- strlcpy(serio->phys,
+ strscpy(serio->phys,
((up->flags & SUNZILOG_FLAG_CONS_KEYB) ?
"zs/serio0" : "zs/serio1"),
sizeof(serio->phys));
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index 889b701ba7c6..23500b342da7 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -126,7 +126,7 @@ static void tegra_tcu_uart_shutdown(struct uart_port *port)
static void tegra_tcu_uart_set_termios(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
}
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 08941eabe7b1..bb19ed012def 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -275,8 +275,8 @@ static int get_bindex(int baud)
}
static void timbuart_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int baud;
short bindex;
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 880e2afbb97b..eca41ac5477c 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -314,8 +314,9 @@ static void ulite_shutdown(struct uart_port *port)
clk_disable(pdata->clk);
}
-static void ulite_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+static void ulite_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned long flags;
struct uartlite_data *pdata = port->private_data;
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 3cc9ef08455c..82cf14dd3d43 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -843,7 +843,8 @@ static void qe_uart_shutdown(struct uart_port *port)
* Set the serial port parameters.
*/
static void qe_uart_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
@@ -853,13 +854,6 @@ static void qe_uart_set_termios(struct uart_port *port,
u16 upsmr = ioread16be(&uccp->upsmr);
struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
u16 supsmr = ioread16be(&uccup->supsmr);
- u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
-
- /* Character length programmed into the mode register is the
- * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
- * 1 or 2 stop bits, minus 1.
- * The value 'bits' counts this for us.
- */
/* byte size */
upsmr &= UCC_UART_UPSMR_CL_MASK;
@@ -869,22 +863,18 @@ static void qe_uart_set_termios(struct uart_port *port,
case CS5:
upsmr |= UCC_UART_UPSMR_CL_5;
supsmr |= UCC_UART_SUPSMR_CL_5;
- char_length += 5;
break;
case CS6:
upsmr |= UCC_UART_UPSMR_CL_6;
supsmr |= UCC_UART_SUPSMR_CL_6;
- char_length += 6;
break;
case CS7:
upsmr |= UCC_UART_UPSMR_CL_7;
supsmr |= UCC_UART_SUPSMR_CL_7;
- char_length += 7;
break;
default: /* case CS8 */
upsmr |= UCC_UART_UPSMR_CL_8;
supsmr |= UCC_UART_SUPSMR_CL_8;
- char_length += 8;
break;
}
@@ -892,13 +882,11 @@ static void qe_uart_set_termios(struct uart_port *port,
if (termios->c_cflag & CSTOPB) {
upsmr |= UCC_UART_UPSMR_SL;
supsmr |= UCC_UART_SUPSMR_SL;
- char_length++; /* + SL */
}
if (termios->c_cflag & PARENB) {
upsmr |= UCC_UART_UPSMR_PEN;
supsmr |= UCC_UART_SUPSMR_PEN;
- char_length++; /* + PEN */
if (!(termios->c_cflag & PARODD)) {
upsmr &= ~(UCC_UART_UPSMR_RPM_MASK |
@@ -953,7 +941,7 @@ static void qe_uart_set_termios(struct uart_port *port,
iowrite16be(upsmr, &uccp->upsmr);
if (soft_uart) {
iowrite16be(supsmr, &uccup->supsmr);
- iowrite8(char_length, &uccup->rx_length);
+ iowrite8(tty_get_frame_size(termios->c_cflag), &uccup->rx_length);
/* Soft-UART requires a 1X multiplier for TX */
qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 6f08136ce78a..10fbdb09965f 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -187,6 +187,13 @@ static void handle_rx(struct uart_port *port)
tty_flip_buffer_push(tport);
}
+static unsigned int vt8500_tx_empty(struct uart_port *port)
+{
+ unsigned int idx = vt8500_read(port, VT8500_URFIDX) & 0x1f;
+
+ return idx < 16 ? TIOCSER_TEMT : 0;
+}
+
static void handle_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -201,7 +208,7 @@ static void handle_tx(struct uart_port *port)
return;
}
- while ((vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16) {
+ while (vt8500_tx_empty(port)) {
if (uart_circ_empty(xmit))
break;
@@ -260,12 +267,6 @@ static irqreturn_t vt8500_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static unsigned int vt8500_tx_empty(struct uart_port *port)
-{
- return (vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16 ?
- TIOCSER_TEMT : 0;
-}
-
static unsigned int vt8500_get_mctrl(struct uart_port *port)
{
unsigned int usr;
@@ -355,7 +356,7 @@ static void vt8500_shutdown(struct uart_port *port)
static void vt8500_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct vt8500_port *vt8500_port =
container_of(port, struct vt8500_port, uart);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 9e01fe6c0ab8..2eff7cff57c4 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -2,7 +2,7 @@
/*
* Cadence UART driver (found in Xilinx Zynq)
*
- * 2011 - 2014 (C) Xilinx Inc.
+ * Copyright (c) 2011 - 2014 Xilinx, Inc.
*
* This driver has originally been pushed by Xilinx using a Zynq-branding. This
* still shows in the naming of this file, the kconfig symbols and some symbols
@@ -361,6 +361,8 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
+ isrstatus &= port->read_status_mask;
+ isrstatus &= ~port->ignore_status_mask;
/*
* Skip RX processing if RX is disabled as RXEMPTY will never be set
* as read bytes will not be removed from the FIFO.
@@ -675,7 +677,8 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
* @old: Values of the previously saved termios structure
*/
static void cdns_uart_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
u32 cval = 0;
unsigned int baud, minbaud, maxbaud;
@@ -1130,8 +1133,35 @@ static struct uart_driver cdns_uart_uart_driver;
*/
static void cdns_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
- while (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)
+ unsigned int ctrl_reg;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (1) {
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+ if (!(ctrl_reg & CDNS_UART_CR_TX_DIS))
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_warn(port->dev,
+ "timeout waiting for Enable\n");
+ return;
+ }
+ cpu_relax();
+ }
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (1) {
+ ctrl_reg = readl(port->membase + CDNS_UART_SR);
+
+ if (!(ctrl_reg & CDNS_UART_SR_TXFULL))
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_warn(port->dev,
+ "timeout waiting for TX fifo\n");
+ return;
+ }
cpu_relax();
+ }
writel(ch, port->membase + CDNS_UART_FIFO);
}
@@ -1329,12 +1359,20 @@ static int cdns_uart_resume(struct device *device)
unsigned long flags;
u32 ctrl_reg;
int may_wake;
+ int ret;
may_wake = device_may_wakeup(device);
if (console_suspend_enabled && uart_console(port) && !may_wake) {
- clk_enable(cdns_uart->pclk);
- clk_enable(cdns_uart->uartclk);
+ ret = clk_enable(cdns_uart->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(cdns_uart->uartclk);
+ if (ret) {
+ clk_disable(cdns_uart->pclk);
+ return ret;
+ }
spin_lock_irqsave(&port->lock, flags);
@@ -1383,9 +1421,17 @@ static int __maybe_unused cdns_runtime_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct cdns_uart *cdns_uart = port->private_data;
+ int ret;
- clk_enable(cdns_uart->pclk);
- clk_enable(cdns_uart->uartclk);
+ ret = clk_enable(cdns_uart->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(cdns_uart->uartclk);
+ if (ret) {
+ clk_disable(cdns_uart->pclk);
+ return ret;
+ }
return 0;
};
@@ -1551,6 +1597,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
port->private_data = cdns_uart_data;
+ port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
+ CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
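The resume paths above now check clk_enable(), which can fail; when the second clock fails, the first must be switched back off so the enable counts stay balanced. A short sketch of that check-and-unwind pattern, detached from the driver specifics:

#include <linux/clk.h>

static int example_enable_clocks(struct clk *pclk, struct clk *uartclk)
{
	int ret;

	ret = clk_enable(pclk);
	if (ret)
		return ret;

	ret = clk_enable(uartclk);
	if (ret) {
		clk_disable(pclk);	/* undo the first enable on failure */
		return ret;
	}

	return 0;
}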
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 5bc58591665a..688db7d8b748 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -846,7 +846,7 @@ static void zs_reset(struct zs_port *zport)
}
static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 9bc2a9265277..25e9befdda3a 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -91,7 +91,6 @@ static char *driver_name = "SyncLink GT";
static char *slgt_driver_name = "synclink_gt";
static char *tty_dev_prefix = "ttySLG";
MODULE_LICENSE("GPL");
-#define MGSL_MAGIC 0x5401
#define MAX_DEVICES 32
static const struct pci_device_id pci_table[] = {
@@ -215,8 +214,6 @@ struct slgt_info {
struct slgt_info *next_device; /* device list link */
- int magic;
-
char device_name[25];
struct pci_dev *pdev;
@@ -554,10 +551,6 @@ static inline int sanity_check(struct slgt_info *info, char *devname, const char
printk("null struct slgt_info for (%s) in %s\n", devname, name);
return 1;
}
- if (info->magic != MGSL_MAGIC) {
- printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
- return 1;
- }
#else
if (!info)
return 1;
@@ -707,7 +700,8 @@ static void hangup(struct tty_struct *tty)
wake_up_interruptible(&info->port.open_wait);
}
-static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void set_termios(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
struct slgt_info *info = tty->driver_data;
unsigned long flags;
@@ -3498,7 +3492,6 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
} else {
tty_port_init(&info->port);
info->port.ops = &slgt_port_ops;
- info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->base_clock = 14745600;
diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
index f310a8274df1..1c08c9b67b16 100644
--- a/drivers/tty/tty.h
+++ b/drivers/tty/tty.h
@@ -73,7 +73,7 @@ void tty_buffer_set_lock_subclass(struct tty_port *port);
bool tty_buffer_restart_work(struct tty_port *port);
bool tty_buffer_cancel_work(struct tty_port *port);
void tty_buffer_flush_work(struct tty_port *port);
-speed_t tty_termios_input_baud_rate(struct ktermios *termios);
+speed_t tty_termios_input_baud_rate(const struct ktermios *termios);
void tty_ldisc_hangup(struct tty_struct *tty, bool reset);
int tty_ldisc_reinit(struct tty_struct *tty, int disc);
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 3cd99ed7c710..f9b49939c27b 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -49,13 +49,13 @@ static int n_baud_table = ARRAY_SIZE(baud_table);
*
* Convert termios baud rate data into a speed. This should be called
* with the termios lock held if this termios is a terminal termios
- * structure. May change the termios data. Device drivers can call this
- * function but should use ->c_[io]speed directly as they are updated.
+ * structure. Device drivers can call this function but should use
+ * ->c_[io]speed directly as they are updated.
*
* Locking: none
*/
-speed_t tty_termios_baud_rate(struct ktermios *termios)
+speed_t tty_termios_baud_rate(const struct ktermios *termios)
{
unsigned int cbaud;
@@ -67,11 +67,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
-
- if (cbaud < 1 || cbaud + 15 > n_baud_table)
- termios->c_cflag &= ~CBAUDEX;
- else
- cbaud += 15;
+ cbaud += 15;
}
return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
@@ -83,30 +79,26 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
*
* Convert termios baud rate data into a speed. This should be called
* with the termios lock held if this termios is a terminal termios
- * structure. May change the termios data. Device drivers can call this
- * function but should use ->c_[io]speed directly as they are updated.
+ * structure. Device drivers can call this function but should use
+ * ->c_[io]speed directly as they are updated.
*
* Locking: none
*/
-speed_t tty_termios_input_baud_rate(struct ktermios *termios)
+speed_t tty_termios_input_baud_rate(const struct ktermios *termios)
{
unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
if (cbaud == B0)
return tty_termios_baud_rate(termios);
- /* Magic token for arbitrary speed via c_ispeed*/
+ /* Magic token for arbitrary speed via c_ispeed */
if (cbaud == BOTHER)
return termios->c_ispeed;
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
-
- if (cbaud < 1 || cbaud + 15 > n_baud_table)
- termios->c_cflag &= ~(CBAUDEX << IBSHIFT);
- else
- cbaud += 15;
+ cbaud += 15;
}
return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
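With termios now const, these functions can no longer clear CBAUDEX in c_cflag, so the old clamp is gone and the final bounds check alone handles out-of-range codes: plain Bnnn values index baud_table[] directly, CBAUDEX codes are offset by 15 (they start right after B38400), and anything past the table decodes to 0. A self-contained sketch of that lookup with a shortened table (the toy_* names are illustrative):

#include <linux/kernel.h>
#include <linux/termios.h>

static const unsigned int toy_baud_table[] = {
	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
	4800, 9600, 19200, 38400,	/* B0 .. B38400          */
	57600, 115200, 230400, 460800,	/* first CBAUDEX entries */
};

static unsigned int toy_baud_rate(tcflag_t c_cflag)
{
	unsigned int cbaud = c_cflag & CBAUD;

	if (cbaud & CBAUDEX) {
		cbaud &= ~CBAUDEX;
		cbaud += 15;	/* CBAUDEX codes follow B38400 in the table */
	}
	return cbaud >= ARRAY_SIZE(toy_baud_table) ? 0 : toy_baud_table[cbaud];
}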
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 82a8855981f7..de06c3c2ff70 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -99,8 +99,8 @@
#include <linux/serial.h>
#include <linux/ratelimit.h>
#include <linux/compat.h>
-
#include <linux/uaccess.h>
+#include <linux/termios_internal.h>
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
@@ -170,7 +170,6 @@ static void free_tty_struct(struct tty_struct *tty)
tty_ldisc_deinit(tty);
put_device(tty->dev);
kvfree(tty->write_buf);
- tty->magic = 0xDEADDEAD;
kfree(tty);
}
@@ -265,11 +264,6 @@ static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
imajor(inode), iminor(inode), routine);
return 1;
}
- if (tty->magic != TTY_MAGIC) {
- pr_warn("(%d:%d): %s: bad magic number\n",
- imajor(inode), iminor(inode), routine);
- return 1;
- }
#endif
return 0;
}
@@ -1533,7 +1527,6 @@ static void release_one_tty(struct work_struct *work)
if (tty->ops->cleanup)
tty->ops->cleanup(tty);
- tty->magic = 0;
tty_driver_kref_put(driver);
module_put(owner);
@@ -3093,7 +3086,6 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
return NULL;
kref_init(&tty->kref);
- tty->magic = TTY_MAGIC;
if (tty_ldisc_init(tty)) {
kfree(tty);
return NULL;
@@ -3329,7 +3321,6 @@ struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
return ERR_PTR(-ENOMEM);
kref_init(&driver->kref);
- driver->magic = TTY_DRIVER_MAGIC;
driver->num = lines;
driver->owner = owner;
driver->flags = flags;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 2a76b330e108..ce511557b98b 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -21,6 +21,7 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/compat.h>
+#include <linux/termios_internal.h>
#include "tty.h"
#include <asm/io.h>
@@ -219,7 +220,7 @@ EXPORT_SYMBOL(tty_wait_until_sent);
* Termios Helper Methods
*/
-static void unset_locked_termios(struct tty_struct *tty, struct ktermios *old)
+static void unset_locked_termios(struct tty_struct *tty, const struct ktermios *old)
{
struct ktermios *termios = &tty->termios;
struct ktermios *locked = &tty->termios_locked;
@@ -249,7 +250,7 @@ static void unset_locked_termios(struct tty_struct *tty, struct ktermios *old)
* in some cases where only minimal reconfiguration is supported
*/
-void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old)
+void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old)
{
/* The bits a dumb device handles in software. Smart devices need
to always provide a set_termios method */
@@ -374,6 +375,80 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
}
EXPORT_SYMBOL_GPL(tty_set_termios);
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+__weak int user_termio_to_kernel_termios(struct ktermios *termios,
+ struct termio __user *termio)
+{
+ struct termio v;
+
+ if (copy_from_user(&v, termio, sizeof(struct termio)))
+ return -EFAULT;
+
+ termios->c_iflag = (0xffff0000 & termios->c_iflag) | v.c_iflag;
+ termios->c_oflag = (0xffff0000 & termios->c_oflag) | v.c_oflag;
+ termios->c_cflag = (0xffff0000 & termios->c_cflag) | v.c_cflag;
+ termios->c_lflag = (0xffff0000 & termios->c_lflag) | v.c_lflag;
+ termios->c_line = (0xffff0000 & termios->c_lflag) | v.c_line;
+ memcpy(termios->c_cc, v.c_cc, NCC);
+ return 0;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+__weak int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ struct termio v;
+ memset(&v, 0, sizeof(struct termio));
+ v.c_iflag = termios->c_iflag;
+ v.c_oflag = termios->c_oflag;
+ v.c_cflag = termios->c_cflag;
+ v.c_lflag = termios->c_lflag;
+ v.c_line = termios->c_line;
+ memcpy(v.c_cc, termios->c_cc, NCC);
+ return copy_to_user(termio, &v, sizeof(struct termio));
+}
+
+#ifdef TCGETS2
+__weak int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios2 __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios2));
+}
+__weak int kernel_termios_to_user_termios(struct termios2 __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios2));
+}
+__weak int user_termios_to_kernel_termios_1(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+__weak int kernel_termios_to_user_termios_1(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
+
+#else
+
+__weak int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+__weak int kernel_termios_to_user_termios(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
+#endif /* TCGETS2 */
+
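These __weak helpers give every architecture a default implementation of the legacy struct termio <-> struct ktermios conversions (declared in the new <linux/termios_internal.h>); an arch only overrides them if its termios ABI differs. The TCSETA-style callers then reduce to roughly the following (a hedged sketch with an illustrative toy_tcseta() name; the real path goes through set_termios() and takes the termios locks):

#include <linux/termios_internal.h>
#include <linux/tty.h>

static int toy_tcseta(struct tty_struct *tty, struct termio __user *arg)
{
	struct ktermios new_termios = tty->termios;	/* keep the bits termio can't carry */
	int ret;

	ret = user_termio_to_kernel_termios(&new_termios, arg);
	if (ret)
		return ret;

	return tty_set_termios(tty, &new_termios);
}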
/**
* set_termios - set termios values for a tty
* @tty: terminal device
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 393518a24cfe..784e46a0a3b1 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -14,8 +14,6 @@
void tty_lock(struct tty_struct *tty)
{
- if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
- return;
tty_kref_get(tty);
mutex_lock(&tty->legacy_mutex);
}
@@ -25,8 +23,6 @@ int tty_lock_interruptible(struct tty_struct *tty)
{
int ret;
- if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
- return -EIO;
tty_kref_get(tty);
ret = mutex_lock_interruptible(&tty->legacy_mutex);
if (ret)
@@ -36,8 +32,6 @@ int tty_lock_interruptible(struct tty_struct *tty)
void tty_unlock(struct tty_struct *tty)
{
- if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty))
- return;
mutex_unlock(&tty->legacy_mutex);
tty_kref_put(tty);
}
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index e11383ae1e7e..34ba6e54789a 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -11,6 +11,7 @@
#include <linux/sysfs.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/termios_internal.h>
#include <asm/vio.h>
#include <asm/ldc.h>
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 0b669c82ddc9..981d2bfcf9a5 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -154,10 +154,10 @@ static void console_callback(struct work_struct *ignored);
static void con_driver_unregister_callback(struct work_struct *ignored);
static void blank_screen_t(struct timer_list *unused);
static void set_palette(struct vc_data *vc);
+static void unblank_screen(void);
#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
-static int printable; /* Is console ready for printing? */
int default_utf8 = true;
module_param(default_utf8, int, S_IRUGO | S_IWUSR);
int global_cursor_default = -1;
@@ -3084,9 +3084,9 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
ushort start_x, cnt;
int kmsg_console;
- /* console busy or not yet initialized */
- if (!printable)
- return;
+ WARN_CONSOLE_UNLOCKED();
+
+ /* this protects against concurrent oops only */
if (!spin_trylock(&printing_lock))
return;
@@ -3537,7 +3537,6 @@ static int __init con_init(void)
pr_info("Console: %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
display_desc, vc->vc_cols, vc->vc_rows);
- printable = 1;
console_unlock();
@@ -4452,7 +4451,7 @@ EXPORT_SYMBOL(do_unblank_screen);
* call it with 1 as an argument and so force a mode restore... that may kill
* X or at least garbage the screen but would also make the Oops visible...
*/
-void unblank_screen(void)
+static void unblank_screen(void)
{
do_unblank_screen(0);
}
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 0a088b47d557..53aea56d1de1 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -225,12 +225,13 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
unsigned int wb_enable;
ssize_t res;
- if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) {
+ if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)
+ && ufshcd_enable_wb_if_scaling_up(hba))) {
/*
* If the platform supports UFSHCD_CAP_CLK_SCALING, turn WB
* on/off will be done while clock scaling up/down.
*/
- dev_warn(dev, "To control WB through wb_on is not allowed!\n");
+ dev_warn(dev, "It is not allowed to configure WB!\n");
return -EOPNOTSUPP;
}
@@ -254,6 +255,49 @@ out:
return res < 0 ? res : count;
}
+static ssize_t enable_wb_buf_flush_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled);
+}
+
+static ssize_t enable_wb_buf_flush_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int enable_wb_buf_flush;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_buf_flush_allowed(hba)) {
+ dev_warn(dev, "It is not allowed to configure WB buf flushing!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (kstrtouint(buf, 0, &enable_wb_buf_flush))
+ return -EINVAL;
+
+ if (enable_wb_buf_flush != 0 && enable_wb_buf_flush != 1)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush);
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
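The new store handler mirrors wb_on_store(): reject anything but 0/1, take host_sem so the write cannot race UFS shutdown, hold a runtime-PM reference across the device query, and report the byte count on success. Stripped to its skeleton (a sketch local to ufs-sysfs.c; do_toggle() is a placeholder for the actual ufshcd_wb_toggle_buf_flush() call):

static ssize_t toy_bool_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int val;
	ssize_t res;

	if (kstrtouint(buf, 0, &val) || val > 1)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = do_toggle(hba, val);	/* placeholder for the real query helper */
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}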
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -262,6 +306,7 @@ static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
+static DEVICE_ATTR_RW(enable_wb_buf_flush);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -272,6 +317,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_spm_target_link_state.attr,
&dev_attr_auto_hibern8.attr,
&dev_attr_wb_on.attr,
+ &dev_attr_enable_wb_buf_flush.attr,
NULL
};
@@ -279,6 +325,40 @@ static const struct attribute_group ufs_sysfs_default_group = {
.attrs = ufs_sysfs_ufshcd_attrs,
};
+static ssize_t clock_scaling_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba));
+}
+
+static ssize_t write_booster_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba));
+}
+
+static DEVICE_ATTR_RO(clock_scaling);
+static DEVICE_ATTR_RO(write_booster);
+
+/*
+ * See Documentation/ABI/testing/sysfs-driver-ufs for the semantics of this
+ * group.
+ */
+static struct attribute *ufs_sysfs_capabilities_attrs[] = {
+ &dev_attr_clock_scaling.attr,
+ &dev_attr_write_booster.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_capabilities_group = {
+ .name = "capabilities",
+ .attrs = ufs_sysfs_capabilities_attrs,
+};
+
static ssize_t monitor_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1134,6 +1214,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
+ &ufs_sysfs_capabilities_group,
&ufs_sysfs_monitor_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 8f67db202d7b..f68ca33f6ac7 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -26,6 +26,12 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
return 0;
}
+static inline bool ufshcd_is_wb_buf_flush_allowed(struct ufs_hba *hba)
+{
+ return ufshcd_is_wb_allowed(hba) &&
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL);
+}
+
#ifdef CONFIG_SCSI_UFS_HWMON
void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
void ufs_hwmon_remove(struct ufs_hba *hba);
@@ -36,6 +42,11 @@ static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
#endif
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index,
+ u8 selector,
+ u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 9538832b03a0..7256e6c43ca6 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/sched/clock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
@@ -265,8 +266,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
+ bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -286,16 +287,17 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
-static inline void ufshcd_wb_config(struct ufs_hba *hba)
+static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
return;
ufshcd_wb_toggle(hba, true);
- ufshcd_wb_toggle_flush_during_h8(hba, true);
- if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
- ufshcd_wb_toggle_flush(hba, true);
+ ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
+
+ if (ufshcd_is_wb_buf_flush_allowed(hba))
+ ufshcd_wb_toggle_buf_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -457,7 +459,7 @@ static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
if (e->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
- e->val[p], ktime_to_us(e->tstamp[p]));
+ e->val[p], div_u64(e->tstamp[p], 1000));
found = true;
}
@@ -502,9 +504,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
lrbp = &hba->lrb[tag];
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
- tag, ktime_to_us(lrbp->issue_time_stamp));
+ tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
- tag, ktime_to_us(lrbp->compl_time_stamp));
+ tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@@ -566,10 +568,10 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
dev_err(hba->dev,
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- ktime_to_us(hba->ufs_stats.last_intr_ts),
+ div_u64(hba->ufs_stats.last_intr_ts, 1000),
hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
@@ -1298,9 +1300,11 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
/* Enable Write Booster if we have scaled up else disable it */
- downgrade_write(&hba->clk_scaling_lock);
- is_writelock = false;
- ufshcd_wb_toggle(hba, scale_up);
+ if (ufshcd_enable_wb_if_scaling_up(hba)) {
+ downgrade_write(&hba->clk_scaling_lock);
+ is_writelock = false;
+ ufshcd_wb_toggle(hba, scale_up);
+ }
out_unprepare:
ufshcd_clock_scaling_unprepare(hba, is_writelock);
@@ -2140,7 +2144,9 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
unsigned long flags;
lrbp->issue_time_stamp = ktime_get();
+ lrbp->issue_time_stamp_local_clock = local_clock();
lrbp->compl_time_stamp = ktime_set(0, 0);
+ lrbp->compl_time_stamp_local_clock = 0;
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -4219,7 +4225,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
} else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
POST_CHANGE);
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+ hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
hba->ufs_stats.hibern8_exit_cnt++;
}
@@ -4721,7 +4727,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
e = &hba->ufs_stats.event[id];
e->val[e->pos] = val;
- e->tstamp[e->pos] = ktime_get();
+ e->tstamp[e->pos] = local_clock();
e->cnt += 1;
e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
@@ -5354,6 +5360,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
lrbp->compl_time_stamp = ktime_get();
+ lrbp->compl_time_stamp_local_clock = local_clock();
cmd = lrbp->cmd;
if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -5749,60 +5756,60 @@ int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
int ret;
- if (!ufshcd_is_wb_allowed(hba))
- return 0;
-
- if (!(enable ^ hba->dev_info.wb_enabled))
+ if (!ufshcd_is_wb_allowed(hba) ||
+ hba->dev_info.wb_enabled == enable)
return 0;
ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
if (ret) {
- dev_err(hba->dev, "%s Write Booster %s failed %d\n",
- __func__, enable ? "enable" : "disable", ret);
+ dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
return ret;
}
hba->dev_info.wb_enabled = enable;
- dev_dbg(hba->dev, "%s Write Booster %s\n",
+ dev_dbg(hba->dev, "%s: Write Booster %s\n",
__func__, enable ? "enabled" : "disabled");
return ret;
}
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
+ bool enable)
{
int ret;
- ret = __ufshcd_wb_toggle(hba, set,
+ ret = __ufshcd_wb_toggle(hba, enable,
QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
if (ret) {
- dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
- __func__, set ? "enable" : "disable", ret);
+ dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
return;
}
- dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
- __func__, set ? "enabled" : "disabled");
+ dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
+ __func__, enable ? "enabled" : "disabled");
}
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
int ret;
if (!ufshcd_is_wb_allowed(hba) ||
hba->dev_info.wb_buf_flush_enabled == enable)
- return;
+ return 0;
ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
if (ret) {
- dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
- enable ? "enable" : "disable", ret);
- return;
+ dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
+ return ret;
}
hba->dev_info.wb_buf_flush_enabled = enable;
-
- dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
+ dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
__func__, enable ? "enabled" : "disabled");
+
+ return ret;
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5817,7 +5824,7 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
index, 0, &cur_buf);
if (ret) {
- dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+ dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
__func__, ret);
return false;
}
@@ -5833,10 +5840,10 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
- if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
- ufshcd_wb_toggle_flush(hba, false);
+ if (ufshcd_is_wb_buf_flush_allowed(hba))
+ ufshcd_wb_toggle_buf_flush(hba, false);
- ufshcd_wb_toggle_flush_during_h8(hba, false);
+ ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
ufshcd_wb_toggle(hba, false);
hba->caps &= ~UFSHCD_CAP_WB_EN;
@@ -5902,7 +5909,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
index, 0, &avail_buf);
if (ret) {
- dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+ dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
__func__, ret);
return false;
}
@@ -6642,7 +6649,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = ktime_get();
+ hba->ufs_stats.last_intr_ts = local_clock();
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -8233,7 +8240,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
*/
ufshcd_set_active_icc_lvl(hba);
- ufshcd_wb_config(hba);
+ /* Enable UFS Write Booster if supported */
+ ufshcd_configure_wb(hba);
+
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
/* Enable Auto-Hibernate if configured */
diff --git a/drivers/ufs/host/ufs-mediatek-trace.h b/drivers/ufs/host/ufs-mediatek-trace.h
index 7e010848dc99..b5f2ec314074 100644
--- a/drivers/ufs/host/ufs-mediatek-trace.h
+++ b/drivers/ufs/host/ufs-mediatek-trace.h
@@ -24,9 +24,32 @@ TRACE_EVENT(ufs_mtk_event,
__entry->data = data;
),
- TP_printk("ufs:event=%u data=%u",
+ TP_printk("ufs: event=%u data=%u",
__entry->type, __entry->data)
- );
+);
+
+TRACE_EVENT(ufs_mtk_clk_scale,
+ TP_PROTO(const char *name, bool scale_up, unsigned long clk_rate),
+ TP_ARGS(name, scale_up, clk_rate),
+
+ TP_STRUCT__entry(
+ __field(const char*, name)
+ __field(bool, scale_up)
+ __field(unsigned long, clk_rate)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->scale_up = scale_up;
+ __entry->clk_rate = clk_rate;
+ ),
+
+ TP_printk("ufs: clk (%s) scaled %s @ %lu",
+ __entry->name,
+ __entry->scale_up ? "up" : "down",
+ __entry->clk_rate)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index c958279bdd8f..7309f3f87eac 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -19,7 +19,6 @@
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
-#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <ufs/ufshcd.h>
@@ -47,6 +46,44 @@ static const struct of_device_id ufs_mtk_of_match[] = {
{},
};
+/*
+ * Details of UIC Errors
+ */
+static const char *const ufs_uic_err_str[] = {
+ "PHY Adapter Layer",
+ "Data Link Layer",
+ "Network Link Layer",
+ "Transport Link Layer",
+ "DME"
+};
+
+static const char *const ufs_uic_pa_err_str[] = {
+ "PHY error on Lane 0",
+ "PHY error on Lane 1",
+ "PHY error on Lane 2",
+ "PHY error on Lane 3",
+ "Generic PHY Adapter Error. This should be the LINERESET indication"
+};
+
+static const char *const ufs_uic_dl_err_str[] = {
+ "NAC_RECEIVED",
+ "TCx_REPLAY_TIMER_EXPIRED",
+ "AFCx_REQUEST_TIMER_EXPIRED",
+ "FCx_PROTECTION_TIMER_EXPIRED",
+ "CRC_ERROR",
+ "RX_BUFFER_OVERFLOW",
+ "MAX_FRAME_LENGTH_EXCEEDED",
+ "WRONG_SEQUENCE_NUMBER",
+ "AFC_FRAME_SYNTAX_ERROR",
+ "NAC_FRAME_SYNTAX_ERROR",
+ "EOF_SYNTAX_ERROR",
+ "FRAME_SYNTAX_ERROR",
+ "BAD_CTRL_SYMBOL_TYPE",
+ "PA_INIT_ERROR",
+ "PA_ERROR_IND_RECEIVED",
+ "PA_INIT"
+};
+
static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -598,6 +635,12 @@ static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
boost ? 0 : PM_QOS_DEFAULT_VALUE);
}
+static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
+{
+ ufs_mtk_boost_crypt(hba, scale_up);
+ ufs_mtk_boost_pm_qos(hba, scale_up);
+}
+
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -605,11 +648,11 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
if (on) {
phy_power_on(host->mphy);
ufs_mtk_setup_ref_clk(hba, on);
- ufs_mtk_boost_crypt(hba, on);
- ufs_mtk_boost_pm_qos(hba, on);
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufs_mtk_scale_perf(hba, on);
} else {
- ufs_mtk_boost_pm_qos(hba, on);
- ufs_mtk_boost_crypt(hba, on);
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufs_mtk_scale_perf(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
phy_power_off(host->mphy);
}
@@ -695,6 +738,46 @@ static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
return hba->ufs_version;
}
+/**
+ * ufs_mtk_init_clocks - Init mtk driver private clocks
+ *
+ * @hba: per adapter instance
+ */
+static void ufs_mtk_init_clocks(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct list_head *head = &hba->clk_list_head;
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki, *clki_tmp;
+
+ /*
+ * Find private clocks and store them in struct ufs_mtk_clk.
+	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list to avoid
+ * being switched on/off in clock gating.
+ */
+ list_for_each_entry_safe(clki, clki_tmp, head, list) {
+ if (!strcmp(clki->name, "ufs_sel")) {
+ host->mclk.ufs_sel_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_sel_max_src")) {
+ host->mclk.ufs_sel_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_sel_min_src")) {
+ host->mclk.ufs_sel_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ }
+ }
+
+ if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
+ !mclk->ufs_sel_min_clki) {
+ hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+ dev_info(hba->dev,
+ "%s: Clk-scaling not ready. Feature disabled.",
+ __func__);
+ }
+}
+
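ufs_mtk_init_clocks() has to use the _safe list iterator because it deletes the two mux-source entries while walking hba->clk_list_head; once removed, the UFS core's clock gating no longer prepares or enables them, while ufs_sel itself stays under core control. The pruning idiom on its own (a sketch; prune_clocks_by_name() is an illustrative helper, struct ufs_clk_info comes from <ufs/ufshcd.h>):

#include <linux/clk.h>
#include <linux/list.h>
#include <linux/string.h>
#include <ufs/ufshcd.h>

/* Drop every clock whose name matches; safe against deletion mid-walk. */
static void prune_clocks_by_name(struct list_head *head, const char *name)
{
	struct ufs_clk_info *clki, *tmp;

	list_for_each_entry_safe(clki, tmp, head, list) {
		if (!strcmp(clki->name, name)) {
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}
}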
#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
@@ -815,12 +898,18 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable WriteBooster */
hba->caps |= UFSHCD_CAP_WB_EN;
+
+ /* Enable clk scaling*/
+ hba->caps |= UFSHCD_CAP_CLK_SCALING;
+
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+ ufs_mtk_init_clocks(hba);
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -833,6 +922,10 @@ static int ufs_mtk_init(struct ufs_hba *hba)
host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+ /* Initialize pm-qos request */
+ cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+ host->pm_qos_init = true;
+
goto out;
out_variant_clear:
@@ -1247,13 +1340,16 @@ fail:
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
- ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
+ /* Dump ufshci register 0x140 ~ 0x14C */
+ ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
+ "XOUFS Ctrl (0x140): ");
ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
+ /* Dump ufshci register 0x2200 ~ 0x22AC */
ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
- "MPHY Ctrl ");
+ "MPHY Ctrl (0x2200): ");
/* Direct debugging information to REG_MTK_PROBE */
ufs_mtk_dbg_sel(hba);
@@ -1310,8 +1406,101 @@ static void ufs_mtk_event_notify(struct ufs_hba *hba,
enum ufs_event_type evt, void *data)
{
unsigned int val = *(u32 *)data;
+ unsigned long reg;
+ u8 bit;
trace_ufs_mtk_event(evt, val);
+
+ /* Print details of UIC Errors */
+ if (evt <= UFS_EVT_DME_ERR) {
+ dev_info(hba->dev,
+ "Host UIC Error Code (%s): %08x\n",
+ ufs_uic_err_str[evt], val);
+ reg = val;
+ }
+
+ if (evt == UFS_EVT_PA_ERR) {
+ for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
+ dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
+ }
+
+ if (evt == UFS_EVT_DL_ERR) {
+ for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
+ dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
+ }
+}
+
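The event hook expands each set bit of the reported UIC error register into one log line; for_each_set_bit() is bounded by the string-table size, so reserved bits are skipped silently. The decoding idiom in isolation (toy_decode_bits() is an illustrative name):

#include <linux/bitops.h>
#include <linux/device.h>

static void toy_decode_bits(struct device *dev, u32 val,
			    const char *const *names, unsigned int nr)
{
	unsigned long reg = val;	/* for_each_set_bit() operates on unsigned long */
	unsigned int bit;

	for_each_set_bit(bit, &reg, nr)
		dev_info(dev, "%s\n", names[bit]);
}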
+static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *profile,
+ struct devfreq_simple_ondemand_data *data)
+{
+ /* Customize min gear in clk scaling */
+ hba->clk_scaling.min_gear = UFS_HS_G4;
+
+ hba->vps->devfreq_profile.polling_ms = 200;
+ hba->vps->ondemand_data.upthreshold = 50;
+ hba->vps->ondemand_data.downdifferential = 20;
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * The MTK platform supports clk scaling by switching the parent of ufs_sel (a mux).
+ * ufs_sel drives ufs_ck, which feeds the UFS hardware directly.
+ * The max and min clock rates of ufs_sel defined in the dts should match the
+ * rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
+ * This prevents changing the rate of a pll clock that is shared between modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+ int ret = 0;
+
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "clk_prepare_enable() fail, ret: %d\n", ret);
+ return;
+ }
+
+ if (scale_up) {
+ ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
+ clki->curr_freq = clki->max_freq;
+ } else {
+ ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
+ clki->curr_freq = clki->min_freq;
+ }
+
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ }
+
+ clk_disable_unprepare(clki->clk);
+
+ trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
+}
+
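Note that ufs_mtk_clk_scale() never touches a PLL rate: scaling is done by re-parenting the ufs_sel mux between a fast and a slow source, with clk_prepare_enable()/clk_disable_unprepare() bracketing clk_set_parent() so the mux is guaranteed to be running while it is reprogrammed. The core of that approach (a reduced sketch, error reporting trimmed):

#include <linux/clk.h>

static int toy_scale_mux(struct clk *mux, struct clk *fast, struct clk *slow,
			 bool scale_up)
{
	int ret;

	ret = clk_prepare_enable(mux);
	if (ret)
		return ret;

	ret = clk_set_parent(mux, scale_up ? fast : slow);

	clk_disable_unprepare(mux);
	return ret;
}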
+static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ enum ufs_notify_change_status status)
+{
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return 0;
+
+ if (status == PRE_CHANGE) {
+ /* Switch parent before clk_set_rate() */
+ ufs_mtk_clk_scale(hba, scale_up);
+ } else {
+ /* Request interrupt latency QoS accordingly */
+ ufs_mtk_scale_perf(hba, scale_up);
+ }
+
+ return 0;
}
/*
@@ -1335,6 +1524,8 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.dbg_register_dump = ufs_mtk_dbg_register_dump,
.device_reset = ufs_mtk_device_reset,
.event_notify = ufs_mtk_event_notify,
+ .config_scaling_param = ufs_mtk_config_scaling_param,
+ .clk_scale_notify = ufs_mtk_clk_scale_notify,
};
/**
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index aa26d415527b..2fc6d7b87694 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -124,6 +124,12 @@ struct ufs_mtk_crypt_cfg {
int vcore_volt;
};
+struct ufs_mtk_clk {
+ struct ufs_clk_info *ufs_sel_clki; /* Mux */
+ struct ufs_clk_info *ufs_sel_max_clki; /* Max src */
+ struct ufs_clk_info *ufs_sel_min_clki; /* Min src */
+};
+
struct ufs_mtk_hw_ver {
u8 step;
u8 minor;
@@ -139,6 +145,7 @@ struct ufs_mtk_host {
struct reset_control *crypto_reset;
struct ufs_hba *hba;
struct ufs_mtk_crypt_cfg *crypt;
+ struct ufs_mtk_clk mclk;
struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 473fad83701e..8ad1415e10b6 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -846,7 +846,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_CRYPTO;
diff --git a/drivers/uio/uio_dfl.c b/drivers/uio/uio_dfl.c
index 8f39cc8bb034..69e93f3e7faf 100644
--- a/drivers/uio/uio_dfl.c
+++ b/drivers/uio/uio_dfl.c
@@ -46,10 +46,12 @@ static int uio_dfl_probe(struct dfl_device *ddev)
#define FME_FEATURE_ID_ETH_GROUP 0x10
#define FME_FEATURE_ID_HSSI_SUBSYS 0x15
+#define PORT_FEATURE_ID_IOPLL_USRCLK 0x14
static const struct dfl_device_id uio_dfl_ids[] = {
{ FME_ID, FME_FEATURE_ID_ETH_GROUP },
{ FME_ID, FME_FEATURE_ID_HSSI_SUBSYS },
+ { PORT_ID, PORT_FEATURE_ID_IOPLL_USRCLK },
{ }
};
MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 362217189ef3..1cdb8758ae01 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1026,7 +1026,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
/* public fields */
instance->driver = driver;
- strlcpy(instance->driver_name, driver->driver_name,
+ strscpy(instance->driver_name, driver->driver_name,
sizeof(instance->driver_name));
instance->usb_dev = usb_dev;
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index dc068e940ed5..2bc5d094548b 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -110,8 +110,6 @@ static int cdns3_plat_probe(struct platform_device *pdev)
cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
if (cdns->wakeup_irq == -EPROBE_DEFER)
return cdns->wakeup_irq;
- else if (cdns->wakeup_irq == 0)
- return -EINVAL;
if (cdns->wakeup_irq < 0) {
dev_dbg(dev, "couldn't get wakeup irq\n");
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 661818e8fed6..c815824a0b2d 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -34,26 +34,26 @@ config USB_CHIPIDEA_HOST
ChipIdea driver.
config USB_CHIPIDEA_PCI
- tristate "Enable PCI glue driver" if EMBEDDED
+ tristate "Enable PCI glue driver" if EXPERT
depends on USB_PCI
depends on NOP_USB_XCEIV
default USB_CHIPIDEA
config USB_CHIPIDEA_MSM
- tristate "Enable MSM hsusb glue driver" if EMBEDDED
+ tristate "Enable MSM hsusb glue driver" if EXPERT
default USB_CHIPIDEA
config USB_CHIPIDEA_IMX
- tristate "Enable i.MX USB glue driver" if EMBEDDED
+ tristate "Enable i.MX USB glue driver" if EXPERT
depends on OF
default USB_CHIPIDEA
config USB_CHIPIDEA_GENERIC
- tristate "Enable generic USB2 glue driver" if EMBEDDED
+ tristate "Enable generic USB2 glue driver" if EXPERT
default USB_CHIPIDEA
config USB_CHIPIDEA_TEGRA
- tristate "Enable Tegra USB glue driver" if EMBEDDED
+ tristate "Enable Tegra USB glue driver" if EXPERT
depends on OF
default USB_CHIPIDEA
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 89e1d82d739b..dc86b12060b5 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -30,6 +30,7 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
static const struct ci_hdrc_platform_data ci_zynq_pdata = {
.capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_PHY_VBUS_CONTROL,
};
static const struct ci_hdrc_platform_data ci_zevio_pdata = {
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index bdc3885c0d49..bc3634a54c6b 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -63,6 +63,13 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
priv->enabled = enable;
}
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL) {
+ if (enable)
+ usb_phy_vbus_on(ci->usb_phy);
+ else
+ usb_phy_vbus_off(ci->usb_phy);
+ }
+
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
/*
* Marvell 28nm HSIC PHY requires forcing the port to HS mode.
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 61b157b9c662..ada78daba6df 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -471,6 +471,10 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
return;
}
}
+
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL)
+ usb_phy_vbus_on(ci->usb_phy);
+
/* Disable data pulse irq */
hw_write_otgsc(ci, OTGSC_DPIE, 0);
@@ -480,6 +484,9 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
if (ci->platdata->reg_vbus)
regulator_disable(ci->platdata->reg_vbus);
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL)
+ usb_phy_vbus_off(ci->usb_phy);
+
fsm->a_bus_drop = 1;
fsm->a_bus_req = 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index cc637c4599e1..36bf051b345b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -51,7 +51,7 @@ static DEFINE_IDR(acm_minors);
static DEFINE_MUTEX(acm_minors_lock);
static void acm_tty_set_termios(struct tty_struct *tty,
- struct ktermios *termios_old);
+ const struct ktermios *termios_old);
/*
* acm_minors accessors
@@ -1049,7 +1049,7 @@ static int acm_tty_ioctl(struct tty_struct *tty,
}
static void acm_tty_set_termios(struct tty_struct *tty,
- struct ktermios *termios_old)
+ const struct ktermios *termios_old)
{
struct acm *acm = tty->driver_data;
struct ktermios *termios = &tty->termios;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index eebe782380fb..1f0951be15ab 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -958,7 +958,7 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
if (!skb)
return;
- memcpy(skb_put(skb, length), desc->inbuf, length);
+ skb_put_data(skb, desc->inbuf, length);
wwan_port_rx(port, skb);
/* inbuf has been copied, it is safe to check for outstanding data */
diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c
index 075f6b1b2a1a..f204cec8d380 100644
--- a/drivers/usb/common/debug.c
+++ b/drivers/usb/common/debug.c
@@ -208,30 +208,28 @@ static void usb_decode_set_isoch_delay(__u8 wValue, char *str, size_t size)
snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue);
}
-/**
- * usb_decode_ctrl - Returns human readable representation of control request.
- * @str: buffer to return a human-readable representation of control request.
- * This buffer should have about 200 bytes.
- * @size: size of str buffer.
- * @bRequestType: matches the USB bmRequestType field
- * @bRequest: matches the USB bRequest field
- * @wValue: matches the USB wValue field (CPU byte order)
- * @wIndex: matches the USB wIndex field (CPU byte order)
- * @wLength: matches the USB wLength field (CPU byte order)
- *
- * Function returns decoded, formatted and human-readable description of
- * control request packet.
- *
- * The usage scenario for this is for tracepoints, so function as a return
- * use the same value as in parameters. This approach allows to use this
- * function in TP_printk
- *
- * Important: wValue, wIndex, wLength parameters before invoking this function
- * should be processed by le16_to_cpu macro.
- */
-const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
- __u8 bRequest, __u16 wValue, __u16 wIndex,
- __u16 wLength)
+static void usb_decode_ctrl_generic(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
+{
+ u8 recip = bRequestType & USB_RECIP_MASK;
+ u8 type = bRequestType & USB_TYPE_MASK;
+
+ snprintf(str, size,
+ "Type=%s Recipient=%s Dir=%s bRequest=%u wValue=%u wIndex=%u wLength=%u",
+ (type == USB_TYPE_STANDARD) ? "Standard" :
+ (type == USB_TYPE_VENDOR) ? "Vendor" :
+ (type == USB_TYPE_CLASS) ? "Class" : "Unknown",
+ (recip == USB_RECIP_DEVICE) ? "Device" :
+ (recip == USB_RECIP_INTERFACE) ? "Interface" :
+ (recip == USB_RECIP_ENDPOINT) ? "Endpoint" : "Unknown",
+ (bRequestType & USB_DIR_IN) ? "IN" : "OUT",
+ bRequest, wValue, wIndex, wLength);
+}
+
+static void usb_decode_ctrl_standard(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
{
switch (bRequest) {
case USB_REQ_GET_STATUS:
@@ -272,14 +270,48 @@ const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
usb_decode_set_isoch_delay(wValue, str, size);
break;
default:
- snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x",
- bRequestType, bRequest,
- (u8)(cpu_to_le16(wValue) & 0xff),
- (u8)(cpu_to_le16(wValue) >> 8),
- (u8)(cpu_to_le16(wIndex) & 0xff),
- (u8)(cpu_to_le16(wIndex) >> 8),
- (u8)(cpu_to_le16(wLength) & 0xff),
- (u8)(cpu_to_le16(wLength) >> 8));
+ usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
+ }
+}
+
+/**
+ * usb_decode_ctrl - Returns human readable representation of control request.
+ * @str: buffer to return a human-readable representation of control request.
+ * This buffer should have about 200 bytes.
+ * @size: size of str buffer.
+ * @bRequestType: matches the USB bmRequestType field
+ * @bRequest: matches the USB bRequest field
+ * @wValue: matches the USB wValue field (CPU byte order)
+ * @wIndex: matches the USB wIndex field (CPU byte order)
+ * @wLength: matches the USB wLength field (CPU byte order)
+ *
+ * Function returns decoded, formatted and human-readable description of
+ * control request packet.
+ *
+ * The usage scenario for this is for tracepoints, so function as a return
+ * use the same value as in parameters. This approach allows to use this
+ * function in TP_printk
+ *
+ * Important: wValue, wIndex, wLength parameters before invoking this function
+ * should be processed by le16_to_cpu macro.
+ */
+const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
+{
+ switch (bRequestType & USB_TYPE_MASK) {
+ case USB_TYPE_STANDARD:
+ usb_decode_ctrl_standard(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
+ case USB_TYPE_VENDOR:
+ case USB_TYPE_CLASS:
+ default:
+ usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
}
return str;
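With this split, vendor and class requests are no longer dumped as raw setup bytes; they take the generic branch and come back fully labelled. For instance (a hedged usage sketch; the request values are made up):

#include <linux/printk.h>
#include <linux/usb/ch9.h>

static void toy_log_vendor_request(void)
{
	char buf[200];	/* usb_decode_ctrl() expects a buffer of about 200 bytes */

	pr_debug("%s\n", usb_decode_ctrl(buf, sizeof(buf),
			 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			 0x01, 0, 0, 0));
	/* -> "Type=Vendor Recipient=Device Dir=OUT bRequest=1 wValue=0 wIndex=0 wLength=0" */
}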
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0a4f441aff8f..d7c8461976ce 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -233,7 +233,7 @@ err:
return 0;
}
-static int ulpi_regs_read(struct seq_file *seq, void *data)
+static int ulpi_regs_show(struct seq_file *seq, void *data)
{
struct ulpi *ulpi = seq->private;
@@ -269,21 +269,7 @@ static int ulpi_regs_read(struct seq_file *seq, void *data)
return 0;
}
-
-static int ulpi_regs_open(struct inode *inode, struct file *f)
-{
- struct ulpi *ulpi = inode->i_private;
-
- return single_open(f, ulpi_regs_read, ulpi);
-}
-
-static const struct file_operations ulpi_regs_ops = {
- .owner = THIS_MODULE,
- .open = ulpi_regs_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
+DEFINE_SHOW_ATTRIBUTE(ulpi_regs);
#define ULPI_ROOT debugfs_lookup(KBUILD_MODNAME, NULL)
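DEFINE_SHOW_ATTRIBUTE(ulpi_regs) generates the boilerplate that was deleted above: given ulpi_regs_show(), it emits an ulpi_regs_open() wrapper around single_open() and an ulpi_regs_fops suitable for debugfs_create_file(). Its expansion is approximately (a sketch of the pattern from <linux/seq_file.h>, not a verbatim copy):

static int ulpi_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ulpi_regs_show, inode->i_private);
}

static const struct file_operations ulpi_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= ulpi_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};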
@@ -316,7 +302,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
}
root = debugfs_create_dir(dev_name(dev), ULPI_ROOT);
- debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_ops);
+ debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
ulpi->id.vendor, ulpi->id.product);
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index b39c9f1c375d..e20874caba36 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -208,10 +208,8 @@ static int usb_conn_probe(struct platform_device *pdev)
if (PTR_ERR(info->vbus) == -ENODEV)
info->vbus = NULL;
- if (IS_ERR(info->vbus)) {
- ret = PTR_ERR(info->vbus);
- return dev_err_probe(dev, ret, "failed to get vbus :%d\n", ret);
- }
+ if (IS_ERR(info->vbus))
+ return dev_err_probe(dev, PTR_ERR(info->vbus), "failed to get vbus\n");
info->role_sw = usb_role_switch_get(dev);
if (IS_ERR(info->role_sw))
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index b5b85bf80329..837f3e57f580 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1434,7 +1434,7 @@ static int proc_getdriver(struct usb_dev_state *ps, void __user *arg)
if (!intf || !intf->dev.driver)
ret = -ENODATA;
else {
- strlcpy(gd.driver, intf->dev.driver->name,
+ strscpy(gd.driver, intf->dev.driver->name,
sizeof(gd.driver));
ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0);
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 482dae72ef1c..9b77f49b3560 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -157,7 +157,6 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
/**
* usb_hcd_pci_probe - initialize PCI-based HCDs
* @dev: USB Host Controller being probed
- * @id: pci hotplug id connecting controller to HCD framework
* @driver: USB HC driver handle
*
* Context: task context, might sleep
@@ -170,8 +169,7 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
*
* Return: 0 if successful.
*/
-int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id,
- const struct hc_driver *driver)
+int usb_hcd_pci_probe(struct pci_dev *dev, const struct hc_driver *driver)
{
struct usb_hcd *hcd;
int retval;
@@ -180,9 +178,6 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id,
if (usb_disabled())
return -ENODEV;
- if (!id)
- return -EINVAL;
-
if (!driver)
return -EINVAL;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 94b305bbd621..faeaace0d197 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1474,7 +1474,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
urb->sg,
urb->num_sgs,
dir);
- if (n <= 0)
+ if (!n)
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_SG;
@@ -2158,21 +2158,14 @@ static struct urb *request_single_step_set_feature_urb(
{
struct urb *urb;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- struct usb_host_endpoint *ep;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return NULL;
urb->pipe = usb_rcvctrlpipe(udev, 0);
- ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
- [usb_pipeendpoint(urb->pipe)];
- if (!ep) {
- usb_free_urb(urb);
- return NULL;
- }
- urb->ep = ep;
+ urb->ep = &udev->ep0;
urb->dev = udev;
urb->setup_packet = (void *)dr;
urb->transfer_buffer = buf;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f99a65a64588..0722d2131305 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -388,6 +388,15 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+ /* NVIDIA Jetson devices in Force Recovery mode */
+ { USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
@@ -437,6 +446,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Lenovo ThinkPad OneLink+ Dock twin hub controllers (VIA Labs VL812) */
+ { USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x17ef, 0x1019), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
{ USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 33d62d7e3929..9f3c54032556 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
+#include <linux/kmsan.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
@@ -426,6 +427,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
URB_DMA_SG_COMBINED);
urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
+ kmsan_handle_urb(urb, is_out);
if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
dev->state < USB_STATE_CONFIGURED)
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index dc4fc72ab1b6..5635e4d7ec88 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -3,36 +3,6 @@
* core.c - DesignWare HS OTG Controller common routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 0683852e47e4..40cf2880d7e5 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -3,36 +3,6 @@
* core.h - DesignWare HS OTG Controller common declarations
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DWC2_CORE_H__
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index a5c52b237e72..158ede753854 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -3,36 +3,6 @@
* core_intr.c - DesignWare HS OTG Controller common interrupt handling
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index aaf7b9fc4d34..657f1f659ffa 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3,36 +3,6 @@
* hcd.c - DesignWare HS OTG Controller host-mode routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index ea02ee63ac6d..b7254d94fdc3 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -3,37 +3,8 @@
* hcd.h - DesignWare HS OTG Controller host-mode declarations
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#ifndef __DWC2_HCD_H__
#define __DWC2_HCD_H__
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index a858b5f9c1d6..6b4d825e97a2 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -3,36 +3,6 @@
* hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index d5f4ec1b73b1..c9740caa5974 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -3,36 +3,6 @@
* hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 24beff610cf2..0a1145592fc7 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -3,36 +3,6 @@
* hcd_queue.c - DesignWare HS OTG Controller host queuing routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 6b16fbf98bc6..13abdd5f6752 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -3,36 +3,6 @@
* hw.h - DesignWare HS OTG Controller hardware definitions
*
* Copyright 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DWC2_HW_H__
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index fdb8a42fff86..8eab5f38b110 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -1,36 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2004-2016 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index a93559b4ecdb..b7306ed8be4c 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -3,36 +3,6 @@
* pci.c - DesignWare HS OTG Controller PCI driver
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index fd0ccf6f3ec5..ec4ace0107f5 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -3,36 +3,6 @@
* platform.c - DesignWare HS OTG Controller platform driver
*
* Copyright (C) Matthijs Kooijman <matthijs@stdin.nl>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index d0237b30c9be..ea51624461b5 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
@@ -86,7 +85,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
* mode. If the controller supports DRD but the dr_mode is not
* specified or set to OTG, then set the mode to peripheral.
*/
- if (mode == USB_DR_MODE_OTG && !dwc->edev &&
+ if (mode == USB_DR_MODE_OTG &&
(!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
!device_property_read_bool(dwc->dev, "usb-role-switch")) &&
!DWC3_VER_IS_PRIOR(DWC3, 330A))
@@ -408,6 +407,10 @@ static void dwc3_ref_clk_period(struct dwc3 *dwc)
reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
| FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
| FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
+
+ if (dwc->gfladj_refclk_lpm_sel)
+ reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+
dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
@@ -789,7 +792,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
else
reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
- if (dwc->dis_u2_freeclk_exists_quirk)
+ if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
@@ -1180,6 +1183,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /*
+ * When configured in HOST mode, after issuing U3/L2 exit controller
+	 * fails to send a proper CRC checksum in the CRC5 field. Because of this
+	 * behaviour, a Transaction Error is generated, resulting in reset and
+	 * re-enumeration of the attached USB device. All the termsel, xcvrsel,
+ * opmode becomes 0 during end of resume. Enabling bit 10 of GUCTL1
+ * will correct this problem. This option is to support certain
+ * legacy ULPI PHYs.
+ */
+ if (dwc->resume_hs_terminations) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
@@ -1523,8 +1541,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
"snps,dis-del-phy-power-chg-quirk");
dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
"snps,dis-tx-ipgap-linecheck-quirk");
+ dwc->resume_hs_terminations = device_property_read_bool(dev,
+ "snps,resume-hs-terminations");
dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
"snps,parkmode-disable-ss-quirk");
+ dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
+ "snps,gfladj-refclk-lpm-sel-quirk");
dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
"snps,tx_de_emphasis_quirk");
@@ -1668,46 +1690,6 @@ static void dwc3_check_params(struct dwc3 *dwc)
}
}
-static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
-{
- struct device *dev = dwc->dev;
- struct device_node *np_phy;
- struct extcon_dev *edev = NULL;
- const char *name;
-
- if (device_property_read_bool(dev, "extcon"))
- return extcon_get_edev_by_phandle(dev, 0);
-
- /*
- * Device tree platforms should get extcon via phandle.
- * On ACPI platforms, we get the name from a device property.
- * This device property is for kernel internal use only and
- * is expected to be set by the glue code.
- */
- if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
- return extcon_get_extcon_dev(name);
-
- /*
- * Try to get an extcon device from the USB PHY controller's "port"
- * node. Check if it has the "port" node first, to avoid printing the
- * error message from underlying code, as it's a valid case: extcon
- * device (and "port" node) may be missing in case of "usb-role-switch"
- * or OTG mode.
- */
- np_phy = of_parse_phandle(dev->of_node, "phys", 0);
- if (of_graph_is_present(np_phy)) {
- struct device_node *np_conn;
-
- np_conn = of_graph_get_remote_node(np_phy, -1, -1);
- if (np_conn)
- edev = extcon_find_edev_by_node(np_conn);
- of_node_put(np_conn);
- }
- of_node_put(np_phy);
-
- return edev;
-}
-
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1753,8 +1735,10 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
- if (IS_ERR(dwc->reset))
- return PTR_ERR(dwc->reset);
+ if (IS_ERR(dwc->reset)) {
+ ret = PTR_ERR(dwc->reset);
+ goto put_usb_psy;
+ }
if (dev->of_node) {
/*
@@ -1764,45 +1748,57 @@ static int dwc3_probe(struct platform_device *pdev)
* check for them to retain backwards compatibility.
*/
dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
- if (IS_ERR(dwc->bus_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
- "could not get bus clock\n");
+ if (IS_ERR(dwc->bus_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
+ "could not get bus clock\n");
+ goto put_usb_psy;
+ }
if (dwc->bus_clk == NULL) {
dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
- if (IS_ERR(dwc->bus_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
- "could not get bus clock\n");
+ if (IS_ERR(dwc->bus_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
+ "could not get bus clock\n");
+ goto put_usb_psy;
+ }
}
dwc->ref_clk = devm_clk_get_optional(dev, "ref");
- if (IS_ERR(dwc->ref_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
- "could not get ref clock\n");
+ if (IS_ERR(dwc->ref_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
+ "could not get ref clock\n");
+ goto put_usb_psy;
+ }
if (dwc->ref_clk == NULL) {
dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
- if (IS_ERR(dwc->ref_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
- "could not get ref clock\n");
+ if (IS_ERR(dwc->ref_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
+ "could not get ref clock\n");
+ goto put_usb_psy;
+ }
}
dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
- if (IS_ERR(dwc->susp_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
- "could not get suspend clock\n");
+ if (IS_ERR(dwc->susp_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
+ "could not get suspend clock\n");
+ goto put_usb_psy;
+ }
if (dwc->susp_clk == NULL) {
dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
- if (IS_ERR(dwc->susp_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
- "could not get suspend clock\n");
+ if (IS_ERR(dwc->susp_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
+ "could not get suspend clock\n");
+ goto put_usb_psy;
+ }
}
}
ret = reset_control_deassert(dwc->reset);
if (ret)
- return ret;
+ goto put_usb_psy;
ret = dwc3_clk_enable(dwc);
if (ret)
@@ -1844,13 +1840,6 @@ static int dwc3_probe(struct platform_device *pdev)
goto err2;
}
- dwc->edev = dwc3_get_extcon(dwc);
- if (IS_ERR(dwc->edev)) {
- ret = PTR_ERR(dwc->edev);
- dev_err_probe(dwc->dev, ret, "failed to get extcon\n");
- goto err3;
- }
-
ret = dwc3_get_dr_mode(dwc);
if (ret)
goto err3;
@@ -1909,7 +1898,7 @@ disable_clks:
dwc3_clk_disable(dwc);
assert_reset:
reset_control_assert(dwc->reset);
-
+put_usb_psy:
if (dwc->usb_psy)
power_supply_put(dwc->usb_psy);
@@ -1977,9 +1966,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
case DWC3_GCTL_PRTCAP_DEVICE:
if (pm_runtime_suspended(dwc->dev))
break;
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
synchronize_irq(dwc->irq_gadget);
dwc3_core_exit(dwc);
break;
@@ -2040,9 +2027,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
return ret;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_resume(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_GCTL_PRTCAP_HOST:
if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 4fe4287dc934..8f9959ba9fd4 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -263,6 +263,7 @@
#define DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK BIT(26)
#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17)
+#define DWC3_GUCTL1_RESUME_OPMODE_HS_HOST BIT(10)
/* Global Status Register */
#define DWC3_GSTS_OTG_IP BIT(10)
@@ -391,6 +392,7 @@
#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
#define DWC3_GFLADJ_REFCLK_FLADJ_MASK GENMASK(21, 8)
+#define DWC3_GFLADJ_REFCLK_LPM_SEL BIT(23)
#define DWC3_GFLADJ_240MHZDECR GENMASK(30, 24)
#define DWC3_GFLADJ_240MHZDECR_PLS1 BIT(31)
@@ -1096,6 +1098,8 @@ struct dwc3_scratchpad_array {
* change quirk.
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
+ * @resume_hs_terminations: Set if we enable quirk for fixing improper CRC
+ *			generation after resume from suspend.
* @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
* instances in park mode.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
@@ -1311,7 +1315,9 @@ struct dwc3 {
unsigned dis_u2_freeclk_exists_quirk:1;
unsigned dis_del_phy_power_chg_quirk:1;
unsigned dis_tx_ipgap_linecheck_quirk:1;
+ unsigned resume_hs_terminations:1;
unsigned parkmode_disable_ss_quirk:1;
+ unsigned gfladj_refclk_lpm_sel:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
@@ -1560,6 +1566,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
u32 param);
void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc);
+void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index d223c54115f4..48b44b88dc25 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -278,7 +278,7 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
break;
case DWC3_DEPEVT_XFERINPROGRESS:
scnprintf(str + len, size - len,
- "Transfer In Progress [%d] (%c%c%c)",
+ "Transfer In Progress [%08x] (%c%c%c)",
event->parameters,
status & DEPEVT_STATUS_SHORT ? 'S' : 's',
status & DEPEVT_STATUS_IOC ? 'I' : 'i',
@@ -286,7 +286,7 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
break;
case DWC3_DEPEVT_XFERNOTREADY:
len += scnprintf(str + len, size - len,
- "Transfer Not Ready [%d]%s",
+ "Transfer Not Ready [%08x]%s",
event->parameters,
status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
" (Active)" : " (Not Active)");
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 039bf241769a..8cad9e7d3368 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -8,6 +8,7 @@
*/
#include <linux/extcon.h>
+#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -438,6 +439,51 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ struct device_node *np_phy;
+ struct extcon_dev *edev = NULL;
+ const char *name;
+
+ if (device_property_read_bool(dev, "extcon"))
+ return extcon_get_edev_by_phandle(dev, 0);
+
+ /*
+ * Device tree platforms should get extcon via phandle.
+ * On ACPI platforms, we get the name from a device property.
+ * This device property is for kernel internal use only and
+ * is expected to be set by the glue code.
+ */
+ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
+ edev = extcon_get_extcon_dev(name);
+ if (!edev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return edev;
+ }
+
+ /*
+ * Try to get an extcon device from the USB PHY controller's "port"
+ * node. Check if it has the "port" node first, to avoid printing the
+ * error message from underlying code, as it's a valid case: extcon
+ * device (and "port" node) may be missing in case of "usb-role-switch"
+ * or OTG mode.
+ */
+ np_phy = of_parse_phandle(dev->of_node, "phys", 0);
+ if (of_graph_is_present(np_phy)) {
+ struct device_node *np_conn;
+
+ np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (np_conn)
+ edev = extcon_find_edev_by_node(np_conn);
+ of_node_put(np_conn);
+ }
+ of_node_put(np_phy);
+
+ return edev;
+}
+
#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
#define ROLE_SWITCH 1
static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
@@ -542,6 +588,10 @@ int dwc3_drd_init(struct dwc3 *dwc)
device_property_read_bool(dwc->dev, "usb-role-switch"))
return dwc3_setup_role_switch(dwc);
+ dwc->edev = dwc3_get_extcon(dwc);
+ if (IS_ERR(dwc->edev))
+ return PTR_ERR(dwc->edev);
+
if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 4ee4ca09873a..fb14511b1e10 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -40,9 +40,10 @@
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
-#define PCI_DEVICE_ID_INTEL_ADL 0x465e
-#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
-#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
+#define PCI_DEVICE_ID_INTEL_ADL 0x460e
+#define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee
+#define PCI_DEVICE_ID_INTEL_ADLN 0x465e
+#define PCI_DEVICE_ID_INTEL_ADLN_PCH 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
#define PCI_DEVICE_ID_INTEL_RPL 0x460e
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
@@ -448,10 +449,13 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_PCH),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLM),
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLN),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLN_PCH),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index d3f3937d7005..7c40f3ffc054 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -243,6 +243,7 @@ static int dwc3_qcom_interconnect_disable(struct dwc3_qcom *qcom)
*/
static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
{
+ enum usb_device_speed max_speed;
struct device *dev = qcom->dev;
int ret;
@@ -252,7 +253,7 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
if (IS_ERR(qcom->icc_path_ddr)) {
dev_err(dev, "failed to get usb-ddr path: %ld\n",
- PTR_ERR(qcom->icc_path_ddr));
+ PTR_ERR(qcom->icc_path_ddr));
return PTR_ERR(qcom->icc_path_ddr);
}
@@ -263,21 +264,20 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
return PTR_ERR(qcom->icc_path_apps);
}
- if (usb_get_maximum_speed(&qcom->dwc3->dev) >= USB_SPEED_SUPER ||
- usb_get_maximum_speed(&qcom->dwc3->dev) == USB_SPEED_UNKNOWN)
+ max_speed = usb_get_maximum_speed(&qcom->dwc3->dev);
+ if (max_speed >= USB_SPEED_SUPER || max_speed == USB_SPEED_UNKNOWN) {
ret = icc_set_bw(qcom->icc_path_ddr,
- USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
- else
+ USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
+ } else {
ret = icc_set_bw(qcom->icc_path_ddr,
- USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW);
-
+ USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW);
+ }
if (ret) {
dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret);
return ret;
}
- ret = icc_set_bw(qcom->icc_path_apps,
- APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
+ ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
if (ret) {
dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret);
return ret;
@@ -1007,10 +1007,6 @@ static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
static const struct of_device_id dwc3_qcom_of_match[] = {
{ .compatible = "qcom,dwc3" },
- { .compatible = "qcom,msm8996-dwc3" },
- { .compatible = "qcom,msm8998-dwc3" },
- { .compatible = "qcom,sdm660-dwc3" },
- { .compatible = "qcom,sdm845-dwc3" },
{ }
};
MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 67b237c7a76a..8607d4c23283 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -47,6 +47,7 @@ struct dwc3_xlnx {
struct device *dev;
void __iomem *regs;
int (*pltfm_init)(struct dwc3_xlnx *data);
+ struct phy *usb3_phy;
};
static void dwc3_xlnx_mask_phy_rst(struct dwc3_xlnx *priv_data, bool mask)
@@ -100,13 +101,12 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
struct gpio_desc *reset_gpio;
- struct phy *usb3_phy;
int ret = 0;
u32 reg;
- usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
- if (IS_ERR(usb3_phy)) {
- ret = PTR_ERR(usb3_phy);
+ priv_data->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(priv_data->usb3_phy)) {
+ ret = PTR_ERR(priv_data->usb3_phy);
dev_err_probe(dev, ret,
"failed to get USB3 PHY\n");
goto err;
@@ -121,7 +121,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
* in use but the usb3-phy entry is missing from the device tree.
* Therefore, skip these operations in this case.
*/
- if (!usb3_phy)
+ if (!priv_data->usb3_phy)
goto skip_usb3_phy;
crst = devm_reset_control_get_exclusive(dev, "usb_crst");
@@ -166,9 +166,9 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
goto err;
}
- ret = phy_init(usb3_phy);
+ ret = phy_init(priv_data->usb3_phy);
if (ret < 0) {
- phy_exit(usb3_phy);
+ phy_exit(priv_data->usb3_phy);
goto err;
}
@@ -196,9 +196,9 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
goto err;
}
- ret = phy_power_on(usb3_phy);
+ ret = phy_power_on(priv_data->usb3_phy);
if (ret < 0) {
- phy_exit(usb3_phy);
+ phy_exit(priv_data->usb3_phy);
goto err;
}
@@ -322,7 +322,7 @@ static int dwc3_xlnx_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused dwc3_xlnx_suspend_common(struct device *dev)
+static int __maybe_unused dwc3_xlnx_runtime_suspend(struct device *dev)
{
struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
@@ -331,7 +331,7 @@ static int __maybe_unused dwc3_xlnx_suspend_common(struct device *dev)
return 0;
}
-static int __maybe_unused dwc3_xlnx_resume_common(struct device *dev)
+static int __maybe_unused dwc3_xlnx_runtime_resume(struct device *dev)
{
struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
@@ -346,8 +346,45 @@ static int __maybe_unused dwc3_xlnx_runtime_idle(struct device *dev)
return 0;
}
-static UNIVERSAL_DEV_PM_OPS(dwc3_xlnx_dev_pm_ops, dwc3_xlnx_suspend_common,
- dwc3_xlnx_resume_common, dwc3_xlnx_runtime_idle);
+static int __maybe_unused dwc3_xlnx_suspend(struct device *dev)
+{
+ struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
+
+ phy_exit(priv_data->usb3_phy);
+
+ /* Disable the clocks */
+ clk_bulk_disable(priv_data->num_clocks, priv_data->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_xlnx_resume(struct device *dev)
+{
+ struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
+ if (ret)
+ return ret;
+
+ ret = phy_init(priv_data->usb3_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(priv_data->usb3_phy);
+ if (ret < 0) {
+ phy_exit(priv_data->usb3_phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwc3_xlnx_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_xlnx_suspend, dwc3_xlnx_resume)
+ SET_RUNTIME_PM_OPS(dwc3_xlnx_runtime_suspend,
+ dwc3_xlnx_runtime_resume, dwc3_xlnx_runtime_idle)
+};
static struct platform_driver dwc3_xlnx_driver = {
.probe = dwc3_xlnx_probe,
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 197af63f8d05..61de693461da 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -197,7 +197,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
int ret;
spin_lock_irqsave(&dwc->lock, flags);
- if (!dep->endpoint.desc || !dwc->pullups_connected) {
+ if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
dep->name);
ret = -ESHUTDOWN;
@@ -293,7 +293,10 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
continue;
dwc3_ep->flags &= ~DWC3_EP_DELAY_STOP;
- dwc3_stop_active_transfer(dwc3_ep, true, true);
+ if (dwc->connected)
+ dwc3_stop_active_transfer(dwc3_ep, true, true);
+ else
+ dwc3_remove_requests(dwc, dwc3_ep, -ESHUTDOWN);
}
}
@@ -815,7 +818,7 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
int ret = -EINVAL;
u32 len;
- if (!dwc->gadget_driver || !dwc->connected)
+ if (!dwc->gadget_driver || !dwc->softconnect || !dwc->connected)
goto out;
trace_dwc3_ctrl_req(ctrl);
@@ -1118,6 +1121,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
{
switch (event->status) {
case DEPEVT_STATUS_CONTROL_DATA:
+ if (!dwc->softconnect || !dwc->connected)
+ return;
/*
* We already have a DATA transfer in the controller's cache,
* if we receive a XferNotReady(DATA) we will ignore it, unless
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index eca945feeec3..079cd333632e 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -366,7 +366,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
- if (!(cmd & DWC3_DEPCMD_CMDACT)) {
+ if (!(cmd & DWC3_DEPCMD_CMDACT) ||
+ (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
+ !(cmd & DWC3_DEPCMD_CMDIOC))) {
ret = 0;
goto skip_status;
}
@@ -965,29 +967,33 @@ out:
return 0;
}
-static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
+void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status)
{
struct dwc3_request *req;
dwc3_stop_active_transfer(dep, true, false);
+ /* If endxfer is delayed, avoid unmapping requests */
+ if (dep->flags & DWC3_EP_DELAY_STOP)
+ return;
+
/* - giveback all requests to gadget driver */
while (!list_empty(&dep->started_list)) {
req = next_request(&dep->started_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
while (!list_empty(&dep->pending_list)) {
req = next_request(&dep->pending_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
while (!list_empty(&dep->cancelled_list)) {
req = next_request(&dep->cancelled_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
}
@@ -1005,6 +1011,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
+ u32 mask;
trace_dwc3_gadget_ep_disable(dep);
@@ -1022,11 +1029,19 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->endpoint.desc = NULL;
}
- dwc3_remove_requests(dwc, dep);
+ dwc3_remove_requests(dwc, dep, -ECONNRESET);
dep->stream_capable = false;
dep->type = 0;
- dep->flags &= DWC3_EP_TXFIFO_RESIZED;
+ mask = DWC3_EP_TXFIFO_RESIZED;
+ /*
+ * dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
+ * set. Do not clear DEP flags, so that the end transfer command will
+ * be reattempted during the next SETUP stage.
+ */
+ if (dep->flags & DWC3_EP_DELAY_STOP)
+ mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED);
+ dep->flags &= mask;
return 0;
}
@@ -2340,7 +2355,7 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc)
if (!dep)
continue;
- dwc3_remove_requests(dwc, dep);
+ dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
}
}
@@ -2440,7 +2455,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
- u32 timeout = 500;
+ u32 timeout = 2000;
if (pm_runtime_suspended(dwc->dev))
return 0;
@@ -2473,6 +2488,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc3_gadget_dctl_write_safe(dwc, reg);
do {
+ usleep_range(1000, 2000);
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
reg &= DWC3_DSTS_DEVCTRLHLT;
} while (--timeout && !(!is_on ^ !reg));
@@ -2501,6 +2517,9 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
if (dwc->ep0state != EP0_SETUP_PHASE) {
int ret;
+ if (dwc->delayed_status)
+ dwc3_ep0_send_delayed_status(dwc);
+
reinit_completion(&dwc->ep0_in_setup);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2568,6 +2587,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
+ synchronize_irq(dwc->irq_gadget);
+
if (!is_on) {
ret = dwc3_gadget_soft_disconnect(dwc);
} else {
@@ -2718,6 +2739,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
+ dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
@@ -2725,6 +2747,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
}
dep = dwc->eps[1];
+ dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
@@ -3568,7 +3591,7 @@ static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
* streams are updated, and the device controller will not be
* triggered to generate ERDY to move the next stream data. To
* workaround this and maintain compatibility with various
- * hosts, force to reinitate the stream until the host is ready
+ * hosts, force to reinitiate the stream until the host is ready
* instead of waiting for the host to prime the endpoint.
*/
if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
@@ -3596,11 +3619,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep = dwc->eps[epnum];
if (!(dep->flags & DWC3_EP_ENABLED)) {
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
return;
/* Handle only EPCMDCMPLT when EP disabled */
- if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
+ if ((event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) &&
+ !(epnum <= 1 && event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE))
return;
}
@@ -3695,7 +3719,7 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* timeout. Delay issuing the End Transfer command until the Setup TRB is
* prepared.
*/
- if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
dep->flags |= DWC3_EP_DELAY_STOP;
return;
}
@@ -3763,13 +3787,24 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
reg &= ~DWC3_DCTL_INITU2ENA;
dwc3_gadget_dctl_write_safe(dwc, reg);
+ dwc->connected = false;
+
dwc3_disconnect_gadget(dwc);
dwc->gadget->speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
- dwc->connected = false;
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ unsigned int dir;
+
+ dir = !!dwc->ep0_expect_in;
+ if (dwc->ep0state == EP0_DATA_PHASE)
+ dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+ else
+ dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+ dwc3_ep0_stall_and_restart(dwc);
+ }
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
@@ -3867,6 +3902,9 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
u8 lanes = 1;
u8 speed;
+ if (!dwc->softconnect)
+ return;
+
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
@@ -4129,7 +4167,7 @@ static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
unsigned int is_ss = evtinfo & BIT(4);
/*
- * WORKAROUND: DWC3 revison 2.20a with hibernation support
+ * WORKAROUND: DWC3 revision 2.20a with hibernation support
* have a known issue which can cause USB CV TD.9.23 to fail
* randomly.
*
@@ -4507,12 +4545,17 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
+ unsigned long flags;
+
if (!dwc->gadget_driver)
return 0;
dwc3_gadget_run_stop(dwc, false, false);
+
+ spin_lock_irqsave(&dwc->lock, flags);
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index cb998ba50fea..1975aec8d36d 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -241,7 +241,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->enqueue = dep->trb_enqueue;
__entry->dequeue = dep->trb_dequeue;
),
- TP_printk("%s: trb %p (E%d:D%d) buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
+ TP_printk("%s: trb %p (E%d:D%d) buf %08x%08x size %s%d ctrl %08x sofn %08x (%c%c%c%c:%c%c:%s)",
__get_str(name), __entry->trb, __entry->enqueue,
__entry->dequeue, __entry->bph, __entry->bpl,
({char *s;
@@ -267,6 +267,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
s = "";
} s; }),
DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl,
+ DWC3_TRB_CTRL_GET_SID_SOFN(__entry->ctrl),
__entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
__entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
__entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e0fa4b186ec6..73dc10a77cde 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2645,10 +2645,10 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
unsigned i = 0;
vla_group(d);
vla_item(d, struct usb_gadget_strings *, stringtabs,
- lang_count + 1);
+ size_add(lang_count, 1));
vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
vla_item(d, struct usb_string, strings,
- lang_count*(needed_count+1));
+ size_mul(lang_count, (needed_count + 1)));
char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
@@ -3700,7 +3700,7 @@ int ffs_name_dev(struct ffs_dev *dev, const char *name)
existing = _ffs_do_find_dev(name);
if (!existing)
- strlcpy(dev->name, name, ARRAY_SIZE(dev->name));
+ strscpy(dev->name, name, ARRAY_SIZE(dev->name));
else if (existing != dev)
ret = -EBUSY;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 925e99f9775c..3abf7f586e2a 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2662,11 +2662,16 @@ static ssize_t forced_eject_store(struct device *dev,
}
static DEVICE_ATTR_RW(nofua);
-/* mode wil be set in fsg_lun_attr_is_visible() */
-static DEVICE_ATTR(ro, 0, ro_show, ro_store);
-static DEVICE_ATTR(file, 0, file_show, file_store);
static DEVICE_ATTR_WO(forced_eject);
+/*
+ * Mode of the ro and file attribute files will be overridden in
+ * fsg_lun_dev_is_visible() depending on if this is a cdrom, or if it is a
+ * removable device.
+ */
+static DEVICE_ATTR_RW(ro);
+static DEVICE_ATTR_RW(file);
+
/****************************** FSG COMMON ******************************/
static void fsg_lun_release(struct device *dev)
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index dc8f078f918c..c36bcfa0e9b4 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -450,39 +450,35 @@ struct ndp_parser_opts {
unsigned next_ndp_index;
};
-#define INIT_NDP16_OPTS { \
- .nth_sign = USB_CDC_NCM_NTH16_SIGN, \
- .ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN, \
- .nth_size = sizeof(struct usb_cdc_ncm_nth16), \
- .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \
- .dpe_size = sizeof(struct usb_cdc_ncm_dpe16), \
- .ndplen_align = 4, \
- .dgram_item_len = 1, \
- .block_length = 1, \
- .ndp_index = 1, \
- .reserved1 = 0, \
- .reserved2 = 0, \
- .next_ndp_index = 1, \
- }
-
-
-#define INIT_NDP32_OPTS { \
- .nth_sign = USB_CDC_NCM_NTH32_SIGN, \
- .ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN, \
- .nth_size = sizeof(struct usb_cdc_ncm_nth32), \
- .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \
- .dpe_size = sizeof(struct usb_cdc_ncm_dpe32), \
- .ndplen_align = 8, \
- .dgram_item_len = 2, \
- .block_length = 2, \
- .ndp_index = 2, \
- .reserved1 = 1, \
- .reserved2 = 2, \
- .next_ndp_index = 2, \
- }
+static const struct ndp_parser_opts ndp16_opts = {
+ .nth_sign = USB_CDC_NCM_NTH16_SIGN,
+ .ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN,
+ .nth_size = sizeof(struct usb_cdc_ncm_nth16),
+ .ndp_size = sizeof(struct usb_cdc_ncm_ndp16),
+ .dpe_size = sizeof(struct usb_cdc_ncm_dpe16),
+ .ndplen_align = 4,
+ .dgram_item_len = 1,
+ .block_length = 1,
+ .ndp_index = 1,
+ .reserved1 = 0,
+ .reserved2 = 0,
+ .next_ndp_index = 1,
+};
-static const struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
-static const struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;
+static const struct ndp_parser_opts ndp32_opts = {
+ .nth_sign = USB_CDC_NCM_NTH32_SIGN,
+ .ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN,
+ .nth_size = sizeof(struct usb_cdc_ncm_nth32),
+ .ndp_size = sizeof(struct usb_cdc_ncm_ndp32),
+ .dpe_size = sizeof(struct usb_cdc_ncm_dpe32),
+ .ndplen_align = 8,
+ .dgram_item_len = 2,
+ .block_length = 2,
+ .ndp_index = 2,
+ .reserved1 = 1,
+ .reserved2 = 2,
+ .next_ndp_index = 2,
+};
static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
{
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index abec5c58f525..a881c69b1f2b 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -89,7 +89,7 @@ struct printer_dev {
u8 printer_cdev_open;
wait_queue_head_t wait;
unsigned q_len;
- char *pnp_string; /* We don't own memory! */
+ char **pnp_string; /* We don't own memory! */
struct usb_function function;
};
@@ -1000,16 +1000,16 @@ static int printer_func_setup(struct usb_function *f,
if ((wIndex>>8) != dev->interface)
break;
- if (!dev->pnp_string) {
+ if (!*dev->pnp_string) {
value = 0;
break;
}
- value = strlen(dev->pnp_string);
+ value = strlen(*dev->pnp_string);
buf[0] = (value >> 8) & 0xFF;
buf[1] = value & 0xFF;
- memcpy(buf + 2, dev->pnp_string, value);
+ memcpy(buf + 2, *dev->pnp_string, value);
DBG(dev, "1284 PNP String: %x %s\n", value,
- dev->pnp_string);
+ *dev->pnp_string);
break;
case GET_PORT_STATUS: /* Get Port Status */
@@ -1475,7 +1475,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
kref_init(&dev->kref);
++opts->refcnt;
dev->minor = opts->minor;
- dev->pnp_string = opts->pnp_string;
+ dev->pnp_string = &opts->pnp_string;
dev->q_len = opts->q_len;
mutex_unlock(&opts->lock);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 8e17ac831be0..658e2e21fdd0 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -2306,7 +2306,7 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
DECLARE_USB_FUNCTION(tcm, tcm_alloc_inst, tcm_alloc);
-static int tcm_init(void)
+static int __init tcm_init(void)
{
int ret;
@@ -2322,7 +2322,7 @@ static int tcm_init(void)
}
module_init(tcm_init);
-static void tcm_exit(void)
+static void __exit tcm_exit(void)
{
target_unregister_template(&usbg_ops);
usb_function_unregister(&tcmusb_func);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 71669e0e4d00..6e196e06181e 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -421,7 +421,7 @@ uvc_register_video(struct uvc_device *uvc)
int ret;
/* TODO reference counting. */
- memset(&uvc->vdev, 0, sizeof(uvc->video));
+ memset(&uvc->vdev, 0, sizeof(uvc->vdev));
uvc->vdev.v4l2_dev = &uvc->v4l2_dev;
uvc->vdev.v4l2_dev->dev = &cdev->gadget->dev;
uvc->vdev.fops = &uvc_v4l2_fops;
@@ -430,7 +430,7 @@ uvc_register_video(struct uvc_device *uvc)
uvc->vdev.vfl_dir = VFL_DIR_TX;
uvc->vdev.lock = &uvc->video.mutex;
uvc->vdev.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- strlcpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name));
+ strscpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name));
video_set_drvdata(&uvc->vdev, uvc);
@@ -888,6 +888,7 @@ static void uvc_free(struct usb_function *f)
struct uvc_device *uvc = to_uvc(f);
struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts,
func_inst);
+ config_item_put(&uvc->header->item);
--opts->refcnt;
kfree(uvc);
}
@@ -897,10 +898,14 @@ static void uvc_function_unbind(struct usb_configuration *c,
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
+ struct uvc_video *video = &uvc->video;
long wait_ret = 1;
uvcg_info(f, "%s()\n", __func__);
+ if (video->async_wq)
+ destroy_workqueue(video->async_wq);
+
/*
* If we know we're connected via v4l2, then there should be a cleanup
* of the device from userspace either via UVC_EVENT_DISCONNECT or
@@ -941,6 +946,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
struct uvc_device *uvc;
struct f_uvc_opts *opts;
struct uvc_descriptor_header **strm_cls;
+ struct config_item *streaming, *header, *h;
uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
if (uvc == NULL)
@@ -973,6 +979,28 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
uvc->desc.fs_streaming = opts->fs_streaming;
uvc->desc.hs_streaming = opts->hs_streaming;
uvc->desc.ss_streaming = opts->ss_streaming;
+
+ streaming = config_group_find_item(&opts->func_inst.group, "streaming");
+ if (!streaming)
+ goto err_config;
+
+ header = config_group_find_item(to_config_group(streaming), "header");
+ config_item_put(streaming);
+ if (!header)
+ goto err_config;
+
+ h = config_group_find_item(to_config_group(header), "h");
+ config_item_put(header);
+ if (!h)
+ goto err_config;
+
+ uvc->header = to_uvcg_streaming_header(h);
+ if (!uvc->header->linked) {
+ mutex_unlock(&opts->lock);
+ kfree(uvc);
+ return ERR_PTR(-EBUSY);
+ }
+
++opts->refcnt;
mutex_unlock(&opts->lock);
@@ -988,6 +1016,11 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
uvc->func.bind_deactivated = true;
return &uvc->func;
+
+err_config:
+ mutex_unlock(&opts->lock);
+ kfree(uvc);
+ return ERR_PTR(-ENOENT);
}
DECLARE_USB_FUNCTION_INIT(uvc, uvc_alloc_inst, uvc_alloc);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 713efd9aefde..29bf8664bf58 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -869,7 +869,7 @@ EXPORT_SYMBOL_GPL(rndis_msg_parser);
static inline int rndis_get_nr(void)
{
- return ida_simple_get(&rndis_ida, 0, 0, GFP_KERNEL);
+ return ida_simple_get(&rndis_ida, 0, 1000, GFP_KERNEL);
}
static inline void rndis_put_nr(int nr)
@@ -1105,7 +1105,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
"used : %s\n"
"state : %s\n"
"medium : 0x%08X\n"
- "speed : %d\n"
+ "speed : %u\n"
"cable : %s\n"
"vendor ID : 0x%08X\n"
"vendor : %s\n",
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 7887def05dc2..e06022873df1 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -144,10 +144,10 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
struct eth_dev *dev = netdev_priv(net);
- strlcpy(p->driver, "g_ether", sizeof(p->driver));
- strlcpy(p->version, UETH__VERSION, sizeof(p->version));
- strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
- strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
+ strscpy(p->driver, "g_ether", sizeof(p->driver));
+ strscpy(p->version, UETH__VERSION, sizeof(p->version));
+ strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
+ strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}
/* REVISIT can also support:
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 6f68cbeeee7c..7538279f9817 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1443,7 +1443,7 @@ void gserial_resume(struct gserial *gser)
}
EXPORT_SYMBOL_GPL(gserial_resume);
-static int userial_init(void)
+static int __init userial_init(void)
{
struct tty_driver *driver;
unsigned i;
@@ -1496,7 +1496,7 @@ fail:
}
module_init(userial_init);
-static void userial_cleanup(void)
+static void __exit userial_cleanup(void)
{
tty_unregister_driver(gs_tty_driver);
tty_driver_kref_put(gs_tty_driver);
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 58e383afdd44..40226b1f7e14 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -88,6 +88,7 @@ struct uvc_video {
struct usb_ep *ep;
struct work_struct pump;
+ struct workqueue_struct *async_wq;
/* Frame parameters */
u8 bpp;
@@ -133,6 +134,8 @@ struct uvc_device {
bool func_connected;
wait_queue_head_t func_connected_queue;
+ struct uvcg_streaming_header *header;
+
/* Descriptors */
struct {
const struct uvc_descriptor_header * const *fs_control;
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index fd8f73bb726d..c4ed48d6b8a4 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -18,12 +18,161 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-uvc.h>
#include "f_uvc.h"
#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"
#include "uvc_v4l2.h"
+#include "uvc_configfs.h"
+
+static struct uvc_format_desc *to_uvc_format(struct uvcg_format *uformat)
+{
+ char guid[16] = UVC_GUID_FORMAT_MJPEG;
+ struct uvc_format_desc *format;
+ struct uvcg_uncompressed *unc;
+
+ if (uformat->type == UVCG_UNCOMPRESSED) {
+ unc = to_uvcg_uncompressed(&uformat->group.cg_item);
+ if (!unc)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(guid, unc->desc.guidFormat, sizeof(guid));
+ }
+
+ format = uvc_format_by_guid(guid);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+
+ return format;
+}
+
+static int uvc_v4l2_get_bytesperline(struct uvcg_format *uformat,
+ struct uvcg_frame *uframe)
+{
+ struct uvcg_uncompressed *u;
+
+ if (uformat->type == UVCG_UNCOMPRESSED) {
+ u = to_uvcg_uncompressed(&uformat->group.cg_item);
+ if (!u)
+ return 0;
+
+ return u->desc.bBitsPerPixel * uframe->frame.w_width / 8;
+ }
+
+ return 0;
+}
+
+static int uvc_get_frame_size(struct uvcg_format *uformat,
+ struct uvcg_frame *uframe)
+{
+ unsigned int bpl = uvc_v4l2_get_bytesperline(uformat, uframe);
+
+ return bpl ? bpl * uframe->frame.w_height :
+ uframe->frame.dw_max_video_frame_buffer_size;
+}
+
+static struct uvcg_format *find_format_by_index(struct uvc_device *uvc, int index)
+{
+ struct uvcg_format_ptr *format;
+ struct uvcg_format *uformat = NULL;
+ int i = 1;
+
+ list_for_each_entry(format, &uvc->header->formats, entry) {
+ if (index == i) {
+ uformat = format->fmt;
+ break;
+ }
+ i++;
+ }
+
+ return uformat;
+}
+
+static struct uvcg_frame *find_frame_by_index(struct uvc_device *uvc,
+ struct uvcg_format *uformat,
+ int index)
+{
+ struct uvcg_format_ptr *format;
+ struct uvcg_frame_ptr *frame;
+ struct uvcg_frame *uframe = NULL;
+
+ list_for_each_entry(format, &uvc->header->formats, entry) {
+ if (format->fmt->type != uformat->type)
+ continue;
+ list_for_each_entry(frame, &format->fmt->frames, entry) {
+ if (index == frame->frm->frame.b_frame_index) {
+ uframe = frame->frm;
+ break;
+ }
+ }
+ }
+
+ return uframe;
+}
+
+static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc,
+ u32 pixelformat)
+{
+ struct uvcg_format_ptr *format;
+ struct uvcg_format *uformat = NULL;
+
+ list_for_each_entry(format, &uvc->header->formats, entry) {
+ struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
+
+ if (fmtdesc->fcc == pixelformat) {
+ uformat = format->fmt;
+ break;
+ }
+ }
+
+ return uformat;
+}
+
+static struct uvcg_frame *find_closest_frame_by_size(struct uvc_device *uvc,
+ struct uvcg_format *uformat,
+ u16 rw, u16 rh)
+{
+ struct uvc_video *video = &uvc->video;
+ struct uvcg_format_ptr *format;
+ struct uvcg_frame_ptr *frame;
+ struct uvcg_frame *uframe = NULL;
+ unsigned int d, maxd;
+
+ /* Find the closest image size. The distance between image sizes is
+ * the size in pixels of the non-overlapping regions between the
+ * requested size and the frame-specified size.
+ */
+ maxd = (unsigned int)-1;
+
+ list_for_each_entry(format, &uvc->header->formats, entry) {
+ if (format->fmt->type != uformat->type)
+ continue;
+
+ list_for_each_entry(frame, &format->fmt->frames, entry) {
+ u16 w, h;
+
+ w = frame->frm->frame.w_width;
+ h = frame->frm->frame.w_height;
+
+ d = min(w, rw) * min(h, rh);
+ d = w*h + rw*rh - 2*d;
+ if (d < maxd) {
+ maxd = d;
+ uframe = frame->frm;
+ }
+
+ if (maxd == 0)
+ break;
+ }
+ }
+
+ if (!uframe)
+ uvcg_dbg(&video->uvc->func, "Unsupported size %ux%u\n", rw, rh);
+
+ return uframe;
+}
/* --------------------------------------------------------------------------
* Requests handling
@@ -67,9 +216,9 @@ uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
struct uvc_device *uvc = video_get_drvdata(vdev);
struct usb_composite_dev *cdev = uvc->func.config->cdev;
- strlcpy(cap->driver, "g_uvc", sizeof(cap->driver));
- strlcpy(cap->card, cdev->gadget->name, sizeof(cap->card));
- strlcpy(cap->bus_info, dev_name(&cdev->gadget->dev),
+ strscpy(cap->driver, "g_uvc", sizeof(cap->driver));
+ strscpy(cap->card, cdev->gadget->name, sizeof(cap->card));
+ strscpy(cap->bus_info, dev_name(&cdev->gadget->dev),
sizeof(cap->bus_info));
return 0;
}
@@ -135,6 +284,139 @@ uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
}
static int
+uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_video *video = &uvc->video;
+ struct uvcg_format *uformat;
+ struct uvcg_frame *uframe;
+ u8 *fcc;
+
+ if (fmt->type != video->queue.queue.type)
+ return -EINVAL;
+
+ fcc = (u8 *)&fmt->fmt.pix.pixelformat;
+ uvcg_dbg(&uvc->func, "Trying format 0x%08x (%c%c%c%c): %ux%u\n",
+ fmt->fmt.pix.pixelformat,
+ fcc[0], fcc[1], fcc[2], fcc[3],
+ fmt->fmt.pix.width, fmt->fmt.pix.height);
+
+ uformat = find_format_by_pix(uvc, fmt->fmt.pix.pixelformat);
+ if (!uformat)
+ return -EINVAL;
+
+ uframe = find_closest_frame_by_size(uvc, uformat,
+ fmt->fmt.pix.width, fmt->fmt.pix.height);
+ if (!uframe)
+ return -EINVAL;
+
+ fmt->fmt.pix.width = uframe->frame.w_width;
+ fmt->fmt.pix.height = uframe->frame.w_height;
+ fmt->fmt.pix.field = V4L2_FIELD_NONE;
+ fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe);
+ fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe);
+ fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->fmt.pix.priv = 0;
+
+ return 0;
+}
+
+static int
+uvc_v4l2_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvcg_format *uformat = NULL;
+ struct uvcg_frame *uframe = NULL;
+ struct uvcg_frame_ptr *frame;
+
+ uformat = find_format_by_pix(uvc, fival->pixel_format);
+ if (!uformat)
+ return -EINVAL;
+
+ list_for_each_entry(frame, &uformat->frames, entry) {
+ if (frame->frm->frame.w_width == fival->width &&
+ frame->frm->frame.w_height == fival->height) {
+ uframe = frame->frm;
+ break;
+ }
+ }
+ if (!uframe)
+ return -EINVAL;
+
+ if (fival->index >= uframe->frame.b_frame_interval_type)
+ return -EINVAL;
+
+ fival->discrete.numerator =
+ uframe->dw_frame_interval[fival->index];
+
+ /* TODO: handle V4L2_FRMIVAL_TYPE_STEPWISE */
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete.denominator = 10000000;
+ v4l2_simplify_fraction(&fival->discrete.numerator,
+ &fival->discrete.denominator, 8, 333);
+
+ return 0;
+}
+
+static int
+uvc_v4l2_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvcg_format *uformat = NULL;
+ struct uvcg_frame *uframe = NULL;
+
+ uformat = find_format_by_pix(uvc, fsize->pixel_format);
+ if (!uformat)
+ return -EINVAL;
+
+ if (fsize->index >= uformat->num_frames)
+ return -EINVAL;
+
+ uframe = find_frame_by_index(uvc, uformat, fsize->index + 1);
+ if (!uframe)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = uframe->frame.w_width;
+ fsize->discrete.height = uframe->frame.w_height;
+
+ return 0;
+}
+
+static int
+uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_format_desc *fmtdesc;
+ struct uvcg_format *uformat;
+
+ if (f->index >= uvc->header->num_fmt)
+ return -EINVAL;
+
+ uformat = find_format_by_index(uvc, f->index + 1);
+ if (!uformat)
+ return -EINVAL;
+
+ if (uformat->type != UVCG_UNCOMPRESSED)
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+
+ fmtdesc = to_uvc_format(uformat);
+ f->pixelformat = fmtdesc->fcc;
+
+ strscpy(f->description, fmtdesc->name, sizeof(f->description));
+ f->description[strlen(fmtdesc->name) - 1] = 0;
+
+ return 0;
+}
+
+static int
uvc_v4l2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b)
{
struct video_device *vdev = video_devdata(file);
@@ -170,7 +452,7 @@ uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
return ret;
if (uvc->state == UVC_STATE_STREAMING)
- schedule_work(&video->pump);
+ queue_work(video->async_wq, &video->pump);
return ret;
}
@@ -298,8 +580,12 @@ uvc_v4l2_ioctl_default(struct file *file, void *fh, bool valid_prio,
const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops = {
.vidioc_querycap = uvc_v4l2_querycap,
+ .vidioc_try_fmt_vid_out = uvc_v4l2_try_format,
.vidioc_g_fmt_vid_out = uvc_v4l2_get_format,
.vidioc_s_fmt_vid_out = uvc_v4l2_set_format,
+ .vidioc_enum_frameintervals = uvc_v4l2_enum_frameintervals,
+ .vidioc_enum_framesizes = uvc_v4l2_enum_framesizes,
+ .vidioc_enum_fmt_vid_out = uvc_v4l2_enum_format,
.vidioc_reqbufs = uvc_v4l2_reqbufs,
.vidioc_querybuf = uvc_v4l2_querybuf,
.vidioc_qbuf = uvc_v4l2_qbuf,
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index c00ce0e91f5d..bb037fcc90e6 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -277,7 +277,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
spin_unlock_irqrestore(&video->req_lock, flags);
if (uvc->state == UVC_STATE_STREAMING)
- schedule_work(&video->pump);
+ queue_work(video->async_wq, &video->pump);
}
static int
@@ -485,7 +485,7 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
video->req_int_count = 0;
- schedule_work(&video->pump);
+ queue_work(video->async_wq, &video->pump);
return ret;
}
@@ -499,6 +499,11 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
spin_lock_init(&video->req_lock);
INIT_WORK(&video->pump, uvcg_video_pump);
+ /* Allocate a work queue for asynchronous video pump handler. */
+ video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!video->async_wq)
+ return -EINVAL;
+
video->uvc = uvc;
video->fcc = V4L2_PIX_FMT_YUYV;
video->bpp = 16;
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 728987280373..a9a7b3fc60ec 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -994,7 +994,7 @@ static const struct usb_gadget_ops at91_udc_ops = {
.udc_stop = at91_stop,
/*
- * VBUS-powered devices may also also want to support bigger
+ * VBUS-powered devices may also want to support bigger
* power budgets after an appropriate SET_CONFIGURATION.
*/
/* .vbus_power = at91_vbus_power, */
@@ -1779,12 +1779,14 @@ static void at91udc_of_init(struct at91_udc *udc, struct device_node *np)
if (of_property_read_u32(np, "atmel,vbus-polled", &val) == 0)
board->vbus_polled = 1;
- board->vbus_pin = gpiod_get_from_of_node(np, "atmel,vbus-gpio", 0,
- GPIOD_IN, "udc_vbus");
+ board->vbus_pin = fwnode_gpiod_get_index(of_fwnode_handle(np),
+ "atmel,vbus", 0, GPIOD_IN,
+ "udc_vbus");
if (IS_ERR(board->vbus_pin))
board->vbus_pin = NULL;
- board->pullup_pin = gpiod_get_from_of_node(np, "atmel,pullup-gpio", 0,
+ board->pullup_pin = fwnode_gpiod_get_index(of_fwnode_handle(np),
+ "atmel,pullup", 0,
GPIOD_ASIS, "udc_pullup");
if (IS_ERR(board->pullup_pin))
board->pullup_pin = NULL;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index c97cd4bc817c..84605a4d0715 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -91,7 +91,7 @@ module_param(dma_mode, ushort, 0644);
* mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
* mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
*/
-static ushort fifo_mode = 0;
+static ushort fifo_mode;
module_param(fifo_mode, ushort, 0644);
/*
@@ -100,7 +100,7 @@ module_param(fifo_mode, ushort, 0644);
* USB suspend requests will be ignored. This is acceptable for
* self-powered devices. For bus powered devices set this to 1.
*/
-static ushort enable_suspend = 0;
+static ushort enable_suspend;
module_param(enable_suspend, ushort, 0644);
static void assert_out_naking(struct net2272_ep *ep, const char *where)
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 61cabb9de6ae..bea346e362b2 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2234,7 +2234,7 @@ static int proc_otg_show(struct seq_file *s)
char *ctrl_name = "(UNKNOWN)";
tmp = omap_readl(OTG_REV);
- ctrl_name = "tranceiver_ctrl";
+ ctrl_name = "transceiver_ctrl";
trans = omap_readw(USB_TRANSCEIVER_CTRL);
seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
tmp >> 4, tmp & 0xf, ctrl_name, trans);
@@ -2558,7 +2558,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
/* set up driver data structures */
BUG_ON(strlen(name) >= sizeof ep->name);
- strlcpy(ep->name, name, sizeof ep->name);
+ strscpy(ep->name, name, sizeof(ep->name));
INIT_LIST_HEAD(&ep->queue);
INIT_LIST_HEAD(&ep->iso);
ep->bEndpointAddress = addr;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 648be3fd476a..615ba0a6fbee 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -17,6 +17,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -38,16 +39,16 @@
#define USB3_USB20_CON 0x204
#define USB3_USB30_CON 0x208
#define USB3_USB_STA 0x210
-#define USB3_DRD_CON 0x218
+#define USB3_DRD_CON(p) ((p)->is_rzv2m ? 0x400 : 0x218)
#define USB3_USB_INT_STA_1 0x220
#define USB3_USB_INT_STA_2 0x224
#define USB3_USB_INT_ENA_1 0x228
#define USB3_USB_INT_ENA_2 0x22c
#define USB3_STUP_DAT_0 0x230
#define USB3_STUP_DAT_1 0x234
-#define USB3_USB_OTG_STA 0x268
-#define USB3_USB_OTG_INT_STA 0x26c
-#define USB3_USB_OTG_INT_ENA 0x270
+#define USB3_USB_OTG_STA(p) ((p)->is_rzv2m ? 0x410 : 0x268)
+#define USB3_USB_OTG_INT_STA(p) ((p)->is_rzv2m ? 0x414 : 0x26c)
+#define USB3_USB_OTG_INT_ENA(p) ((p)->is_rzv2m ? 0x418 : 0x270)
#define USB3_P0_MOD 0x280
#define USB3_P0_CON 0x288
#define USB3_P0_STA 0x28c
@@ -135,6 +136,8 @@
#define USB_STA_VBUS_STA BIT(0)
/* DRD_CON */
+#define DRD_CON_PERI_RST BIT(31) /* rzv2m only */
+#define DRD_CON_HOST_RST BIT(30) /* rzv2m only */
#define DRD_CON_PERI_CON BIT(24)
#define DRD_CON_VBOUT BIT(0)
@@ -155,7 +158,7 @@
#define USB_INT_2_PIPE(n) BIT(n)
/* USB_OTG_STA, USB_OTG_INT_STA and USB_OTG_INT_ENA */
-#define USB_OTG_IDMON BIT(4)
+#define USB_OTG_IDMON(p) ((p)->is_rzv2m ? BIT(0) : BIT(4))
/* P0_MOD */
#define P0_MOD_DIR BIT(6)
@@ -255,7 +258,7 @@
#define USB3_EP0_SS_MAX_PACKET_SIZE 512
#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
#define USB3_EP0_BUF_SIZE 8
-#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */
+#define USB3_MAX_NUM_PIPES(p) ((p)->is_rzv2m ? 16 : 6) /* This includes PIPE 0 */
#define USB3_WAIT_US 3
#define USB3_DMA_NUM_SETTING_AREA 4
/*
@@ -326,10 +329,13 @@ struct renesas_usb3_priv {
int num_ramif;
int ramsize_per_pipe; /* unit = bytes */
bool workaround_for_vbus; /* if true, don't check vbus signal */
+ bool is_rzv2m; /* if true, RZ/V2M SoC */
};
struct renesas_usb3 {
void __iomem *reg;
+ struct reset_control *drd_rstc;
+ struct reset_control *usbp_rstc;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
@@ -363,6 +369,7 @@ struct renesas_usb3 {
bool forced_b_device;
bool start_to_connect;
bool role_sw_by_connector;
+ bool is_rzv2m;
};
#define gadget_to_renesas_usb3(_gadget) \
@@ -467,7 +474,7 @@ static void usb3_disable_pipe_irq(struct renesas_usb3 *usb3, int num)
static bool usb3_is_host(struct renesas_usb3 *usb3)
{
- return !(usb3_read(usb3, USB3_DRD_CON) & DRD_CON_PERI_CON);
+ return !(usb3_read(usb3, USB3_DRD_CON(usb3)) & DRD_CON_PERI_CON);
}
static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
@@ -674,10 +681,20 @@ static void renesas_usb3_role_work(struct work_struct *work)
static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
{
+ if (usb3->is_rzv2m) {
+ if (host) {
+ usb3_set_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ usb3_clear_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ } else {
+ usb3_set_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ usb3_clear_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ }
+ }
+
if (host)
- usb3_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+ usb3_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
else
- usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+ usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
}
static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
@@ -693,9 +710,9 @@ static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
static void usb3_vbus_out(struct renesas_usb3 *usb3, bool enable)
{
if (enable)
- usb3_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
+ usb3_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
else
- usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
+ usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
}
static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
@@ -716,7 +733,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
static bool usb3_is_a_device(struct renesas_usb3 *usb3)
{
- return !(usb3_read(usb3, USB3_USB_OTG_STA) & USB_OTG_IDMON);
+ return !(usb3_read(usb3, USB3_USB_OTG_STA(usb3)) & USB_OTG_IDMON(usb3));
}
static void usb3_check_id(struct renesas_usb3 *usb3)
@@ -739,8 +756,8 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
USB3_USB_COM_CON);
- usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA);
- usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA);
+ usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_STA(usb3));
+ usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_ENA(usb3));
usb3_check_id(usb3);
usb3_check_vbus(usb3);
@@ -750,7 +767,7 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
{
usb3_disconnect(usb3);
usb3_write(usb3, 0, USB3_P0_INT_ENA);
- usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA);
+ usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA(usb3));
usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
usb3_write(usb3, 0, USB3_AXI_INT_ENA);
@@ -2005,9 +2022,15 @@ static void usb3_irq_idmon_change(struct renesas_usb3 *usb3)
usb3_check_id(usb3);
}
-static void usb3_irq_otg_int(struct renesas_usb3 *usb3, u32 otg_int_sta)
+static void usb3_irq_otg_int(struct renesas_usb3 *usb3)
{
- if (otg_int_sta & USB_OTG_IDMON)
+ u32 otg_int_sta = usb3_read(usb3, USB3_USB_OTG_INT_STA(usb3));
+
+ otg_int_sta &= usb3_read(usb3, USB3_USB_OTG_INT_ENA(usb3));
+ if (otg_int_sta)
+ usb3_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA(usb3));
+
+ if (otg_int_sta & USB_OTG_IDMON(usb3))
usb3_irq_idmon_change(usb3);
}
@@ -2015,7 +2038,6 @@ static void usb3_irq_epc(struct renesas_usb3 *usb3)
{
u32 int_sta_1 = usb3_read(usb3, USB3_USB_INT_STA_1);
u32 int_sta_2 = usb3_read(usb3, USB3_USB_INT_STA_2);
- u32 otg_int_sta = usb3_read(usb3, USB3_USB_OTG_INT_STA);
int_sta_1 &= usb3_read(usb3, USB3_USB_INT_ENA_1);
if (int_sta_1) {
@@ -2027,11 +2049,8 @@ static void usb3_irq_epc(struct renesas_usb3 *usb3)
if (int_sta_2)
usb3_irq_epc_int_2(usb3, int_sta_2);
- otg_int_sta &= usb3_read(usb3, USB3_USB_OTG_INT_ENA);
- if (otg_int_sta) {
- usb3_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA);
- usb3_irq_otg_int(usb3, otg_int_sta);
- }
+ if (!usb3->is_rzv2m)
+ usb3_irq_otg_int(usb3);
}
static void usb3_irq_dma_int(struct renesas_usb3 *usb3, u32 dma_sta)
@@ -2085,6 +2104,15 @@ static irqreturn_t renesas_usb3_irq(int irq, void *_usb3)
return ret;
}
+static irqreturn_t renesas_usb3_otg_irq(int irq, void *_usb3)
+{
+ struct renesas_usb3 *usb3 = _usb3;
+
+ usb3_irq_otg_int(usb3);
+
+ return IRQ_HANDLED;
+}
+
static void usb3_write_pn_mod(struct renesas_usb3_ep *usb3_ep,
const struct usb_endpoint_descriptor *desc)
{
@@ -2571,6 +2599,8 @@ static int renesas_usb3_remove(struct platform_device *pdev)
usb_role_switch_unregister(usb3->role_sw);
usb_del_gadget_udc(&usb3->gadget);
+ reset_control_assert(usb3->usbp_rstc);
+ reset_control_assert(usb3->drd_rstc);
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
__renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -2589,8 +2619,8 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
usb3->num_usb3_eps = priv->ramsize_per_ramif * priv->num_ramif * 2 /
priv->ramsize_per_pipe + 1;
- if (usb3->num_usb3_eps > USB3_MAX_NUM_PIPES)
- usb3->num_usb3_eps = USB3_MAX_NUM_PIPES;
+ if (usb3->num_usb3_eps > USB3_MAX_NUM_PIPES(usb3))
+ usb3->num_usb3_eps = USB3_MAX_NUM_PIPES(usb3);
usb3->usb3_ep = devm_kcalloc(dev,
usb3->num_usb3_eps, sizeof(*usb3_ep),
@@ -2707,6 +2737,13 @@ static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
.workaround_for_vbus = true,
};
+static const struct renesas_usb3_priv renesas_usb3_priv_rzv2m = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 1,
+ .ramsize_per_pipe = SZ_4K,
+ .is_rzv2m = true,
+};
+
static const struct of_device_id usb3_of_match[] = {
{
.compatible = "renesas,r8a774c0-usb3-peri",
@@ -2718,6 +2755,9 @@ static const struct of_device_id usb3_of_match[] = {
.compatible = "renesas,r8a77990-usb3-peri",
.data = &renesas_usb3_priv_r8a77990,
}, {
+ .compatible = "renesas,rzv2m-usb3-peri",
+ .data = &renesas_usb3_priv_rzv2m,
+ }, {
.compatible = "renesas,rcar-gen3-usb3-peri",
.data = &renesas_usb3_priv_gen3,
},
@@ -2748,7 +2788,7 @@ static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
- int irq, ret;
+ int irq, drd_irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
@@ -2762,10 +2802,18 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ if (priv->is_rzv2m) {
+ drd_irq = platform_get_irq_byname(pdev, "drd");
+ if (drd_irq < 0)
+ return drd_irq;
+ }
+
usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
if (!usb3)
return -ENOMEM;
+ usb3->is_rzv2m = priv->is_rzv2m;
+
usb3->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(usb3->reg))
return PTR_ERR(usb3->reg);
@@ -2787,6 +2835,14 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ if (usb3->is_rzv2m) {
+ ret = devm_request_irq(&pdev->dev, drd_irq,
+ renesas_usb3_otg_irq, 0,
+ dev_name(&pdev->dev), usb3);
+ if (ret < 0)
+ return ret;
+ }
+
INIT_WORK(&usb3->extcon_work, renesas_usb3_extcon_work);
usb3->extcon = devm_extcon_dev_allocate(&pdev->dev, renesas_usb3_cable);
if (IS_ERR(usb3->extcon))
@@ -2817,10 +2873,27 @@ static int renesas_usb3_probe(struct platform_device *pdev)
goto err_add_udc;
}
+ usb3->drd_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
+ "drd_reset");
+ if (IS_ERR(usb3->drd_rstc)) {
+ ret = PTR_ERR(usb3->drd_rstc);
+ goto err_add_udc;
+ }
+
+ usb3->usbp_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
+ "aresetn_p");
+ if (IS_ERR(usb3->usbp_rstc)) {
+ ret = PTR_ERR(usb3->usbp_rstc);
+ goto err_add_udc;
+ }
+
+ reset_control_deassert(usb3->drd_rstc);
+ reset_control_deassert(usb3->usbp_rstc);
+
pm_runtime_enable(&pdev->dev);
ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
if (ret < 0)
- goto err_add_udc;
+ goto err_reset;
ret = device_create_file(&pdev->dev, &dev_attr_role);
if (ret < 0)
@@ -2858,6 +2931,10 @@ static int renesas_usb3_probe(struct platform_device *pdev)
err_dev_create:
usb_del_gadget_udc(&usb3->gadget);
+err_reset:
+ reset_control_assert(usb3->usbp_rstc);
+ reset_control_assert(usb3->drd_rstc);
+
err_add_udc:
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index c6625aeb7bca..8c57b191e52b 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -23,7 +23,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/io.h>
@@ -1419,8 +1419,7 @@ static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
{
dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- if (udc_info && (udc_info->udc_command ||
- gpio_is_valid(udc_info->pullup_pin))) {
+ if (udc_info && (udc_info->udc_command || udc->pullup_gpiod)) {
if (is_on)
s3c2410_udc_enable(udc);
@@ -1467,9 +1466,7 @@ static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- value = gpio_get_value(udc_info->vbus_pin) ? 1 : 0;
- if (udc_info->vbus_pin_inverted)
- value = !value;
+ value = gpiod_get_value(dev->vbus_gpiod);
if (value != dev->vbus)
s3c2410_udc_vbus_session(&dev->gadget, value);
@@ -1504,14 +1501,15 @@ static const struct usb_gadget_ops s3c2410_ops = {
.udc_stop = s3c2410_udc_stop,
};
-static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
+static void s3c2410_udc_command(struct s3c2410_udc *udc,
+ enum s3c2410_udc_cmd_e cmd)
{
if (!udc_info)
return;
if (udc_info->udc_command) {
udc_info->udc_command(cmd);
- } else if (gpio_is_valid(udc_info->pullup_pin)) {
+ } else if (udc->pullup_gpiod) {
int value;
switch (cmd) {
@@ -1524,9 +1522,8 @@ static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
default:
return;
}
- value ^= udc_info->pullup_pin_inverted;
- gpio_set_value(udc_info->pullup_pin, value);
+ gpiod_set_value(udc->pullup_gpiod, value);
}
}
@@ -1551,7 +1548,7 @@ static void s3c2410_udc_disable(struct s3c2410_udc *dev)
udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
/* Good bye, cruel world */
- s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
+ s3c2410_udc_command(dev, S3C2410_UDC_P_DISABLE);
/* Set speed to unknown */
dev->gadget.speed = USB_SPEED_UNKNOWN;
@@ -1613,7 +1610,7 @@ static void s3c2410_udc_enable(struct s3c2410_udc *dev)
udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
/* time to say "hello, world" */
- s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
+ s3c2410_udc_command(dev, S3C2410_UDC_P_ENABLE);
}
static int s3c2410_udc_start(struct usb_gadget *g,
@@ -1802,14 +1799,15 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
dev_dbg(dev, "got irq %i\n", irq_usbd);
- if (udc_info && udc_info->vbus_pin > 0) {
- retval = gpio_request(udc_info->vbus_pin, "udc vbus");
- if (retval < 0) {
- dev_err(dev, "cannot claim vbus pin\n");
- goto err_int;
- }
+ udc->vbus_gpiod = gpiod_get_optional(dev, "vbus", GPIOD_IN);
+ if (IS_ERR(udc->vbus_gpiod)) {
+ retval = PTR_ERR(udc->vbus_gpiod);
+ goto err_int;
+ }
+ if (udc->vbus_gpiod) {
+ gpiod_set_consumer_name(udc->vbus_gpiod, "udc vbus");
- irq = gpio_to_irq(udc_info->vbus_pin);
+ irq = gpiod_to_irq(udc->vbus_gpiod);
if (irq < 0) {
dev_err(dev, "no irq for gpio vbus pin\n");
retval = irq;
@@ -1833,16 +1831,12 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
udc->vbus = 1;
}
- if (udc_info && !udc_info->udc_command &&
- gpio_is_valid(udc_info->pullup_pin)) {
-
- retval = gpio_request_one(udc_info->pullup_pin,
- udc_info->vbus_pin_inverted ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "udc pullup");
- if (retval)
- goto err_vbus_irq;
+ udc->pullup_gpiod = gpiod_get_optional(dev, "pullup", GPIOD_OUT_LOW);
+ if (IS_ERR(udc->pullup_gpiod)) {
+ retval = PTR_ERR(udc->pullup_gpiod);
+ goto err_vbus_irq;
}
+ gpiod_set_consumer_name(udc->pullup_gpiod, "udc pullup");
retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (retval)
@@ -1856,15 +1850,10 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
return 0;
err_add_udc:
- if (udc_info && !udc_info->udc_command &&
- gpio_is_valid(udc_info->pullup_pin))
- gpio_free(udc_info->pullup_pin);
err_vbus_irq:
- if (udc_info && udc_info->vbus_pin > 0)
- free_irq(gpio_to_irq(udc_info->vbus_pin), udc);
+ if (udc->vbus_gpiod)
+ free_irq(gpiod_to_irq(udc->vbus_gpiod), udc);
err_gpio_claim:
- if (udc_info && udc_info->vbus_pin > 0)
- gpio_free(udc_info->vbus_pin);
err_int:
free_irq(irq_usbd, udc);
err_udc_clk:
@@ -1885,7 +1874,6 @@ err_usb_bus_clk:
static int s3c2410_udc_remove(struct platform_device *pdev)
{
struct s3c2410_udc *udc = platform_get_drvdata(pdev);
- unsigned int irq;
dev_dbg(&pdev->dev, "%s()\n", __func__);
@@ -1895,14 +1883,8 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
usb_del_gadget_udc(&udc->gadget);
debugfs_remove(debugfs_lookup("registers", s3c2410_udc_debugfs_root));
- if (udc_info && !udc_info->udc_command &&
- gpio_is_valid(udc_info->pullup_pin))
- gpio_free(udc_info->pullup_pin);
-
- if (udc_info && udc_info->vbus_pin > 0) {
- irq = gpio_to_irq(udc_info->vbus_pin);
- free_irq(irq, udc);
- }
+ if (udc->vbus_gpiod)
+ free_irq(gpiod_to_irq(udc->vbus_gpiod), udc);
free_irq(irq_usbd, udc);
@@ -1926,14 +1908,18 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
static int
s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
{
- s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
+ struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+
+ s3c2410_udc_command(udc, S3C2410_UDC_P_DISABLE);
return 0;
}
static int s3c2410_udc_resume(struct platform_device *pdev)
{
- s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
+ struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+
+ s3c2410_udc_command(udc, S3C2410_UDC_P_ENABLE);
return 0;
}
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.h b/drivers/usb/gadget/udc/s3c2410_udc.h
index 135a5bff3c74..cdbf202e5ee8 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.h
+++ b/drivers/usb/gadget/udc/s3c2410_udc.h
@@ -83,6 +83,9 @@ struct s3c2410_udc {
u32 port_status;
int ep0state;
+ struct gpio_desc *vbus_gpiod;
+ struct gpio_desc *pullup_gpiod;
+
unsigned got_irq : 1;
unsigned req_std : 1;
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 3c37effdfa64..76919d7570d2 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -2,7 +2,7 @@
/*
* NVIDIA Tegra XUSB device mode controller
*
- * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015, Google Inc.
*/
@@ -702,6 +702,8 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
pm_runtime_get_sync(xudc->dev);
+ tegra_phy_xusb_utmi_pad_power_on(xudc->curr_utmi_phy);
+
err = phy_power_on(xudc->curr_utmi_phy);
if (err < 0)
dev_err(xudc->dev, "UTMI power on failed: %d\n", err);
@@ -756,6 +758,8 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
/* Make sure interrupt handler has completed before powergating. */
synchronize_irq(xudc->irq);
+ tegra_phy_xusb_utmi_pad_power_down(xudc->curr_utmi_phy);
+
err = phy_power_off(xudc->curr_utmi_phy);
if (err < 0)
dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index fd9264cf6c87..247568bc17a2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -205,12 +205,12 @@ config USB_EHCI_FSL
Variation of ARC USB block used in some Freescale chips.
config USB_EHCI_HCD_NPCM7XX
- tristate "Support for Nuvoton NPCM7XX on-chip EHCI USB controller"
- depends on (USB_EHCI_HCD && ARCH_NPCM7XX) || COMPILE_TEST
- default y if (USB_EHCI_HCD && ARCH_NPCM7XX)
+ tristate "Support for Nuvoton NPCM on-chip EHCI USB controller"
+ depends on (USB_EHCI_HCD && ARCH_NPCM) || COMPILE_TEST
+ default y if (USB_EHCI_HCD && ARCH_NPCM)
help
Enables support for the on-chip EHCI controller on
- Nuvoton NPCM7XX chips.
+ Nuvoton NPCM chips.
config USB_EHCI_HCD_OMAP
tristate "EHCI support for OMAP3 and later chips"
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 05d41fd65f25..8b775e7bab06 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -25,8 +25,6 @@
#define DRIVER_DESC "EHCI Atmel driver"
-static const char hcd_name[] = "ehci-atmel";
-
#define EHCI_INSNREG(index) ((index) * 4 + 0x90)
#define EHCI_INSNREG08_HSIC_EN BIT(2)
@@ -239,7 +237,6 @@ static int __init ehci_atmel_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ehci_init_driver(&ehci_atmel_hc_driver, &ehci_atmel_drv_overrides);
return platform_driver_register(&ehci_atmel_driver);
}
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 1a9b7572e17f..a333231616f4 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
@@ -32,7 +32,6 @@
(EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 | \
EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)
-static const char hcd_name[] = "ehci-exynos";
static struct hc_driver __read_mostly exynos_ehci_hc_driver;
#define PHY_NUMBER 3
@@ -132,20 +131,13 @@ static void exynos_ehci_phy_disable(struct device *dev)
static void exynos_setup_vbus_gpio(struct device *dev)
{
+ struct gpio_desc *gpio;
int err;
- int gpio;
- if (!dev->of_node)
- return;
-
- gpio = of_get_named_gpio(dev->of_node, "samsung,vbus-gpio", 0);
- if (!gpio_is_valid(gpio))
- return;
-
- err = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_HIGH,
- "ehci_vbus_gpio");
+ gpio = devm_gpiod_get_optional(dev, "samsung,vbus", GPIOD_OUT_HIGH);
+ err = PTR_ERR_OR_ZERO(gpio);
if (err)
- dev_err(dev, "can't request ehci vbus gpio %d", gpio);
+ dev_err(dev, "can't request ehci vbus gpio: %d\n", err);
}
static int exynos_ehci_probe(struct platform_device *pdev)
@@ -347,7 +339,6 @@ static int __init ehci_exynos_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ehci_init_driver(&exynos_ehci_hc_driver, &exynos_overrides);
return platform_driver_register(&exynos_ehci_driver);
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 896c0d107f72..9cea785934e5 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -722,8 +722,6 @@ static int __init ehci_fsl_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info(DRV_NAME ": " DRIVER_DESC "\n");
-
ehci_init_driver(&fsl_ehci_hc_driver, &ehci_fsl_overrides);
fsl_ehci_hc_driver.product_desc =
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 684164fa9716..a1930db0da1c 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1351,7 +1351,6 @@ static int __init ehci_hcd_init(void)
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c
index 6b5a7a873e01..63af1a827fcb 100644
--- a/drivers/usb/host/ehci-npcm7xx.c
+++ b/drivers/usb/host/ehci-npcm7xx.c
@@ -22,19 +22,8 @@
#include "ehci.h"
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-
#define DRIVER_DESC "EHCI npcm7xx driver"
-static const char hcd_name[] = "npcm7xx-ehci";
-
-#define USB2PHYCTL_OFFSET 0x144
-
-#define IPSRST2_OFFSET 0x24
-#define IPSRST3_OFFSET 0x34
-
-
static struct hc_driver __read_mostly ehci_npcm7xx_hc_driver;
static int __maybe_unused ehci_npcm7xx_drv_suspend(struct device *dev)
@@ -60,52 +49,12 @@ static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct resource *res;
- struct regmap *gcr_regmap;
- struct regmap *rst_regmap;
const struct hc_driver *driver = &ehci_npcm7xx_hc_driver;
int irq;
int retval;
dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n");
- gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
- if (IS_ERR(gcr_regmap)) {
- dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-gcr\n",
- __func__);
- return PTR_ERR(gcr_regmap);
- }
-
- rst_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
- if (IS_ERR(rst_regmap)) {
- dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-rst\n",
- __func__);
- return PTR_ERR(rst_regmap);
- }
-
- /********* phy init ******/
- // reset usb host
- regmap_update_bits(rst_regmap, IPSRST2_OFFSET,
- (0x1 << 26), (0x1 << 26));
- regmap_update_bits(rst_regmap, IPSRST3_OFFSET,
- (0x1 << 25), (0x1 << 25));
- regmap_update_bits(gcr_regmap, USB2PHYCTL_OFFSET,
- (0x1 << 28), 0);
-
- udelay(1);
-
- // enable phy
- regmap_update_bits(rst_regmap, IPSRST3_OFFSET,
- (0x1 << 25), 0);
-
- udelay(50); // enable phy
-
- regmap_update_bits(gcr_regmap, USB2PHYCTL_OFFSET,
- (0x1 << 28), (0x1 << 28));
-
- // enable host
- regmap_update_bits(rst_regmap, IPSRST2_OFFSET,
- (0x1 << 26), 0);
-
if (usb_disabled())
return -ENODEV;
@@ -191,8 +140,6 @@ static int __init ehci_npcm7xx_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_npcm7xx_hc_driver, NULL);
return platform_driver_register(&npcm7xx_ehci_hcd_driver);
}
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 8c45bc17a580..7dd984722a7f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -284,8 +284,6 @@ static int __init ehci_omap_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_omap_hc_driver, &ehci_omap_overrides);
return platform_driver_register(&ehci_hcd_omap_driver);
}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 3626758b3e2a..a3454a3ea4e0 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -65,8 +65,6 @@ struct orion_ehci_hcd {
struct phy *phy;
};
-static const char hcd_name[] = "ehci-orion";
-
static struct hc_driver __read_mostly ehci_orion_hc_driver;
/*
@@ -361,8 +359,6 @@ static int __init ehci_orion_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_orion_hc_driver, &orion_overrides);
return platform_driver_register(&ehci_orion_driver);
}
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 9937c5a7efc2..17f8b6ea0c35 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -382,7 +382,7 @@ static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
if (is_bypassed_id(pdev))
return -ENODEV;
- return usb_hcd_pci_probe(pdev, id, &ehci_pci_hc_driver);
+ return usb_hcd_pci_probe(pdev, &ehci_pci_hc_driver);
}
static void ehci_pci_remove(struct pci_dev *pdev)
@@ -423,8 +423,6 @@ static int __init ehci_pci_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides);
/* Entries for the PCI suspend/resume callbacks are special */
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 6924f0316e9a..fe497c876d76 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -53,8 +53,6 @@ struct ehci_platform_priv {
struct delayed_work poll_work;
};
-static const char hcd_name[] = "ehci-platform";
-
static int ehci_platform_reset(struct usb_hcd *hcd)
{
struct platform_device *pdev = to_platform_device(hcd->self.controller);
@@ -529,8 +527,6 @@ static int __init ehci_platform_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ehci_platform_driver);
}
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 807e64991e3e..666f5c4db25a 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -645,7 +645,7 @@ qh_urb_transaction (
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe);
+ maxpacket = usb_endpoint_maxp(&urb->ep->desc);
/*
* buffer gets wrapped in one or more qtds;
@@ -1218,7 +1218,7 @@ static int ehci_submit_single_step_set_feature(
token |= (1 /* "in" */ << 8); /*This is IN stage*/
- maxpacket = usb_maxpacket(urb->dev, urb->pipe);
+ maxpacket = usb_endpoint_maxp(&urb->ep->desc);
qtd_fill(ehci, qtd, buf, len, token, maxpacket);
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 3694e450a11a..c4ddd1022f60 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -24,8 +24,6 @@
#define DRIVER_DESC "EHCI SPEAr driver"
-static const char hcd_name[] = "SPEAr-ehci";
-
struct spear_ehci {
struct clk *clk;
};
@@ -167,8 +165,6 @@ static int __init ehci_spear_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_spear_hc_driver, &spear_overrides);
return platform_driver_register(&spear_ehci_hcd_driver);
}
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index f74433aac948..f731dc98c533 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -42,8 +42,6 @@ struct st_ehci_platform_priv {
#define hcd_to_ehci_priv(h) \
((struct st_ehci_platform_priv *)hcd_to_ehci(h)->priv)
-static const char hcd_name[] = "ehci-st";
-
#define EHCI_CAPS_SIZE 0x10
#define AHB2STBUS_INSREG01 (EHCI_CAPS_SIZE + 0x84)
@@ -346,8 +344,6 @@ static int __init ehci_platform_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ehci_platform_driver);
}
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 2ba09c3fbc2f..95a44462bed0 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -25,8 +25,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
#include <asm/fsl_gtm.h>
#include "fhci.h"
@@ -150,15 +150,15 @@ int fhci_ioports_check_bus_state(struct fhci_hcd *fhci)
u8 bits = 0;
/* check USBOE,if transmitting,exit */
- if (!gpio_get_value(fhci->gpios[GPIO_USBOE]))
+ if (!gpiod_get_value(fhci->gpiods[GPIO_USBOE]))
return -1;
/* check USBRP */
- if (gpio_get_value(fhci->gpios[GPIO_USBRP]))
+ if (gpiod_get_value(fhci->gpiods[GPIO_USBRP]))
bits |= 0x2;
/* check USBRN */
- if (gpio_get_value(fhci->gpios[GPIO_USBRN]))
+ if (gpiod_get_value(fhci->gpiods[GPIO_USBRN]))
bits |= 0x1;
return bits;
@@ -630,40 +630,23 @@ static int of_fhci_probe(struct platform_device *ofdev)
/* GPIOs and pins */
for (i = 0; i < NUM_GPIOS; i++) {
- int gpio;
- enum of_gpio_flags flags;
-
- gpio = of_get_gpio_flags(node, i, &flags);
- fhci->gpios[i] = gpio;
- fhci->alow_gpios[i] = flags & OF_GPIO_ACTIVE_LOW;
-
- if (!gpio_is_valid(gpio)) {
- if (i < GPIO_SPEED) {
- dev_err(dev, "incorrect GPIO%d: %d\n",
- i, gpio);
- goto err_gpios;
- } else {
- dev_info(dev, "assuming board doesn't have "
- "%s gpio\n", i == GPIO_SPEED ?
- "speed" : "power");
- continue;
- }
- }
+ if (i < GPIO_SPEED)
+ fhci->gpiods[i] = devm_gpiod_get_index(dev,
+ NULL, i, GPIOD_IN);
+
+ else
+ fhci->gpiods[i] = devm_gpiod_get_index_optional(dev,
+ NULL, i, GPIOD_OUT_LOW);
- ret = gpio_request(gpio, dev_name(dev));
- if (ret) {
- dev_err(dev, "failed to request gpio %d", i);
+ if (IS_ERR(fhci->gpiods[i])) {
+ dev_err(dev, "incorrect GPIO%d: %ld\n",
+ i, PTR_ERR(fhci->gpiods[i]));
goto err_gpios;
}
-
- if (i >= GPIO_SPEED) {
- ret = gpio_direction_output(gpio, 0);
- if (ret) {
- dev_err(dev, "failed to set gpio %d as "
- "an output\n", i);
- i++;
- goto err_gpios;
- }
+ if (!fhci->gpiods[i]) {
+ dev_info(dev, "assuming board doesn't have "
+ "%s gpio\n", i == GPIO_SPEED ?
+ "speed" : "power");
}
}
@@ -766,10 +749,6 @@ err_pins:
while (--j >= 0)
qe_pin_free(fhci->pins[j]);
err_gpios:
- while (--i >= 0) {
- if (gpio_is_valid(fhci->gpios[i]))
- gpio_free(fhci->gpios[i]);
- }
cpm_muram_free(pram_addr);
err_pram:
iounmap(hcd->regs);
@@ -782,18 +761,12 @@ static int fhci_remove(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
- int i;
int j;
usb_remove_hcd(hcd);
free_irq(fhci->timer->irq, hcd);
gtm_put_timer16(fhci->timer);
cpm_muram_free(cpm_muram_offset(fhci->pram));
- for (i = 0; i < NUM_GPIOS; i++) {
- if (!gpio_is_valid(fhci->gpios[i]))
- continue;
- gpio_free(fhci->gpios[i]);
- }
for (j = 0; j < NUM_PINS; j++)
qe_pin_free(fhci->pins[j]);
fhci_dfs_destroy(fhci);
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index c359dcdb9b13..5f48660ebdfa 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -19,7 +19,7 @@
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
#include "fhci.h"
@@ -38,13 +38,12 @@ static u8 root_hub_des[] = {
static void fhci_gpio_set_value(struct fhci_hcd *fhci, int gpio_nr, bool on)
{
- int gpio = fhci->gpios[gpio_nr];
- bool alow = fhci->alow_gpios[gpio_nr];
+ struct gpio_desc *gpiod = fhci->gpiods[gpio_nr];
- if (!gpio_is_valid(gpio))
+ if (!gpiod)
return;
- gpio_set_value(gpio, on ^ alow);
+ gpiod_set_value(gpiod, on);
mdelay(5);
}
@@ -129,9 +128,9 @@ void fhci_io_port_generate_reset(struct fhci_hcd *fhci)
{
fhci_dbg(fhci, "-> %s\n", __func__);
- gpio_direction_output(fhci->gpios[GPIO_USBOE], 0);
- gpio_direction_output(fhci->gpios[GPIO_USBTP], 0);
- gpio_direction_output(fhci->gpios[GPIO_USBTN], 0);
+ gpiod_direction_output(fhci->gpiods[GPIO_USBOE], 0);
+ gpiod_direction_output(fhci->gpiods[GPIO_USBTP], 0);
+ gpiod_direction_output(fhci->gpiods[GPIO_USBTN], 0);
mdelay(5);
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 81fbc019a9b3..1f57b0989485 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/immap_qe.h>
@@ -242,8 +243,7 @@ struct fhci_hcd {
enum qe_clock fullspeed_clk;
enum qe_clock lowspeed_clk;
struct qe_pin *pins[NUM_PINS];
- int gpios[NUM_GPIOS];
- bool alow_gpios[NUM_GPIOS];
+ struct gpio_desc *gpiods[NUM_GPIOS];
struct qe_usb_ctlr __iomem *regs; /* I/O memory used to communicate */
struct fhci_pram __iomem *pram; /* Parameter RAM */
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index f8c111e08a0d..3d1dbcf4c073 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5692,7 +5692,6 @@ static int __init fotg210_hcd_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 98326465e2dc..533537ef3c21 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -62,8 +62,6 @@ struct ohci_at91_priv {
#define DRIVER_DESC "OHCI Atmel driver"
-static const char hcd_name[] = "ohci-atmel";
-
static struct hc_driver __read_mostly ohci_at91_hc_driver;
static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst = {
@@ -699,7 +697,6 @@ static int __init ohci_at91_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&ohci_at91_hc_driver, &ohci_at91_drv_overrides);
/*
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 1371b0c249ec..d4818e8d652b 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -551,7 +551,6 @@ static int __init ohci_da8xx_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", DRV_NAME);
ohci_init_driver(&ohci_da8xx_hc_driver, &da8xx_overrides);
/*
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 5f5e8a64c8e2..8d7977fd5d3b 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -21,7 +21,6 @@
#define DRIVER_DESC "OHCI Exynos driver"
-static const char hcd_name[] = "ohci-exynos";
static struct hc_driver __read_mostly exynos_ohci_hc_driver;
#define to_exynos_ohci(hcd) (struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
@@ -310,7 +309,6 @@ static int __init ohci_exynos_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&exynos_ohci_hc_driver, &exynos_overrides);
return platform_driver_register(&exynos_ohci_driver);
}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index c4c821c2288c..0457dd9f6c19 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1276,7 +1276,6 @@ static int __init ohci_hcd_mod_init(void)
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
pr_debug ("%s: block sizes: ed %zd td %zd\n", hcd_name,
sizeof (struct ed), sizeof (struct td));
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 106a6bcefb08..5b32e683e367 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -275,8 +275,6 @@ static int __init ohci_nxp_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_nxp_hc_driver, NULL);
return platform_driver_register(&ohci_hcd_nxp_driver);
}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index f5bc9c8bdc9a..cb29701df911 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -423,8 +423,6 @@ static int __init ohci_omap_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_omap_hc_driver, &omap_overrides);
return platform_driver_register(&ohci_hcd_omap_driver);
}
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 41efe927d8f3..d7b4f40f9ff4 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -282,7 +282,7 @@ MODULE_DEVICE_TABLE (pci, pci_ids);
static int ohci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- return usb_hcd_pci_probe(dev, id, &ohci_pci_hc_driver);
+ return usb_hcd_pci_probe(dev, &ohci_pci_hc_driver);
}
/* pci driver glue; this is a "new style" PCI driver module */
@@ -306,8 +306,6 @@ static int __init ohci_pci_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
#ifdef CONFIG_PM
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 0adae6265127..a84305091c43 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -41,8 +41,6 @@ struct ohci_platform_priv {
struct reset_control *resets;
};
-static const char hcd_name[] = "ohci-platform";
-
static int ohci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -289,7 +287,7 @@ static int ohci_platform_suspend(struct device *dev)
return ret;
}
-static int ohci_platform_resume(struct device *dev)
+static int ohci_platform_resume_common(struct device *dev, bool hibernated)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
@@ -301,7 +299,7 @@ static int ohci_platform_resume(struct device *dev)
return err;
}
- ohci_resume(hcd, false);
+ ohci_resume(hcd, hibernated);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
@@ -309,6 +307,16 @@ static int ohci_platform_resume(struct device *dev)
return 0;
}
+
+static int ohci_platform_resume(struct device *dev)
+{
+ return ohci_platform_resume_common(dev, false);
+}
+
+static int ohci_platform_restore(struct device *dev)
+{
+ return ohci_platform_resume_common(dev, true);
+}
#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id ohci_platform_ids[] = {
@@ -325,8 +333,16 @@ static const struct platform_device_id ohci_platform_table[] = {
};
MODULE_DEVICE_TABLE(platform, ohci_platform_table);
-static SIMPLE_DEV_PM_OPS(ohci_platform_pm_ops, ohci_platform_suspend,
- ohci_platform_resume);
+#ifdef CONFIG_PM_SLEEP
+static const struct dev_pm_ops ohci_platform_pm_ops = {
+ .suspend = ohci_platform_suspend,
+ .resume = ohci_platform_resume,
+ .freeze = ohci_platform_suspend,
+ .thaw = ohci_platform_resume,
+ .poweroff = ohci_platform_suspend,
+ .restore = ohci_platform_restore,
+};
+#endif
static struct platform_driver ohci_platform_driver = {
.id_table = ohci_platform_table,
@@ -335,7 +351,9 @@ static struct platform_driver ohci_platform_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ohci-platform",
+#ifdef CONFIG_PM_SLEEP
.pm = &ohci_platform_pm_ops,
+#endif
.of_match_table = ohci_platform_ids,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
}
@@ -346,8 +364,6 @@ static int __init ohci_platform_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ohci_platform_driver);
}
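
The hunk above replaces SIMPLE_DEV_PM_OPS(), which points freeze/thaw/poweroff/restore at the same suspend/resume callbacks, with a hand-rolled dev_pm_ops so that resume from hibernation can be told apart from an ordinary resume. A minimal sketch of the same pattern, using hypothetical foo_* names that are not part of the patch, could look like:

#include <linux/device.h>
#include <linux/pm.h>

/* Sketch only: foo_* names are invented for illustration. */
static int foo_suspend(struct device *dev)
{
	/* quiesce the controller */
	return 0;
}

static int foo_resume_common(struct device *dev, bool hibernated)
{
	/* re-enable clocks/power; hibernated tells the core whether this
	 * is a restore from a hibernation image */
	return 0;
}

static int foo_resume(struct device *dev)
{
	return foo_resume_common(dev, false);
}

static int foo_restore(struct device *dev)
{
	return foo_resume_common(dev, true);
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend  = foo_suspend,
	.resume   = foo_resume,
	.freeze   = foo_suspend,
	.thaw     = foo_resume,
	.poweroff = foo_suspend,
	.restore  = foo_restore,
};

Guarding the table and the .pm assignment with CONFIG_PM_SLEEP, as the driver does above, keeps the callbacks from being referenced when sleep support is compiled out.
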
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index ab4f610a0140..a1dad8745622 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -114,8 +114,6 @@
#define PXA_UHC_MAX_PORTNUM 3
-static const char hcd_name[] = "ohci-pxa27x";
-
static struct hc_driver __read_mostly ohci_pxa27x_hc_driver;
struct pxa27x_ohci {
@@ -608,8 +606,6 @@ static int __init ohci_pxa27x_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);
ohci_pxa27x_hc_driver.hub_control = pxa27x_ohci_hub_control;
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 12264c048601..85a0a9ae0095 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -39,8 +39,6 @@
#define DRIVER_DESC "OHCI S3C2410 driver"
-static const char hcd_name[] = "ohci-s3c2410";
-
static struct clk *clk;
static struct clk *usb_clk;
@@ -474,7 +472,6 @@ static int __init ohci_s3c2410_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&ohci_s3c2410_hc_driver, NULL);
/*
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 9b81f420656d..196951a27f3f 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -23,7 +23,6 @@
#define DRIVER_DESC "OHCI SPEAr driver"
-static const char hcd_name[] = "SPEAr-ohci";
struct spear_ohci {
struct clk *clk;
};
@@ -179,8 +178,6 @@ static int __init ohci_spear_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_spear_hc_driver, &spear_overrides);
return platform_driver_register(&spear_ohci_hcd_driver);
}
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index ac796ccd93ef..82eef3c62e11 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -40,8 +40,6 @@ struct st_ohci_platform_priv {
#define hcd_to_ohci_priv(h) \
((struct st_ohci_platform_priv *)hcd_to_ohci(h)->priv)
-static const char hcd_name[] = "ohci-st";
-
static int st_ohci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -324,8 +322,6 @@ static int __init ohci_platform_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ohci_platform_driver);
}
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index d879d6af5710..95240c9c45bd 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3190,7 +3190,6 @@ static int __init u132_hcd_init(void)
u132_exiting = 0;
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "driver %s\n", hcd_name);
workqueue = create_singlethread_workqueue("u132");
if (!workqueue)
return -ENOMEM;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index d90b869f5f40..c22b51af83fc 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -867,8 +867,6 @@ static int __init uhci_hcd_init(void)
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "uhci_hcd: " DRIVER_DESC "%s\n",
- ignore_oc ? ", overcurrent ignored" : "");
set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
#ifdef CONFIG_DYNAMIC_DEBUG
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 9b88745d247f..3592f757fe05 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -294,7 +294,7 @@ MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
static int uhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- return usb_hcd_pci_probe(dev, id, &uhci_driver);
+ return usb_hcd_pci_probe(dev, &uhci_driver);
}
static struct pci_driver uhci_pci_driver = {
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index e61155fa6379..f1367b53b260 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -988,7 +988,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *
dbc->driver = driver;
if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
- return NULL;
+ goto err;
INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
spin_lock_init(&dbc->lock);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8c19e151a945..9e56aa28efcd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -641,7 +641,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
num_stream_ctxs, &stream_info->ctx_array_dma,
mem_flags);
if (!stream_info->stream_ctx_array)
- goto cleanup_ctx;
+ goto cleanup_ring_array;
memset(stream_info->stream_ctx_array, 0,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
@@ -702,6 +702,11 @@ cleanup_rings:
}
xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
+ xhci_free_stream_ctx(xhci,
+ stream_info->num_stream_ctxs,
+ stream_info->stream_ctx_array,
+ stream_info->ctx_array_dma);
+cleanup_ring_array:
kfree(stream_info->stream_rings);
cleanup_info:
kfree(stream_info);
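
The added cleanup_ring_array label above makes the unwind path free the stream context array that was allocated just before the failing ring-array allocation. The usual kernel idiom is to order cleanup labels in reverse of the allocations so each failure point jumps to a label that frees only what already succeeded; a generic sketch with made-up names:

#include <linux/errno.h>
#include <linux/slab.h>

/* Sketch of reverse-order unwinding; foo_alloc_pair() is hypothetical. */
static int foo_alloc_pair(void **out_a, void **out_b)
{
	void *a, *b;

	a = kzalloc(32, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(32, GFP_KERNEL);
	if (!b)
		goto free_a;	/* free only what was already allocated */

	*out_a = a;
	*out_b = b;
	return 0;

free_a:
	kfree(a);
	return -ENOMEM;
}
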
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index dce6c0ec8d34..40228a3d77a0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -431,7 +431,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
* to say USB 2.0, but I'm not sure what the implications would be in
* the other parts of the HCD code.
*/
- retval = usb_hcd_pci_probe(dev, id, &xhci_pci_hc_driver);
+ retval = usb_hcd_pci_probe(dev, &xhci_pci_hc_driver);
if (retval)
goto put_runtime_pm;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index a8641b6536ee..5fb55bf19493 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -123,7 +123,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
};
static const struct xhci_plat_priv xhci_plat_brcm = {
- .quirks = XHCI_RESET_ON_RESUME,
+ .quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS,
};
static const struct of_device_id usb_xhci_of_match[] = {
@@ -437,7 +437,16 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
* to do wakeup during suspend.
*/
- return xhci_suspend(xhci, device_may_wakeup(dev));
+ ret = xhci_suspend(xhci, device_may_wakeup(dev));
+ if (ret)
+ return ret;
+
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+ clk_disable_unprepare(xhci->clk);
+ clk_disable_unprepare(xhci->reg_clk);
+ }
+
+ return 0;
}
static int __maybe_unused xhci_plat_resume(struct device *dev)
@@ -446,6 +455,11 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+ clk_prepare_enable(xhci->clk);
+ clk_prepare_enable(xhci->reg_clk);
+ }
+
ret = xhci_priv_resume_quirk(hcd);
if (ret)
return ret;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 38649284ff88..5176765c4013 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1183,7 +1183,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* re-initialize the HC on Restore Error, or Host Controller Error */
if (temp & (STS_SRE | STS_HCE)) {
reinit_xhc = true;
- xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ if (!xhci->broken_suspend)
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
}
if (reinit_xhc) {
@@ -1482,7 +1483,7 @@ EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
* address from the XHCI endpoint index.
*/
-unsigned int xhci_get_endpoint_address(unsigned int ep_index)
+static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
unsigned int number = DIV_ROUND_UP(ep_index, 2);
unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
@@ -4095,7 +4096,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
slot_id = command->slot_id;
if (!slot_id || command->status != COMP_SUCCESS) {
- xhci_err(xhci, "Error while assigning device slot ID\n");
+ xhci_err(xhci, "Error while assigning device slot ID: %s\n",
+ xhci_trb_comp_code_string(command->status));
xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
HCS_MAX_SLOTS(
readl(&xhci->cap_regs->hcs_params1)));
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7caa0db5e826..c0964fe8ac12 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1807,8 +1807,6 @@ struct xhci_hcd {
struct xhci_erst erst;
/* Scratchpad */
struct xhci_scratchpad *scratchpad;
- /* Store LPM test failed devices' information */
- struct list_head lpm_failed_devs;
/* slot enabling and address device helpers */
/* these are not thread safe so use mutex */
@@ -1827,7 +1825,6 @@ struct xhci_hcd {
/* Host controller watchdog timer structures */
unsigned int xhc_state;
unsigned long run_graceperiod;
- u32 command;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
*
@@ -1899,6 +1896,7 @@ struct xhci_hcd {
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
#define XHCI_BROKEN_D3COLD BIT_ULL(41)
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
+#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -2041,7 +2039,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
-unsigned int xhci_get_endpoint_address(unsigned int ep_index);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index e9437a176518..ea39243efee3 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -177,10 +177,6 @@ static int idmouse_create_image(struct usb_idmouse *dev)
bytes_read += bulk_read;
}
- /* reset the device */
-reset:
- ftip_command(dev, FTIP_RELEASE, 0, 0);
-
/* check for valid image */
/* right border should be black (0x00) */
for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH)
@@ -192,6 +188,10 @@ reset:
if (dev->bulk_in_buffer[bytes_read] != 0xFF)
return -EAGAIN;
+ /* reset the device */
+reset:
+ ftip_command(dev, FTIP_RELEASE, 0, 0);
+
/* should be IMGSIZE == 65040 */
dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n",
bytes_read);
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 04c4e3fed094..54337d72bb9f 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -400,7 +400,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
{
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
- int len, err;
+ int len;
u32 property_u32 = 0;
const char *cproperty_char;
char str[USB251XB_STRING_BUFSIZE / 2];
@@ -416,13 +416,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
hub->skip_config = 0;
hub->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (PTR_ERR(hub->gpio_reset) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(hub->gpio_reset)) {
- err = PTR_ERR(hub->gpio_reset);
- dev_err(dev, "unable to request GPIO reset pin (%d)\n", err);
- return err;
- }
+ if (IS_ERR(hub->gpio_reset))
+ return dev_err_probe(dev, PTR_ERR(hub->gpio_reset),
+ "unable to request GPIO reset pin\n");
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
@@ -547,7 +543,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
hub->boost_up = USB251XB_DEF_BOOST_UP;
cproperty_char = of_get_property(np, "manufacturer", NULL);
- strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
+ strscpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
sizeof(str));
hub->manufacturer_len = strlen(str) & 0xFF;
memset(hub->manufacturer, 0, USB251XB_STRING_BUFSIZE);
@@ -557,7 +553,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
USB251XB_STRING_BUFSIZE);
cproperty_char = of_get_property(np, "product", NULL);
- strlcpy(str, cproperty_char ? : data->product_str, sizeof(str));
+ strscpy(str, cproperty_char ? : data->product_str, sizeof(str));
hub->product_len = strlen(str) & 0xFF;
memset(hub->product, 0, USB251XB_STRING_BUFSIZE);
len = min_t(size_t, USB251XB_STRING_BUFSIZE / 2, strlen(str));
@@ -566,7 +562,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
USB251XB_STRING_BUFSIZE);
cproperty_char = of_get_property(np, "serial", NULL);
- strlcpy(str, cproperty_char ? : USB251XB_DEF_SERIAL_STRING,
+ strscpy(str, cproperty_char ? : USB251XB_DEF_SERIAL_STRING,
sizeof(str));
hub->serial_len = strlen(str) & 0xFF;
memset(hub->serial, 0, USB251XB_STRING_BUFSIZE);
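
strscpy() differs from the strlcpy() it replaces in the hunks above in that it always NUL-terminates the destination and returns -E2BIG on truncation instead of the full source length, so truncation can be detected without re-reading the source. A small illustrative sketch (copy_example() is not part of the driver):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

static void copy_example(const char *src)
{
	char buf[8];
	ssize_t n;

	n = strscpy(buf, src, sizeof(buf));	/* buf is always NUL-terminated */
	if (n == -E2BIG)
		pr_debug("source truncated to %zu bytes\n", sizeof(buf) - 1);
}
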
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 3c9fa663475f..c70ca475c7c7 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -160,6 +160,7 @@ static int usb3503_probe(struct usb3503 *hub)
struct usb3503_platform_data *pdata = dev_get_platdata(dev);
struct device_node *np = dev->of_node;
int err;
+ bool is_clk_enabled = false;
u32 mode = USB3503_MODE_HUB;
const u32 *property;
enum gpiod_flags flags;
@@ -217,6 +218,7 @@ static int usb3503_probe(struct usb3503 *hub)
return err;
}
+ is_clk_enabled = true;
property = of_get_property(np, "disabled-ports", &len);
if (property && (len / sizeof(u32)) > 0) {
int i;
@@ -236,20 +238,26 @@ static int usb3503_probe(struct usb3503 *hub)
else
flags = GPIOD_OUT_HIGH;
hub->intn = devm_gpiod_get_optional(dev, "intn", flags);
- if (IS_ERR(hub->intn))
- return PTR_ERR(hub->intn);
+ if (IS_ERR(hub->intn)) {
+ err = PTR_ERR(hub->intn);
+ goto err_clk;
+ }
if (hub->intn)
gpiod_set_consumer_name(hub->intn, "usb3503 intn");
hub->connect = devm_gpiod_get_optional(dev, "connect", GPIOD_OUT_LOW);
- if (IS_ERR(hub->connect))
- return PTR_ERR(hub->connect);
+ if (IS_ERR(hub->connect)) {
+ err = PTR_ERR(hub->connect);
+ goto err_clk;
+ }
if (hub->connect)
gpiod_set_consumer_name(hub->connect, "usb3503 connect");
hub->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(hub->reset))
- return PTR_ERR(hub->reset);
+ if (IS_ERR(hub->reset)) {
+ err = PTR_ERR(hub->reset);
+ goto err_clk;
+ }
if (hub->reset) {
/* Datasheet defines a hardware reset to be at least 100us */
usleep_range(100, 10000);
@@ -265,6 +273,11 @@ static int usb3503_probe(struct usb3503 *hub)
(hub->mode == USB3503_MODE_HUB) ? "hub" : "standby");
return 0;
+
+err_clk:
+ if (is_clk_enabled)
+ clk_disable_unprepare(hub->clk);
+ return err;
}
static int usb3503_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 0be8efcda15d..b00d92db5dfd 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -502,7 +502,7 @@ static size_t parport_uss720_epp_write_data(struct parport *pp, const void *buf,
#else
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
- int rlen;
+ int rlen = 0;
int i;
if (!usbdev)
@@ -563,7 +563,7 @@ static size_t parport_uss720_ecp_write_data(struct parport *pp, const void *buff
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
- int rlen;
+ int rlen = 0;
int i;
if (!usbdev)
@@ -581,7 +581,7 @@ static size_t parport_uss720_ecp_read_data(struct parport *pp, void *buffer, siz
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
- int rlen;
+ int rlen = 0;
int i;
if (!usbdev)
@@ -614,7 +614,7 @@ static size_t parport_uss720_write_compat(struct parport *pp, const void *buffer
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
- int rlen;
+ int rlen = 0;
int i;
if (!usbdev)
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f48a23adbc35..094e812e9e69 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1268,6 +1268,11 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
/* don't do anything here: "fault" will set up page table entries */
vma->vm_ops = &mon_bin_vm_ops;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ vma->vm_flags &= ~VM_MAYWRITE;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index 0ca173af87bb..a3a6282893d0 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -978,8 +978,6 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
goto irq_err;
}
- device_init_wakeup(dev, true);
-
/* power down device IP for power saving by default */
mtu3_stop(mtu);
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 4cb65346789d..d78ae52b4e26 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -356,6 +356,8 @@ static int mtu3_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
+ device_init_wakeup(dev, true);
+
ret = ssusb_rscs_init(ssusb);
if (ret)
goto comm_init_err;
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index fd4ae2dd24e5..a4e55b0c52cf 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -523,11 +523,9 @@ static int da8xx_probe(struct platform_device *pdev)
}
glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
- if (IS_ERR(glue->phy)) {
- if (PTR_ERR(glue->phy) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get phy\n");
- return PTR_ERR(glue->phy);
- }
+ if (IS_ERR(glue->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->phy),
+ "failed to get phy\n");
glue->dev = &pdev->dev;
glue->clk = clk;
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 417c30bff9ca..d1e4e0deb753 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -105,7 +105,6 @@ static int jz4740_musb_init(struct musb *musb)
.driver_data = glue,
.fwnode = dev_fwnode(dev),
};
- int err;
glue->musb = musb;
@@ -113,12 +112,9 @@ static int jz4740_musb_init(struct musb *musb)
musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
else
musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
- if (IS_ERR(musb->xceiv)) {
- err = PTR_ERR(musb->xceiv);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "No transceiver configured: %d\n", err);
- return err;
- }
+ if (IS_ERR(musb->xceiv))
+ return dev_err_probe(dev, PTR_ERR(musb->xceiv),
+ "No transceiver configured\n");
glue->role_sw = usb_role_switch_register(dev, &role_sw_desc);
if (IS_ERR(glue->role_sw)) {
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index bbbcfd49fb35..03027c6fa3ab 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2595,9 +2595,7 @@ fail2:
musb_platform_exit(musb);
fail1:
- if (status != -EPROBE_DEFER)
- dev_err(musb->controller,
- "%s failed with status %d\n", __func__, status);
+ dev_err_probe(musb->controller, status, "%s failed\n", __func__);
musb_free(musb);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index c963cb8565f2..9589243e8951 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -718,10 +718,8 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
dc = dma_request_chan(dev->parent, str);
if (IS_ERR(dc)) {
- ret = PTR_ERR(dc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request %s: %d.\n",
- str, ret);
+ ret = dev_err_probe(dev, PTR_ERR(dc),
+ "Failed to request %s.\n", str);
goto err;
}
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index daada4b66a92..6704a62a1665 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -760,6 +760,9 @@ static void rxstate(struct musb *musb, struct musb_request *req)
musb_writew(epio, MUSB_RXCSR, csr);
buffer_aint_mapped:
+ fifo_count = min_t(unsigned int,
+ request->length - request->actual,
+ (unsigned int)fifo_count);
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
request->actual += fifo_count;
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 961c858fb349..7f9a999cd5ff 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -743,31 +743,20 @@ static int sunxi_musb_probe(struct platform_device *pdev)
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
glue->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(glue->rst)) {
- if (PTR_ERR(glue->rst) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Error getting reset %ld\n",
- PTR_ERR(glue->rst));
- return PTR_ERR(glue->rst);
- }
+ if (IS_ERR(glue->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->rst),
+ "Error getting reset\n");
}
glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
- if (IS_ERR(glue->extcon)) {
- if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Invalid or missing extcon\n");
- return PTR_ERR(glue->extcon);
- }
+ if (IS_ERR(glue->extcon))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->extcon),
+ "Invalid or missing extcon\n");
glue->phy = devm_phy_get(&pdev->dev, "usb");
- if (IS_ERR(glue->phy)) {
- if (PTR_ERR(glue->phy) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Error getting phy %ld\n",
- PTR_ERR(glue->phy));
- return PTR_ERR(glue->phy);
- }
+ if (IS_ERR(glue->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->phy),
+ "Error getting phy\n");
glue->usb_phy = usb_phy_generic_register();
if (IS_ERR(glue->usb_phy)) {
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 34b9f8140187..3dc5c04e7cbf 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -230,12 +230,9 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
}
- if (err == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (err) {
- dev_err(dev, "Error requesting RESET or VBUS GPIO\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "Error requesting RESET or VBUS GPIO\n");
if (nop->gpiod_reset)
gpiod_direction_output(nop->gpiod_reset, 1);
diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c
index 4025da20b3fd..f16adcacdce3 100644
--- a/drivers/usb/phy/phy-jz4770.c
+++ b/drivers/usb/phy/phy-jz4770.c
@@ -321,27 +321,18 @@ static int jz4770_phy_probe(struct platform_device *pdev)
}
priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- err = PTR_ERR(priv->clk);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clock\n");
- return err;
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "Failed to get clock\n");
priv->vcc_supply = devm_regulator_get(dev, "vcc");
- if (IS_ERR(priv->vcc_supply)) {
- err = PTR_ERR(priv->vcc_supply);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get regulator\n");
- return err;
- }
+ if (IS_ERR(priv->vcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vcc_supply),
+ "Failed to get regulator\n");
err = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Unable to register PHY\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Unable to register PHY\n");
return devm_add_action_or_reset(dev, ingenic_usb_phy_remove, &priv->phy);
}
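
The conversions above fold the open-coded "-EPROBE_DEFER stays silent, anything else prints and returns" pattern into dev_err_probe(), which suppresses the message on probe deferral (recording it as the deferral reason instead) and passes the error code straight through so it can be returned in one statement. A minimal hypothetical probe fragment using the same call:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch only: foo_probe() is an invented example, not driver code. */
static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get clock\n");

	return 0;
}

Any -EPROBE_DEFER returned this way is retried later by the driver core without flooding the log.
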
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 8a262c5a0408..d2836ef5d15c 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -144,8 +144,8 @@
#define MXS_PHY_NEED_IP_FIX BIT(3)
/* Minimum and maximum values for device tree entries */
-#define MXS_PHY_TX_CAL45_MIN 30
-#define MXS_PHY_TX_CAL45_MAX 55
+#define MXS_PHY_TX_CAL45_MIN 35
+#define MXS_PHY_TX_CAL45_MAX 54
#define MXS_PHY_TX_D_CAL_MIN 79
#define MXS_PHY_TX_D_CAL_MAX 119
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 68cd4b68e3a2..f0240107edb1 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -1440,16 +1440,22 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return err;
}
- gpiod = devm_gpiod_get_from_of_node(&pdev->dev, np,
- "nvidia,phy-reset-gpio",
- 0, GPIOD_OUT_HIGH,
- "ulpi_phy_reset_b");
+ gpiod = devm_gpiod_get(&pdev->dev, "nvidia,phy-reset",
+ GPIOD_OUT_HIGH);
err = PTR_ERR_OR_ZERO(gpiod);
if (err) {
dev_err(&pdev->dev,
"Request failed for reset GPIO: %d\n", err);
return err;
}
+
+ err = gpiod_set_consumer_name(gpiod, "ulpi_phy_reset_b");
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to set up reset GPIO name: %d\n", err);
+ return err;
+ }
+
tegra_phy->reset_gpio = gpiod;
phy = devm_otg_ulpi_create(&pdev->dev,
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 39eaa7b97c40..9452291f1703 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -189,7 +189,7 @@ static void ark3116_port_remove(struct usb_serial_port *port)
static void ark3116_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct ark3116_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 8107e4b5b03b..9331a562dac0 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -44,7 +44,8 @@ static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
static void belkin_sa_process_read_urb(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios * old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
static int belkin_sa_tiocmget(struct tty_struct *tty);
static int belkin_sa_tiocmset(struct tty_struct *tty,
@@ -273,7 +274,8 @@ static void belkin_sa_process_read_urb(struct urb *urb)
}
static void belkin_sa_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct belkin_sa_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index af01a462cc43..6e1b87e67304 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -106,7 +106,7 @@ struct ch341_private {
static void ch341_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios);
+ const struct ktermios *old_termios);
static int ch341_control_out(struct usb_device *dev, u8 request,
u16 value, u16 index)
@@ -482,7 +482,8 @@ err_kill_interrupt_urb:
* tty->termios contains the new setting to be used.
*/
static void ch341_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned baud_rate;
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index b97aa40ca4d1..da19a5fa414f 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -189,8 +189,8 @@ static int usb_console_setup(struct console *co, char *options)
info->port = NULL;
usb_autopm_put_interface(serial->interface);
error_get_interface:
- usb_serial_put(serial);
mutex_unlock(&serial->disc_mutex);
+ usb_serial_put(serial);
return retval;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index a34957c4b64c..3bcec419f463 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -31,9 +31,9 @@
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
static void cp210x_close(struct usb_serial_port *);
static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
- struct ktermios *);
+ const struct ktermios *);
static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
- struct ktermios*);
+ const struct ktermios *);
static bool cp210x_tx_empty(struct usb_serial_port *port);
static int cp210x_tiocmget(struct tty_struct *);
static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
@@ -1040,7 +1040,8 @@ static speed_t cp210x_get_actual_rate(speed_t baud)
* otherwise.
*/
static void cp210x_change_speed(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
@@ -1122,7 +1123,8 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
}
static void cp210x_set_flow_control(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
@@ -1232,7 +1234,8 @@ out_unlock:
}
static void cp210x_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
u16 bits;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 5fbcc155e8f5..1e0c028c5ec9 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -125,7 +125,8 @@ static void cypress_send(struct usb_serial_port *port);
static unsigned int cypress_write_room(struct tty_struct *tty);
static void cypress_earthmate_init_termios(struct tty_struct *tty);
static void cypress_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int cypress_tiocmget(struct tty_struct *tty);
static int cypress_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -859,7 +860,8 @@ static void cypress_earthmate_init_termios(struct tty_struct *tty)
}
static void cypress_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index af65eb863d70..45d688e9b93f 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -215,7 +215,8 @@ static int digi_transmit_idle(struct usb_serial_port *port,
static void digi_rx_throttle(struct tty_struct *tty);
static void digi_rx_unthrottle(struct tty_struct *tty);
static void digi_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void digi_break_ctl(struct tty_struct *tty, int break_state);
static int digi_tiocmget(struct tty_struct *tty);
static int digi_tiocmset(struct tty_struct *tty, unsigned int set,
@@ -649,7 +650,8 @@ static void digi_rx_unthrottle(struct tty_struct *tty)
static void digi_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct digi_port *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index d9f20256a6a8..2dd58cd9f0cc 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -603,7 +603,8 @@ static int f81232_port_disable(struct usb_serial_port *port)
}
static void f81232_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct f81232_private *priv = usb_get_serial_port_data(port);
u8 new_lcr = 0;
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index d789c1ec87b3..ddfcd72eb0ae 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -944,8 +944,8 @@ static int f81534_calc_num_ports(struct usb_serial *serial,
}
static void f81534_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
u8 new_lcr = 0;
int status;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 52d59be92034..05e28a5ce42b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -47,10 +47,27 @@
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
+enum ftdi_chip_type {
+ SIO,
+ FT232A,
+ FT232B,
+ FT2232C,
+ FT232R,
+ FT232H,
+ FT2232H,
+ FT4232H,
+ FT4232HA,
+ FT232HP,
+ FT233HP,
+ FT2232HP,
+ FT2233HP,
+ FT4232HP,
+ FT4233HP,
+ FTX,
+};
struct ftdi_private {
enum ftdi_chip_type chip_type;
- /* type of device, either SIO or FT8U232AM */
int baud_base; /* baud base clock for divisor setting */
int custom_divisor; /* custom_divisor kludge, this is for
baud_base (different from what goes to the
@@ -62,8 +79,7 @@ struct ftdi_private {
unsigned long last_dtr_rts; /* saved modem control outputs */
char prev_status; /* Used for TIOCMIWAIT */
char transmit_empty; /* If transmitter is empty or not */
- u16 interface; /* FT2232C, FT2232H or FT4232H port interface
- (0 for FT232/245) */
+ u16 channel; /* channel index, or 0 for legacy types */
speed_t force_baud; /* if non-zero, force the baud rate to
this value */
@@ -84,8 +100,7 @@ struct ftdi_private {
#endif
};
-/* struct ftdi_sio_quirk is used by devices requiring special attention. */
-struct ftdi_sio_quirk {
+struct ftdi_quirk {
int (*probe)(struct usb_serial *);
/* Special settings for probed ports. */
void (*port_probe)(struct ftdi_private *);
@@ -98,27 +113,27 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial);
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
-static const struct ftdi_sio_quirk ftdi_jtag_quirk = {
+static const struct ftdi_quirk ftdi_jtag_quirk = {
.probe = ftdi_jtag_probe,
};
-static const struct ftdi_sio_quirk ftdi_NDI_device_quirk = {
+static const struct ftdi_quirk ftdi_NDI_device_quirk = {
.probe = ftdi_NDI_device_setup,
};
-static const struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
+static const struct ftdi_quirk ftdi_USB_UIRT_quirk = {
.port_probe = ftdi_USB_UIRT_setup,
};
-static const struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
+static const struct ftdi_quirk ftdi_HE_TIRA1_quirk = {
.port_probe = ftdi_HE_TIRA1_setup,
};
-static const struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+static const struct ftdi_quirk ftdi_stmclite_quirk = {
.probe = ftdi_stmclite_probe,
};
-static const struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
+static const struct ftdi_quirk ftdi_8u2232c_quirk = {
.probe = ftdi_8u2232c_probe,
};
@@ -180,6 +195,13 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FTX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT2233HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT4233HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT2232HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT4232HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT233HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT232HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT4232HA_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -1061,15 +1083,22 @@ static const struct usb_device_id id_table_combined[] = {
MODULE_DEVICE_TABLE(usb, id_table_combined);
static const char *ftdi_chip_name[] = {
- [SIO] = "SIO", /* the serial part of FT8U100AX */
- [FT8U232AM] = "FT8U232AM",
- [FT232BM] = "FT232BM",
- [FT2232C] = "FT2232C",
- [FT232RL] = "FT232RL",
- [FT2232H] = "FT2232H",
- [FT4232H] = "FT4232H",
- [FT232H] = "FT232H",
- [FTX] = "FT-X"
+ [SIO] = "SIO", /* the serial part of FT8U100AX */
+ [FT232A] = "FT232A",
+ [FT232B] = "FT232B",
+ [FT2232C] = "FT2232C/D",
+ [FT232R] = "FT232R",
+ [FT232H] = "FT232H",
+ [FT2232H] = "FT2232H",
+ [FT4232H] = "FT4232H",
+ [FT4232HA] = "FT4232HA",
+ [FT232HP] = "FT232HP",
+ [FT233HP] = "FT233HP",
+ [FT2232HP] = "FT2232HP",
+ [FT2233HP] = "FT2233HP",
+ [FT4232HP] = "FT4232HP",
+ [FT4233HP] = "FT4233HP",
+ [FTX] = "FT-X",
};
@@ -1078,74 +1107,12 @@ static const char *ftdi_chip_name[] = {
#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
/* End TIOCMIWAIT */
-/* function prototypes for a FTDI serial converter */
-static int ftdi_sio_probe(struct usb_serial *serial,
- const struct usb_device_id *id);
-static int ftdi_sio_port_probe(struct usb_serial_port *port);
-static void ftdi_sio_port_remove(struct usb_serial_port *port);
-static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port);
-static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
-static void ftdi_process_read_urb(struct urb *urb);
-static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size);
static void ftdi_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
-static int ftdi_tiocmget(struct tty_struct *tty);
-static int ftdi_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear);
-static int ftdi_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
-static void get_serial_info(struct tty_struct *tty, struct serial_struct *ss);
-static int set_serial_info(struct tty_struct *tty,
- struct serial_struct *ss);
-static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
-static bool ftdi_tx_empty(struct usb_serial_port *port);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int ftdi_get_modem_status(struct usb_serial_port *port,
unsigned char status[2]);
-static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
-static unsigned short int ftdi_232am_baud_to_divisor(int baud);
-static u32 ftdi_232bm_baud_base_to_divisor(int baud, int base);
-static u32 ftdi_232bm_baud_to_divisor(int baud);
-static u32 ftdi_2232h_baud_base_to_divisor(int baud, int base);
-static u32 ftdi_2232h_baud_to_divisor(int baud);
-
-static struct usb_serial_driver ftdi_sio_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "ftdi_sio",
- },
- .description = "FTDI USB Serial Device",
- .id_table = id_table_combined,
- .num_ports = 1,
- .bulk_in_size = 512,
- .bulk_out_size = 256,
- .probe = ftdi_sio_probe,
- .port_probe = ftdi_sio_port_probe,
- .port_remove = ftdi_sio_port_remove,
- .open = ftdi_open,
- .dtr_rts = ftdi_dtr_rts,
- .throttle = usb_serial_generic_throttle,
- .unthrottle = usb_serial_generic_unthrottle,
- .process_read_urb = ftdi_process_read_urb,
- .prepare_write_buffer = ftdi_prepare_write_buffer,
- .tiocmget = ftdi_tiocmget,
- .tiocmset = ftdi_tiocmset,
- .tiocmiwait = usb_serial_generic_tiocmiwait,
- .get_icount = usb_serial_generic_get_icount,
- .ioctl = ftdi_ioctl,
- .get_serial = get_serial_info,
- .set_serial = set_serial_info,
- .set_termios = ftdi_set_termios,
- .break_ctl = ftdi_break_ctl,
- .tx_empty = ftdi_tx_empty,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &ftdi_sio_device, NULL
-};
-
-
#define WDR_TIMEOUT 5000 /* default urb timeout */
#define WDR_SHORT_TIMEOUT 1000 /* shorter urb timeout */
@@ -1261,7 +1228,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_MODEM_CTRL_REQUEST,
FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
- value, priv->interface,
+ value, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dev_dbg(dev, "%s Error from MODEM_CTRL urb: DTR %s, RTS %s\n",
@@ -1307,7 +1274,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
if (!baud)
baud = 9600;
switch (priv->chip_type) {
- case SIO: /* SIO chip */
+ case SIO:
switch (baud) {
case 300: div_value = ftdi_sio_b300; break;
case 600: div_value = ftdi_sio_b600; break;
@@ -1319,8 +1286,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
case 38400: div_value = ftdi_sio_b38400; break;
case 57600: div_value = ftdi_sio_b57600; break;
case 115200: div_value = ftdi_sio_b115200; break;
- } /* baud */
- if (div_value == 0) {
+ default:
dev_dbg(dev, "%s - Baudrate (%d) requested is not supported\n",
__func__, baud);
div_value = ftdi_sio_b9600;
@@ -1328,7 +1294,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
div_okay = 0;
}
break;
- case FT8U232AM: /* 8U232AM chip */
+ case FT232A:
if (baud <= 3000000) {
div_value = ftdi_232am_baud_to_divisor(baud);
} else {
@@ -1338,10 +1304,10 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
div_okay = 0;
}
break;
- case FT232BM: /* FT232BM chip */
- case FT2232C: /* FT2232C chip */
- case FT232RL: /* FT232RL chip */
- case FTX: /* FT-X series */
+ case FT232B:
+ case FT2232C:
+ case FT232R:
+ case FTX:
if (baud <= 3000000) {
u16 product_id = le16_to_cpu(
port->serial->dev->descriptor.idProduct);
@@ -1361,9 +1327,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
baud = 9600;
}
break;
- case FT2232H: /* FT2232H chip */
- case FT4232H: /* FT4232H chip */
- case FT232H: /* FT232H chip */
+ default:
if ((baud <= 12000000) && (baud >= 1200)) {
div_value = ftdi_2232h_baud_to_divisor(baud);
} else if (baud < 1200) {
@@ -1375,7 +1339,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
baud = 9600;
}
break;
- } /* priv->chip_type */
+ }
if (div_okay) {
dev_dbg(dev, "%s - Baud rate set to %d (divisor 0x%lX) on chip %s\n",
@@ -1398,13 +1362,8 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
index_value = get_ftdi_divisor(tty, port);
value = (u16)index_value;
index = (u16)(index_value >> 16);
- if (priv->chip_type == FT2232C || priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H || priv->chip_type == FT232H ||
- priv->chip_type == FTX) {
- /* Probably the BM type needs the MSB of the encoded fractional
- * divider also moved like for the chips above. Any infos? */
- index = (u16)((index << 8) | priv->interface);
- }
+ if (priv->channel)
+ index = (u16)((index << 8) | priv->channel);
rv = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
@@ -1422,7 +1381,7 @@ static int write_latency_timer(struct usb_serial_port *port)
int rv;
int l = priv->latency;
- if (priv->chip_type == SIO || priv->chip_type == FT8U232AM)
+ if (priv->chip_type == SIO || priv->chip_type == FT232A)
return -EINVAL;
if (priv->flags & ASYNC_LOW_LATENCY)
@@ -1434,7 +1393,7 @@ static int write_latency_timer(struct usb_serial_port *port)
usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
- l, priv->interface,
+ l, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0)
dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
@@ -1450,7 +1409,7 @@ static int _read_latency_timer(struct usb_serial_port *port)
rv = usb_control_msg_recv(udev, 0, FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 0,
- priv->interface, &buf, 1, WDR_TIMEOUT,
+ priv->channel, &buf, 1, WDR_TIMEOUT,
GFP_KERNEL);
if (rv == 0)
rv = buf;
@@ -1463,7 +1422,7 @@ static int read_latency_timer(struct usb_serial_port *port)
struct ftdi_private *priv = usb_get_serial_port_data(port);
int rv;
- if (priv->chip_type == SIO || priv->chip_type == FT8U232AM)
+ if (priv->chip_type == SIO || priv->chip_type == FT232A)
return -EINVAL;
rv = _read_latency_timer(port);
@@ -1538,90 +1497,97 @@ static int get_lsr_info(struct usb_serial_port *port,
return 0;
}
-
-/* Determine type of FTDI chip based on USB config and descriptor. */
-static void ftdi_determine_type(struct usb_serial_port *port)
+static int ftdi_determine_type(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
struct usb_device *udev = serial->dev;
- unsigned version;
- unsigned interfaces;
-
- /* Assume it is not the original SIO device for now. */
- priv->baud_base = 48000000 / 2;
+ unsigned int version, ifnum;
version = le16_to_cpu(udev->descriptor.bcdDevice);
- interfaces = udev->actconfig->desc.bNumInterfaces;
- dev_dbg(&port->dev, "%s: bcdDevice = 0x%x, bNumInterfaces = %u\n", __func__,
- version, interfaces);
- if (interfaces > 1) {
- struct usb_interface *intf = serial->interface;
- int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-
- /* Multiple interfaces.*/
- if (version == 0x0800) {
- priv->chip_type = FT4232H;
- /* Hi-speed - baud clock runs at 120MHz */
- priv->baud_base = 120000000 / 2;
- } else if (version == 0x0700) {
- priv->chip_type = FT2232H;
- /* Hi-speed - baud clock runs at 120MHz */
- priv->baud_base = 120000000 / 2;
- } else
- priv->chip_type = FT2232C;
-
- /* Determine interface code. */
- if (ifnum == 0)
- priv->interface = INTERFACE_A;
- else if (ifnum == 1)
- priv->interface = INTERFACE_B;
- else if (ifnum == 2)
- priv->interface = INTERFACE_C;
- else if (ifnum == 3)
- priv->interface = INTERFACE_D;
-
- /* BM-type devices have a bug where bcdDevice gets set
- * to 0x200 when iSerialNumber is 0. */
- if (version < 0x500) {
- dev_dbg(&port->dev,
- "%s: something fishy - bcdDevice too low for multi-interface device\n",
- __func__);
- }
- } else if (version < 0x200) {
- /* Old device. Assume it's the original SIO. */
- priv->chip_type = SIO;
- priv->baud_base = 12000000 / 16;
- } else if (version < 0x400) {
- /* Assume it's an FT8U232AM (or FT8U245AM) */
- priv->chip_type = FT8U232AM;
+ ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+
+ /* Assume Hi-Speed type */
+ priv->baud_base = 120000000 / 2;
+ priv->channel = CHANNEL_A + ifnum;
+
+ switch (version) {
+ case 0x200:
+ priv->chip_type = FT232A;
+ priv->baud_base = 48000000 / 2;
+ priv->channel = 0;
/*
- * It might be a BM type because of the iSerialNumber bug.
- * If iSerialNumber==0 and the latency timer is readable,
- * assume it is BM type.
+ * FT232B devices have a bug where bcdDevice gets set to 0x200
+ * when iSerialNumber is 0. Assume it is an FT232B in case the
+ * latency timer is readable.
*/
if (udev->descriptor.iSerialNumber == 0 &&
_read_latency_timer(port) >= 0) {
- dev_dbg(&port->dev,
- "%s: has latency timer so not an AM type\n",
- __func__);
- priv->chip_type = FT232BM;
+ priv->chip_type = FT232B;
}
- } else if (version < 0x600) {
- /* Assume it's an FT232BM (or FT245BM) */
- priv->chip_type = FT232BM;
- } else if (version < 0x900) {
- /* Assume it's an FT232RL */
- priv->chip_type = FT232RL;
- } else if (version < 0x1000) {
- /* Assume it's an FT232H */
+ break;
+ case 0x400:
+ priv->chip_type = FT232B;
+ priv->baud_base = 48000000 / 2;
+ priv->channel = 0;
+ break;
+ case 0x500:
+ priv->chip_type = FT2232C;
+ priv->baud_base = 48000000 / 2;
+ break;
+ case 0x600:
+ priv->chip_type = FT232R;
+ priv->baud_base = 48000000 / 2;
+ priv->channel = 0;
+ break;
+ case 0x700:
+ priv->chip_type = FT2232H;
+ break;
+ case 0x800:
+ priv->chip_type = FT4232H;
+ break;
+ case 0x900:
priv->chip_type = FT232H;
- } else {
- /* Assume it's an FT-X series device */
+ break;
+ case 0x1000:
priv->chip_type = FTX;
+ priv->baud_base = 48000000 / 2;
+ break;
+ case 0x2800:
+ priv->chip_type = FT2233HP;
+ break;
+ case 0x2900:
+ priv->chip_type = FT4233HP;
+ break;
+ case 0x3000:
+ priv->chip_type = FT2232HP;
+ break;
+ case 0x3100:
+ priv->chip_type = FT4232HP;
+ break;
+ case 0x3200:
+ priv->chip_type = FT233HP;
+ break;
+ case 0x3300:
+ priv->chip_type = FT232HP;
+ break;
+ case 0x3600:
+ priv->chip_type = FT4232HA;
+ break;
+ default:
+ if (version < 0x200) {
+ priv->chip_type = SIO;
+ priv->baud_base = 12000000 / 16;
+ priv->channel = 0;
+ } else {
+ dev_err(&port->dev, "unknown device type: 0x%02x\n", version);
+ return -ENODEV;
+ }
}
dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
+
+ return 0;
}
@@ -1720,7 +1686,7 @@ static ssize_t event_char_store(struct device *dev,
usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_EVENT_CHAR_REQUEST,
FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
- v, priv->interface,
+ v, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dev_dbg(&port->dev, "Unable to write event character: %i\n", rv);
@@ -1731,51 +1697,42 @@ static ssize_t event_char_store(struct device *dev,
}
static DEVICE_ATTR_WO(event_char);
-static int create_sysfs_attrs(struct usb_serial_port *port)
-{
- struct ftdi_private *priv = usb_get_serial_port_data(port);
- int retval = 0;
-
- /* XXX I've no idea if the original SIO supports the event_char
- * sysfs parameter, so I'm playing it safe. */
- if (priv->chip_type != SIO) {
- dev_dbg(&port->dev, "sysfs attributes for %s\n", ftdi_chip_name[priv->chip_type]);
- retval = device_create_file(&port->dev, &dev_attr_event_char);
- if ((!retval) &&
- (priv->chip_type == FT232BM ||
- priv->chip_type == FT2232C ||
- priv->chip_type == FT232RL ||
- priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H ||
- priv->chip_type == FT232H ||
- priv->chip_type == FTX)) {
- retval = device_create_file(&port->dev,
- &dev_attr_latency_timer);
- }
- }
- return retval;
-}
+static struct attribute *ftdi_attrs[] = {
+ &dev_attr_event_char.attr,
+ &dev_attr_latency_timer.attr,
+ NULL
+};
-static void remove_sysfs_attrs(struct usb_serial_port *port)
+static umode_t ftdi_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
+ enum ftdi_chip_type type = priv->chip_type;
- /* XXX see create_sysfs_attrs */
- if (priv->chip_type != SIO) {
- device_remove_file(&port->dev, &dev_attr_event_char);
- if (priv->chip_type == FT232BM ||
- priv->chip_type == FT2232C ||
- priv->chip_type == FT232RL ||
- priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H ||
- priv->chip_type == FT232H ||
- priv->chip_type == FTX) {
- device_remove_file(&port->dev, &dev_attr_latency_timer);
- }
+ if (attr == &dev_attr_event_char.attr) {
+ if (type == SIO)
+ return 0;
+ }
+
+ if (attr == &dev_attr_latency_timer.attr) {
+ if (type == SIO || type == FT232A)
+ return 0;
}
+ return attr->mode;
}
+static const struct attribute_group ftdi_group = {
+ .attrs = ftdi_attrs,
+ .is_visible = ftdi_is_visible,
+};
+
+static const struct attribute_group *ftdi_groups[] = {
+ &ftdi_group,
+ NULL
+};
+
#ifdef CONFIG_GPIOLIB
static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
@@ -1794,7 +1751,7 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
usb_sndctrlpipe(serial->dev, 0),
FTDI_SIO_SET_BITMODE_REQUEST,
FTDI_SIO_SET_BITMODE_REQUEST_TYPE, val,
- priv->interface, NULL, 0, WDR_TIMEOUT);
+ priv->channel, NULL, 0, WDR_TIMEOUT);
if (result < 0) {
dev_err(&serial->interface->dev,
"bitmode request failed for value 0x%04x: %d\n",
@@ -1858,7 +1815,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
result = usb_control_msg_recv(serial->dev, 0,
FTDI_SIO_READ_PINS_REQUEST,
FTDI_SIO_READ_PINS_REQUEST_TYPE, 0,
- priv->interface, &buf, 1, WDR_TIMEOUT,
+ priv->channel, &buf, 1, WDR_TIMEOUT,
GFP_KERNEL);
if (result == 0)
result = buf;
@@ -2143,7 +2100,7 @@ static int ftdi_gpio_init(struct usb_serial_port *port)
case FT232H:
result = ftdi_gpio_init_ft232h(port);
break;
- case FT232RL:
+ case FT232R:
result = ftdi_gpio_init_ft232r(port);
break;
case FTX:
@@ -2213,12 +2170,9 @@ static void ftdi_gpio_remove(struct usb_serial_port *port) { }
* ***************************************************************************
*/
-/* Probe function to check for special devices */
-static int ftdi_sio_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
+static int ftdi_probe(struct usb_serial *serial, const struct usb_device_id *id)
{
- const struct ftdi_sio_quirk *quirk =
- (struct ftdi_sio_quirk *)id->driver_info;
+ const struct ftdi_quirk *quirk = (struct ftdi_quirk *)id->driver_info;
if (quirk && quirk->probe) {
int ret = quirk->probe(serial);
@@ -2231,10 +2185,10 @@ static int ftdi_sio_probe(struct usb_serial *serial,
return 0;
}
-static int ftdi_sio_port_probe(struct usb_serial_port *port)
+static int ftdi_port_probe(struct usb_serial_port *port)
{
+ const struct ftdi_quirk *quirk = usb_get_serial_data(port->serial);
struct ftdi_private *priv;
- const struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
int result;
priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
@@ -2248,12 +2202,14 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
usb_set_serial_port_data(port, priv);
- ftdi_determine_type(port);
+ result = ftdi_determine_type(port);
+ if (result)
+ goto err_free;
+
ftdi_set_max_packet_size(port);
if (read_latency_timer(port) < 0)
priv->latency = 16;
write_latency_timer(port);
- create_sysfs_attrs(port);
result = ftdi_gpio_init(port);
if (result < 0) {
@@ -2263,6 +2219,11 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
}
return 0;
+
+err_free:
+ kfree(priv);
+
+ return result;
}
/* Setup for the USB-UIRT device, which requires hardwired
@@ -2373,14 +2334,12 @@ static int ftdi_stmclite_probe(struct usb_serial *serial)
return 0;
}
-static void ftdi_sio_port_remove(struct usb_serial_port *port)
+static void ftdi_port_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
ftdi_gpio_remove(port);
- remove_sysfs_attrs(port);
-
kfree(priv);
}
@@ -2394,7 +2353,7 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE,
FTDI_SIO_RESET_SIO,
- priv->interface, NULL, 0, WDR_TIMEOUT);
+ priv->channel, NULL, 0, WDR_TIMEOUT);
/* Termios defaults are set by usb_serial_init. We don't change
port->tty->termios - this would lose speed settings, etc.
@@ -2417,7 +2376,7 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface, NULL, 0,
+ 0, priv->channel, NULL, 0,
WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "error from flowcontrol urb\n");
}
@@ -2610,7 +2569,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
- value , priv->interface,
+ value, priv->channel,
NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "%s FAILED to enable/disable break state (state was %d)\n",
__func__, break_state);
@@ -2640,7 +2599,8 @@ static bool ftdi_tx_empty(struct usb_serial_port *port)
* WARNING: set_termios calls this with old_termios in kernel space
*/
static void ftdi_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct device *ddev = &port->dev;
@@ -2746,7 +2706,7 @@ no_skip:
if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
- value , priv->interface,
+ value, priv->channel,
NULL, 0, WDR_SHORT_TIMEOUT) < 0) {
dev_err(ddev, "%s FAILED to set databits/stopbits/parity\n",
__func__);
@@ -2759,7 +2719,7 @@ no_data_parity_stop_changes:
if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface,
+ 0, priv->channel,
NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(ddev, "%s error from disable flowcontrol urb\n",
__func__);
@@ -2793,7 +2753,7 @@ no_c_cflag_changes:
index = FTDI_SIO_DISABLE_FLOW_CTRL;
}
- index |= priv->interface;
+ index |= priv->channel;
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
@@ -2821,33 +2781,19 @@ static int ftdi_get_modem_status(struct usb_serial_port *port,
if (!buf)
return -ENOMEM;
/*
- * The 8U232AM returns a two byte value (the SIO a 1 byte value) in
- * the same format as the data returned from the in point.
+ * The device returns a two byte value (the SIO a 1 byte value) in the
+ * same format as the data returned from the IN endpoint.
*/
- switch (priv->chip_type) {
- case SIO:
+ if (priv->chip_type == SIO)
len = 1;
- break;
- case FT8U232AM:
- case FT232BM:
- case FT2232C:
- case FT232RL:
- case FT2232H:
- case FT4232H:
- case FT232H:
- case FTX:
+ else
len = 2;
- break;
- default:
- ret = -EFAULT;
- goto out;
- }
ret = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
FTDI_SIO_GET_MODEM_STATUS_REQUEST,
FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
- 0, priv->interface,
+ 0, priv->channel,
buf, len, WDR_TIMEOUT);
/* NOTE: We allow short responses and handle that below. */
@@ -2917,6 +2863,41 @@ static int ftdi_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
+static struct usb_serial_driver ftdi_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ftdi_sio",
+ .dev_groups = ftdi_groups,
+ },
+ .description = "FTDI USB Serial Device",
+ .id_table = id_table_combined,
+ .num_ports = 1,
+ .bulk_in_size = 512,
+ .bulk_out_size = 256,
+ .probe = ftdi_probe,
+ .port_probe = ftdi_port_probe,
+ .port_remove = ftdi_port_remove,
+ .open = ftdi_open,
+ .dtr_rts = ftdi_dtr_rts,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
+ .process_read_urb = ftdi_process_read_urb,
+ .prepare_write_buffer = ftdi_prepare_write_buffer,
+ .tiocmget = ftdi_tiocmget,
+ .tiocmset = ftdi_tiocmset,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
+ .get_icount = usb_serial_generic_get_icount,
+ .ioctl = ftdi_ioctl,
+ .get_serial = get_serial_info,
+ .set_serial = set_serial_info,
+ .set_termios = ftdi_set_termios,
+ .break_ctl = ftdi_break_ctl,
+ .tx_empty = ftdi_tx_empty,
+};
+
+static struct usb_serial_driver * const serial_drivers[] = {
+ &ftdi_device, NULL
+};
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
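[Illustrative sketch, not part of the patch.] The interface-to-channel rename above reflects how multi-channel FTDI parts address individual UARTs: each vendor control request carries the channel index (CHANNEL_A..CHANNEL_D, i.e. 1..4, from ftdi_sio.h) in wIndex, with any request-specific value OR'd into the upper bits. Assuming the usual ftdi_sio defines, a minimal example of such a request could look like:

	/* Hypothetical helper, for illustration only: disable flow control on one channel. */
	static int example_disable_flow_ctrl(struct usb_device *dev, u8 channel)
	{
		return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
				       FTDI_SIO_SET_FLOW_CTRL_REQUEST,
				       FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
				       0, FTDI_SIO_DISABLE_FLOW_CTRL | channel,
				       NULL, 0, WDR_TIMEOUT);
	}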
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index be1641e0408b..55ea61264f91 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -40,11 +40,11 @@
#define FTDI_SIO_READ_PINS 0x0c /* Read immediate value of pins */
#define FTDI_SIO_READ_EEPROM 0x90 /* Read EEPROM */
-/* Interface indices for FT2232, FT2232H and FT4232H devices */
-#define INTERFACE_A 1
-#define INTERFACE_B 2
-#define INTERFACE_C 3
-#define INTERFACE_D 4
+/* Channel indices for FT2232, FT2232H and FT4232H devices */
+#define CHANNEL_A 1
+#define CHANNEL_B 2
+#define CHANNEL_C 3
+#define CHANNEL_D 4
/*
@@ -153,18 +153,6 @@
* not supported by the FT8U232AM).
*/
-enum ftdi_chip_type {
- SIO = 1,
- FT8U232AM = 2,
- FT232BM = 3,
- FT2232C = 4,
- FT232RL = 5,
- FT2232H = 6,
- FT4232H = 7,
- FT232H = 8,
- FTX = 9,
-};
-
enum ftdi_sio_baudrate {
ftdi_sio_b300 = 0,
ftdi_sio_b600 = 1,
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 31c8ccabbbb7..e2099445db70 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -25,6 +25,13 @@
#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
#define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */
#define FTDI_FTX_PID 0x6015 /* FT-X series (FT201X, FT230X, FT231X, etc) */
+#define FTDI_FT2233HP_PID 0x6040 /* Dual channel hi-speed device with PD */
+#define FTDI_FT4233HP_PID 0x6041 /* Quad channel hi-speed device with PD */
+#define FTDI_FT2232HP_PID 0x6042 /* Dual channel hi-speed device with PD */
+#define FTDI_FT4232HP_PID 0x6043 /* Quad channel hi-speed device with PD */
+#define FTDI_FT233HP_PID 0x6044 /* Dual channel hi-speed device with PD */
+#define FTDI_FT232HP_PID 0x6045 /* Dual channel hi-speed device with PD */
+#define FTDI_FT4232HA_PID 0x6048 /* Quad channel automotive grade hi-speed device */
#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index ffa622539a25..3a4c0febf335 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -281,7 +281,7 @@ static int send_iosp_ext_cmd(struct edgeport_port *edge_port, __u8 command,
static int calc_baud_rate_divisor(struct device *dev, int baud_rate, int *divisor);
static void change_port_settings(struct tty_struct *tty,
struct edgeport_port *edge_port,
- struct ktermios *old_termios);
+ const struct ktermios *old_termios);
static int send_cmd_write_uart_register(struct edgeport_port *edge_port,
__u8 regNum, __u8 regValue);
static int write_cmd_usb(struct edgeport_port *edge_port,
@@ -1441,7 +1441,8 @@ static void edge_unthrottle(struct tty_struct *tty)
* the termios structure
*****************************************************************************/
static void edge_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
@@ -2325,7 +2326,7 @@ static int send_cmd_write_uart_register(struct edgeport_port *edge_port,
*****************************************************************************/
static void change_port_settings(struct tty_struct *tty,
- struct edgeport_port *edge_port, struct ktermios *old_termios)
+ struct edgeport_port *edge_port, const struct ktermios *old_termios)
{
struct device *dev = &edge_port->port->dev;
struct edgeport_serial *edge_serial =
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index feba2a8d1233..bc3c24ea42c1 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -221,7 +221,8 @@ static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);
static void edge_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void edge_send(struct usb_serial_port *port, struct tty_struct *tty);
static int do_download_mode(struct edgeport_serial *serial,
@@ -2210,7 +2211,7 @@ static int restart_read(struct edgeport_port *edge_port)
}
static void change_port_settings(struct tty_struct *tty,
- struct edgeport_port *edge_port, struct ktermios *old_termios)
+ struct edgeport_port *edge_port, const struct ktermios *old_termios)
{
struct device *dev = &edge_port->port->dev;
struct ump_uart_config *config;
@@ -2351,7 +2352,8 @@ static void change_port_settings(struct tty_struct *tty,
}
static void edge_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 7b44dbea95cd..82f108134e6f 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -51,7 +51,8 @@ static unsigned int ir_write_room(struct tty_struct *tty);
static void ir_write_bulk_callback(struct urb *urb);
static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
/* Not that this lot means you can only have one per system */
static u8 ir_baud;
@@ -376,7 +377,8 @@ static void ir_process_read_urb(struct urb *urb)
}
static void ir_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_device *udev = port->serial->dev;
unsigned char *transfer_buffer;
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 0be3b5e1eaf3..77cba71bcccb 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -879,7 +879,8 @@ static int iuu_uart_baud(struct usb_serial_port *port, u32 baud_base,
}
static void iuu_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
const u32 supported_mask = CMSPAR|PARENB|PARODD;
struct iuu_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 1cfcd805f286..2966e0c4941e 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -616,7 +616,8 @@ static void keyspan_break_ctl(struct tty_struct *tty, int break_state)
static void keyspan_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
int baud_rate, device_port;
struct keyspan_port_private *p_priv;
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 3e7628becdcd..6fd15cd9e1eb 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -321,7 +321,8 @@ static void keyspan_pda_break_ctl(struct tty_struct *tty, int break_state)
}
static void keyspan_pda_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
speed_t speed;
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index edcc57bd9b5e..394b3189e003 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -56,7 +56,8 @@ static void klsi_105_port_remove(struct usb_serial_port *port);
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
static void klsi_105_close(struct usb_serial_port *port);
static void klsi_105_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int klsi_105_tiocmget(struct tty_struct *tty);
static void klsi_105_process_read_urb(struct urb *urb);
static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
@@ -366,7 +367,7 @@ static void klsi_105_process_read_urb(struct urb *urb)
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct klsi_105_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 4ed8b8b0a361..5e775f68fcb8 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -62,7 +62,8 @@ static int kobil_tiocmset(struct tty_struct *tty,
static void kobil_read_int_callback(struct urb *urb);
static void kobil_write_int_callback(struct urb *urb);
static void kobil_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old);
static void kobil_init_termios(struct tty_struct *tty);
static const struct usb_device_id id_table[] = {
@@ -474,7 +475,8 @@ static int kobil_tiocmset(struct tty_struct *tty,
}
static void kobil_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old)
+ struct usb_serial_port *port,
+ const struct ktermios *old)
{
struct kobil_private *priv;
int result;
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index ecd5b921e374..d3852feb81a4 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -45,7 +45,8 @@ static void mct_u232_close(struct usb_serial_port *port);
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
static void mct_u232_read_int_callback(struct urb *urb);
static void mct_u232_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state);
static int mct_u232_tiocmget(struct tty_struct *tty);
static int mct_u232_tiocmset(struct tty_struct *tty,
@@ -593,7 +594,7 @@ exit:
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 23ccbba716c7..1d1f85fabc28 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1356,7 +1356,7 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
*/
static void change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7720_port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct usb_serial_port *port;
struct usb_serial *serial;
@@ -1494,7 +1494,8 @@ static void change_port_settings(struct tty_struct *tty,
* termios structure.
*/
static void mos7720_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
int status;
struct moschip_port *mos7720_port;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 925067a7978d..6b12bb4648b8 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1188,7 +1188,8 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
*****************************************************************************/
static void mos7840_change_port_settings(struct tty_struct *tty,
- struct moschip_port *mos7840_port, struct ktermios *old_termios)
+ struct moschip_port *mos7840_port,
+ const struct ktermios *old_termios)
{
struct usb_serial_port *port = mos7840_port->port;
int baud;
@@ -1330,7 +1331,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
static void mos7840_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index eb45a9b0005c..faa0eedfe245 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -760,7 +760,7 @@ static int mxuport_tiocmget(struct tty_struct *tty)
}
static int mxuport_set_termios_flow(struct tty_struct *tty,
- struct ktermios *old_termios,
+ const struct ktermios *old_termios,
struct usb_serial_port *port,
struct usb_serial *serial)
{
@@ -834,7 +834,7 @@ out:
static void mxuport_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
u8 *buf;
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index a5caedbe72e2..6365cfe5402c 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -119,7 +119,8 @@ struct oti6858_control_pkt {
static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port);
static void oti6858_close(struct usb_serial_port *port);
static void oti6858_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void oti6858_init_termios(struct tty_struct *tty);
static void oti6858_read_int_callback(struct urb *urb);
static void oti6858_read_bulk_callback(struct urb *urb);
@@ -395,7 +396,8 @@ static void oti6858_init_termios(struct tty_struct *tty)
}
static void oti6858_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 40b1ab3d284d..8949c1891164 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -789,7 +789,8 @@ static bool pl2303_enable_xonxoff(struct tty_struct *tty, const struct pl2303_ty
}
static void pl2303_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 586ef5551e76..b1e844bf31f8 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81c2)}, /* Dell Wireless 5811e */
{DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */
{DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 36b1e064e51f..6fca40ace83a 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -261,8 +261,8 @@ static int qt2_calc_num_ports(struct usb_serial *serial,
}
static void qt2_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct qt2_port_private *port_priv;
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 7039dc918827..09a972a838ee 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -283,7 +283,8 @@ static void spcp8x5_init_termios(struct tty_struct *tty)
}
static void spcp8x5_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 181e302136a5..1e1888b66305 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -214,8 +214,8 @@ out: kfree(data);
static void ssu100_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct ktermios *termios = &tty->termios;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 18c0bd853392..b99f78224846 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -314,7 +314,8 @@ static bool ti_tx_empty(struct usb_serial_port *port);
static void ti_throttle(struct tty_struct *tty);
static void ti_unthrottle(struct tty_struct *tty);
static void ti_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int ti_tiocmget(struct tty_struct *tty);
static int ti_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -892,7 +893,8 @@ static void ti_unthrottle(struct tty_struct *tty)
}
static void ti_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct ti_port *tport = usb_get_serial_port_data(port);
struct ti_uart_config *config;
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index 63d4a784ae45..c47439bd90fa 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -296,8 +296,8 @@ static speed_t upd78f0730_get_baud_rate(struct tty_struct *tty)
}
static void upd78f0730_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct device *dev = &port->dev;
struct upd78f0730_line_control request;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index e35bea2235c1..164521ee10c6 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -519,7 +519,8 @@ static int serial_ioctl(struct tty_struct *tty,
return retval;
}
-static void serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void serial_set_termios(struct tty_struct *tty,
+ const struct ktermios *old)
{
struct usb_serial_port *port = tty->driver_data;
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 332fb92ae575..7f82d40753ee 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -82,7 +82,8 @@ static void whiteheat_close(struct usb_serial_port *port);
static void whiteheat_get_serial(struct tty_struct *tty,
struct serial_struct *ss);
static void whiteheat_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int whiteheat_tiocmget(struct tty_struct *tty);
static int whiteheat_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -442,7 +443,8 @@ static void whiteheat_get_serial(struct tty_struct *tty, struct serial_struct *s
static void whiteheat_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
firm_setup_port(tty);
}
diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
index 6853cd56d8dc..f3811e060a44 100644
--- a/drivers/usb/serial/xr_serial.c
+++ b/drivers/usb/serial/xr_serial.c
@@ -104,7 +104,8 @@ static int xr21v141x_uart_enable(struct usb_serial_port *port);
static int xr21v141x_uart_disable(struct usb_serial_port *port);
static int xr21v141x_fifo_reset(struct usb_serial_port *port);
static void xr21v141x_set_line_settings(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
struct xr_type {
int reg_width;
@@ -133,8 +134,8 @@ struct xr_type {
int (*disable)(struct usb_serial_port *port);
int (*fifo_reset)(struct usb_serial_port *port);
void (*set_line_settings)(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
};
enum xr_type_id {
@@ -622,8 +623,8 @@ static int xr21v141x_set_baudrate(struct tty_struct *tty, struct usb_serial_port
}
static void xr_set_flow_mode(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
const struct xr_type *type = data->type;
@@ -674,7 +675,8 @@ static void xr_set_flow_mode(struct tty_struct *tty,
}
static void xr21v141x_set_line_settings(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct ktermios *termios = &tty->termios;
u8 bits = 0;
@@ -732,7 +734,8 @@ static void xr21v141x_set_line_settings(struct tty_struct *tty,
}
static void xr_cdc_set_line_coding(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
struct usb_host_interface *alt = port->serial->interface->cur_altsetting;
@@ -809,7 +812,8 @@ static void xr_cdc_set_line_coding(struct tty_struct *tty,
}
static void xr_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 1db2eefeea22..01f3c2779ccf 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -201,7 +201,7 @@ static int onetouch_connect_input(struct us_data *ss)
onetouch->dev = input_dev;
if (udev->manufacturer)
- strlcpy(onetouch->name, udev->manufacturer,
+ strscpy(onetouch->name, udev->manufacturer,
sizeof(onetouch->name));
if (udev->product) {
if (udev->manufacturer)
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 84dc270f6f73..de3836412bf3 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -283,7 +283,7 @@ static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *
set_host_byte(cmnd, DID_OK);
break;
case RC_TMF_NOT_SUPPORTED:
- set_host_byte(cmnd, DID_TARGET_FAILURE);
+ set_host_byte(cmnd, DID_BAD_TARGET);
break;
default:
uas_log_cmd_state(cmnd, "response iu", response_code);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 4993227ab293..20dcbccb290b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1275,12 +1275,6 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
USB_SC_RBC, USB_PR_BULK, NULL,
0 ),
-UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
- "Samsung",
- "Flash Drive FIT",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_MAX_SECTORS_64),
-
/* aeb */
UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
"Feiya",
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index e205f409589a..b8f3b75fd7eb 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -1105,7 +1105,7 @@ static int anx7411_typec_switch_probe(struct anx7411_data *ctx,
int ret;
struct device_node *node;
- node = of_find_node_by_name(dev->of_node, "orientation_switch");
+ node = of_get_child_by_name(dev->of_node, "orientation_switch");
if (!node)
return 0;
@@ -1115,7 +1115,7 @@ static int anx7411_typec_switch_probe(struct anx7411_data *ctx,
return ret;
}
- node = of_find_node_by_name(dev->of_node, "mode_switch");
+ node = of_get_child_by_name(dev->of_node, "mode_switch");
if (!node) {
dev_err(dev, "no typec mux exist");
ret = -ENODEV;
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 464330776cd6..941735c73161 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -29,7 +29,7 @@ static int switch_fwnode_match(struct device *dev, const void *fwnode)
if (!is_typec_switch_dev(dev))
return 0;
- return dev_fwnode(dev) == fwnode;
+ return device_match_fwnode(dev, fwnode);
}
static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
@@ -259,7 +259,7 @@ static int mux_fwnode_match(struct device *dev, const void *fwnode)
if (!is_typec_mux_dev(dev))
return 0;
- return dev_fwnode(dev) == fwnode;
+ return device_match_fwnode(dev, fwnode);
}
static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
diff --git a/drivers/usb/typec/qcom-pmic-typec.c b/drivers/usb/typec/qcom-pmic-typec.c
index a0454a80c4a2..432ea62f1bab 100644
--- a/drivers/usb/typec/qcom-pmic-typec.c
+++ b/drivers/usb/typec/qcom-pmic-typec.c
@@ -195,9 +195,8 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
qcom_usb->role_sw = fwnode_usb_role_switch_get(dev_fwnode(qcom_usb->dev));
if (IS_ERR(qcom_usb->role_sw)) {
- if (PTR_ERR(qcom_usb->role_sw) != -EPROBE_DEFER)
- dev_err(dev, "failed to get role switch\n");
- ret = PTR_ERR(qcom_usb->role_sw);
+ ret = dev_err_probe(dev, PTR_ERR(qcom_usb->role_sw),
+ "failed to get role switch\n");
goto err_typec_port;
}
diff --git a/drivers/usb/typec/retimer.c b/drivers/usb/typec/retimer.c
index 2003731f1bee..ee94dbbe4745 100644
--- a/drivers/usb/typec/retimer.c
+++ b/drivers/usb/typec/retimer.c
@@ -31,7 +31,7 @@ static bool dev_name_ends_with(struct device *dev, const char *suffix)
static int retimer_fwnode_match(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-retimer");
+ return device_match_fwnode(dev, fwnode) && dev_name_ends_with(dev, "-retimer");
}
static void *typec_retimer_match(struct fwnode_handle *fwnode, const char *id, void *data)
diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
index 8638f1d39896..494b371151e0 100644
--- a/drivers/usb/typec/stusb160x.c
+++ b/drivers/usb/typec/stusb160x.c
@@ -750,11 +750,8 @@ static int stusb160x_probe(struct i2c_client *client)
if (client->irq) {
chip->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(chip->role_sw)) {
- ret = PTR_ERR(chip->role_sw);
- if (ret != -EPROBE_DEFER)
- dev_err(chip->dev,
- "Failed to get usb role switch: %d\n",
- ret);
+ ret = dev_err_probe(chip->dev, PTR_ERR(chip->role_sw),
+ "Failed to get usb role switch\n");
goto port_unregister;
}
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 073fd2ea5e0b..e6b88ca4a4b9 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -35,6 +35,17 @@ config TYPEC_MT6360
USB Type-C. It works with Type-C Port Controller Manager
to provide USB PD and USB Type-C functionalities.
+config TYPEC_TCPCI_MT6370
+ tristate "MediaTek MT6370 Type-C driver"
+ depends on MFD_MT6370
+ help
+ MediaTek MT6370 is a multi-functional IC that includes
+ USB Type-C. It works with Type-C Port Controller Manager
+ to provide USB PD and USB Type-C functionalities.
+
+ This driver can also be built as a module. The module
+ will be called "tcpci_mt6370".
+
config TYPEC_TCPCI_MAXIM
tristate "Maxim TCPCI based Type-C chip driver"
help
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
index 7d499f3569fd..906d9dced8e7 100644
--- a/drivers/usb/typec/tcpm/Makefile
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -6,4 +6,5 @@ typec_wcove-y := wcove.o
obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
obj-$(CONFIG_TYPEC_MT6360) += tcpci_mt6360.o
+obj-$(CONFIG_TYPEC_TCPCI_MT6370) += tcpci_mt6370.o
obj-$(CONFIG_TYPEC_TCPCI_MAXIM) += tcpci_maxim.o
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 5e9348f28d50..721b2a548084 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -151,7 +151,7 @@ static void _fusb302_log(struct fusb302_chip *chip, const char *fmt,
if (fusb302_log_full(chip)) {
chip->logbuffer_head = max(chip->logbuffer_head - 1, 0);
- strlcpy(tmpbuffer, "overflow", sizeof(tmpbuffer));
+ strscpy(tmpbuffer, "overflow", sizeof(tmpbuffer));
}
if (chip->logbuffer_head < 0 ||
@@ -1743,9 +1743,8 @@ static int fusb302_probe(struct i2c_client *client,
chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
if (IS_ERR(chip->tcpm_port)) {
fwnode_handle_put(chip->tcpc_dev.fwnode);
- ret = PTR_ERR(chip->tcpm_port);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "cannot register tcpm port, ret=%d", ret);
+ ret = dev_err_probe(dev, PTR_ERR(chip->tcpm_port),
+ "cannot register tcpm port\n");
goto destroy_workqueue;
}
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index f00810d198a8..b2bfcebe218f 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -27,11 +27,6 @@
#define VPPS_VALID_MIN_MV 100
#define VSINKDISCONNECT_PD_MIN_PERCENT 90
-#define tcpc_presenting_rd(reg, cc) \
- (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
- (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
-
struct tcpci {
struct device *dev;
@@ -218,23 +213,6 @@ static int tcpci_start_toggling(struct tcpc_dev *tcpc,
TCPC_CMD_LOOK4CONNECTION);
}
-static enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
-{
- switch (cc) {
- case 0x1:
- return sink ? TYPEC_CC_RP_DEF : TYPEC_CC_RA;
- case 0x2:
- return sink ? TYPEC_CC_RP_1_5 : TYPEC_CC_RD;
- case 0x3:
- if (sink)
- return TYPEC_CC_RP_3_0;
- fallthrough;
- case 0x0:
- default:
- return TYPEC_CC_OPEN;
- }
-}
-
static int tcpci_get_cc(struct tcpc_dev *tcpc,
enum typec_cc_status *cc1, enum typec_cc_status *cc2)
{
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6370.c b/drivers/usb/typec/tcpm/tcpci_mt6370.c
new file mode 100644
index 000000000000..c5bb201a5163
--- /dev/null
+++ b/drivers/usb/typec/tcpm/tcpci_mt6370.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/tcpci.h>
+#include <linux/usb/tcpm.h>
+
+#define MT6370_REG_SYSCTRL8 0x9B
+
+#define MT6370_AUTOIDLE_MASK BIT(3)
+
+#define MT6370_VENDOR_ID 0x29CF
+#define MT6370_TCPC_DID_A 0x2170
+
+struct mt6370_priv {
+ struct device *dev;
+ struct regulator *vbus;
+ struct tcpci *tcpci;
+ struct tcpci_data tcpci_data;
+};
+
+static const struct reg_sequence mt6370_reg_init[] = {
+ REG_SEQ(0xA0, 0x1, 1000),
+ REG_SEQ(0x81, 0x38, 0),
+ REG_SEQ(0x82, 0x82, 0),
+ REG_SEQ(0xBA, 0xFC, 0),
+ REG_SEQ(0xBB, 0x50, 0),
+ REG_SEQ(0x9E, 0x8F, 0),
+ REG_SEQ(0xA1, 0x5, 0),
+ REG_SEQ(0xA2, 0x4, 0),
+ REG_SEQ(0xA3, 0x4A, 0),
+ REG_SEQ(0xA4, 0x01, 0),
+ REG_SEQ(0x95, 0x01, 0),
+ REG_SEQ(0x80, 0x71, 0),
+ REG_SEQ(0x9B, 0x3A, 1000),
+};
+
+static int mt6370_tcpc_init(struct tcpci *tcpci, struct tcpci_data *data)
+{
+ u16 did;
+ int ret;
+
+ ret = regmap_register_patch(data->regmap, mt6370_reg_init,
+ ARRAY_SIZE(mt6370_reg_init));
+ if (ret)
+ return ret;
+
+ ret = regmap_raw_read(data->regmap, TCPC_BCD_DEV, &did, sizeof(u16));
+ if (ret)
+ return ret;
+
+ if (did == MT6370_TCPC_DID_A)
+ return regmap_write(data->regmap, TCPC_FAULT_CTRL, 0x80);
+
+ return 0;
+}
+
+static int mt6370_tcpc_set_vconn(struct tcpci *tcpci, struct tcpci_data *data,
+ bool enable)
+{
+ return regmap_update_bits(data->regmap, MT6370_REG_SYSCTRL8,
+ MT6370_AUTOIDLE_MASK,
+ enable ? 0 : MT6370_AUTOIDLE_MASK);
+}
+
+static int mt6370_tcpc_set_vbus(struct tcpci *tcpci, struct tcpci_data *data,
+ bool source, bool sink)
+{
+ struct mt6370_priv *priv = container_of(data, struct mt6370_priv,
+ tcpci_data);
+ int ret;
+
+ ret = regulator_is_enabled(priv->vbus);
+ if (ret < 0)
+ return ret;
+
+ if (ret && !source)
+ return regulator_disable(priv->vbus);
+
+ if (!ret && source)
+ return regulator_enable(priv->vbus);
+
+ return 0;
+}
+
+static irqreturn_t mt6370_irq_handler(int irq, void *dev_id)
+{
+ struct mt6370_priv *priv = dev_id;
+
+ return tcpci_irq(priv->tcpci);
+}
+
+static int mt6370_check_vendor_info(struct mt6370_priv *priv)
+{
+ struct regmap *regmap = priv->tcpci_data.regmap;
+ u16 vid;
+ int ret;
+
+ ret = regmap_raw_read(regmap, TCPC_VENDOR_ID, &vid, sizeof(u16));
+ if (ret)
+ return ret;
+
+ if (vid != MT6370_VENDOR_ID)
+ return dev_err_probe(priv->dev, -ENODEV,
+ "Vendor ID not correct 0x%02x\n", vid);
+
+ return 0;
+}
+
+static void mt6370_unregister_tcpci_port(void *tcpci)
+{
+ tcpci_unregister_port(tcpci);
+}
+
+static int mt6370_tcpc_probe(struct platform_device *pdev)
+{
+ struct mt6370_priv *priv;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->tcpci_data.regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->tcpci_data.regmap)
+ return dev_err_probe(dev, -ENODEV, "Failed to init regmap\n");
+
+ ret = mt6370_check_vendor_info(priv);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get irq\n");
+
+ /* Assign TCPCI feature and ops */
+ priv->tcpci_data.auto_discharge_disconnect = 1;
+ priv->tcpci_data.init = mt6370_tcpc_init;
+ priv->tcpci_data.set_vconn = mt6370_tcpc_set_vconn;
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (!IS_ERR(priv->vbus))
+ priv->tcpci_data.set_vbus = mt6370_tcpc_set_vbus;
+
+ priv->tcpci = tcpci_register_port(dev, &priv->tcpci_data);
+ if (IS_ERR(priv->tcpci))
+ return dev_err_probe(dev, PTR_ERR(priv->tcpci),
+ "Failed to register tcpci port\n");
+
+ ret = devm_add_action_or_reset(dev, mt6370_unregister_tcpci_port, priv->tcpci);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mt6370_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to allocate irq\n");
+
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+
+ return 0;
+}
+
+static int mt6370_tcpc_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+
+ return 0;
+}
+
+static const struct of_device_id mt6370_tcpc_devid_table[] = {
+ { .compatible = "mediatek,mt6370-tcpc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6370_tcpc_devid_table);
+
+static struct platform_driver mt6370_tcpc_driver = {
+ .driver = {
+ .name = "mt6370-tcpc",
+ .of_match_table = mt6370_tcpc_devid_table,
+ },
+ .probe = mt6370_tcpc_probe,
+ .remove = mt6370_tcpc_remove,
+};
+module_platform_driver(mt6370_tcpc_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("MT6370 USB Type-C Port Controller Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index c1327713f06d..7b217c712c11 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -5,6 +5,7 @@
* Richtek RT1711H Type-C Chip Driver
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
@@ -13,16 +14,27 @@
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#define RT1711H_VID 0x29CF
#define RT1711H_PID 0x1711
+#define RT1711H_DID 0x2171
+#define RT1715_DID 0x2173
-#define RT1711H_RTCTRL8 0x9B
+#define RT1711H_PHYCTRL1 0x80
+#define RT1711H_PHYCTRL2 0x81
+
+#define RT1711H_RTCTRL4 0x93
+/* rx threshold of rd/rp: 1b0 for level 0.4V/0.7V, 1b1 for 0.35V/0.75V */
+#define RT1711H_BMCIO_RXDZSEL BIT(0)
+#define RT1711H_RTCTRL8 0x9B
/* Autoidle timeout = (tout * 2 + 1) * 6.4ms */
#define RT1711H_RTCTRL8_SET(ck300, ship_off, auto_idle, tout) \
(((ck300) << 7) | ((ship_off) << 5) | \
((auto_idle) << 3) | ((tout) & 0x07))
+#define RT1711H_AUTOIDLEEN BIT(3)
+#define RT1711H_ENEXTMSG BIT(4)
#define RT1711H_RTCTRL11 0x9E
@@ -35,10 +47,17 @@
#define RT1711H_RTCTRL15 0xA2
#define RT1711H_RTCTRL16 0xA3
+#define RT1711H_RTCTRL18 0xAF
+/* 1b0 as fixed rx threshold of rd/rp 0.55V, 1b1 depends on RTCTRL4[0] */
+#define BMCIO_RXDZEN BIT(0)
+
struct rt1711h_chip {
struct tcpci_data data;
struct tcpci *tcpci;
struct device *dev;
+ struct regulator *vbus;
+ bool src_en;
+ u16 did;
};
static int rt1711h_read16(struct rt1711h_chip *chip, unsigned int reg, u16 *val)
@@ -75,8 +94,9 @@ static struct rt1711h_chip *tdata_to_rt1711h(struct tcpci_data *tdata)
static int rt1711h_init(struct tcpci *tcpci, struct tcpci_data *tdata)
{
- int ret;
struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
+ struct regmap *regmap = chip->data.regmap;
+ int ret;
/* CK 300K from 320K, shipping off, auto_idle enable, tout = 32ms */
ret = rt1711h_write8(chip, RT1711H_RTCTRL8,
@@ -84,6 +104,14 @@ static int rt1711h_init(struct tcpci *tcpci, struct tcpci_data *tdata)
if (ret < 0)
return ret;
+ /* Enable PD30 extended message for RT1715 */
+ if (chip->did == RT1715_DID) {
+ ret = regmap_update_bits(regmap, RT1711H_RTCTRL8,
+ RT1711H_ENEXTMSG, RT1711H_ENEXTMSG);
+ if (ret < 0)
+ return ret;
+ }
+
/* I2C reset : (val + 1) * 12.5ms */
ret = rt1711h_write8(chip, RT1711H_RTCTRL11,
RT1711H_RTCTRL11_SET(1, 0x0F));
@@ -101,7 +129,37 @@ static int rt1711h_init(struct tcpci *tcpci, struct tcpci_data *tdata)
return ret;
/* dcSRC.DRP : 33% */
- return rt1711h_write16(chip, RT1711H_RTCTRL16, 330);
+ ret = rt1711h_write16(chip, RT1711H_RTCTRL16, 330);
+ if (ret < 0)
+ return ret;
+
+ /* Enable phy discard retry, retry count 7, rx filter deglitch 100 us */
+ ret = rt1711h_write8(chip, RT1711H_PHYCTRL1, 0xF1);
+ if (ret < 0)
+ return ret;
+
+ /* Decrease wait time of BMC-encoded 1 bit from 2.67us to 2.55us */
+ /* wait time : (val * .4167) us */
+ return rt1711h_write8(chip, RT1711H_PHYCTRL2, 62);
+}
+
+static int rt1711h_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata,
+ bool src, bool snk)
+{
+ struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
+ int ret;
+
+ if (chip->src_en == src)
+ return 0;
+
+ if (src)
+ ret = regulator_enable(chip->vbus);
+ else
+ ret = regulator_disable(chip->vbus);
+
+ if (!ret)
+ chip->src_en = src;
+ return ret;
}
static int rt1711h_set_vconn(struct tcpci *tcpci, struct tcpci_data *tdata,
@@ -109,8 +167,55 @@ static int rt1711h_set_vconn(struct tcpci *tcpci, struct tcpci_data *tdata,
{
struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
- return rt1711h_write8(chip, RT1711H_RTCTRL8,
- RT1711H_RTCTRL8_SET(0, 1, !enable, 2));
+ return regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL8,
+ RT1711H_AUTOIDLEEN, enable ? 0 : RT1711H_AUTOIDLEEN);
+}
+
+/*
+ * Selects the CC PHY noise filter voltage level according to the remote
+ * side's current CC voltage level.
+ *
+ * @status: The port's current CC status read from the IC
+ * Return 0 if the writes succeed; a failure code otherwise
+ */
+static inline int rt1711h_init_cc_params(struct rt1711h_chip *chip, u8 status)
+{
+ int ret, cc1, cc2;
+ u8 role = 0;
+ u32 rxdz_en, rxdz_sel;
+
+ ret = rt1711h_read8(chip, TCPC_ROLE_CTRL, &role);
+ if (ret < 0)
+ return ret;
+
+ cc1 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC1_SHIFT) &
+ TCPC_CC_STATUS_CC1_MASK,
+ status & TCPC_CC_STATUS_TERM ||
+ tcpc_presenting_rd(role, CC1));
+ cc2 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC2_SHIFT) &
+ TCPC_CC_STATUS_CC2_MASK,
+ status & TCPC_CC_STATUS_TERM ||
+ tcpc_presenting_rd(role, CC2));
+
+ if ((cc1 >= TYPEC_CC_RP_1_5 && cc2 < TYPEC_CC_RP_DEF) ||
+ (cc2 >= TYPEC_CC_RP_1_5 && cc1 < TYPEC_CC_RP_DEF)) {
+ rxdz_en = BMCIO_RXDZEN;
+ if (chip->did == RT1715_DID)
+ rxdz_sel = RT1711H_BMCIO_RXDZSEL;
+ else
+ rxdz_sel = 0;
+ } else {
+ rxdz_en = 0;
+ rxdz_sel = RT1711H_BMCIO_RXDZSEL;
+ }
+
+ ret = regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL18,
+ BMCIO_RXDZEN, rxdz_en);
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL4,
+ RT1711H_BMCIO_RXDZSEL, rxdz_sel);
}
static int rt1711h_start_drp_toggling(struct tcpci *tcpci,
@@ -173,6 +278,8 @@ static irqreturn_t rt1711h_irq(int irq, void *dev_id)
/* Clear cc change event triggered by starting toggling */
if (status & TCPC_CC_STATUS_TOGGLING)
rt1711h_write8(chip, TCPC_ALERT, TCPC_ALERT_CC_STATUS);
+ else
+ rt1711h_init_cc_params(chip, status);
}
out:
@@ -191,7 +298,7 @@ static int rt1711h_sw_reset(struct rt1711h_chip *chip)
return 0;
}
-static int rt1711h_check_revision(struct i2c_client *i2c)
+static int rt1711h_check_revision(struct i2c_client *i2c, struct rt1711h_chip *chip)
{
int ret;
@@ -209,7 +316,15 @@ static int rt1711h_check_revision(struct i2c_client *i2c)
dev_err(&i2c->dev, "pid is not correct, 0x%04x\n", ret);
return -ENODEV;
}
- return 0;
+ ret = i2c_smbus_read_word_data(i2c, TCPC_BCD_DEV);
+ if (ret < 0)
+ return ret;
+ if (ret != chip->did) {
+ dev_err(&i2c->dev, "did is not correct, 0x%04x\n", ret);
+ return -ENODEV;
+ }
+ dev_dbg(&i2c->dev, "did is 0x%04x\n", ret);
+ return ret;
}
static int rt1711h_probe(struct i2c_client *client,
@@ -218,16 +333,18 @@ static int rt1711h_probe(struct i2c_client *client,
int ret;
struct rt1711h_chip *chip;
- ret = rt1711h_check_revision(client);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->did = (size_t)device_get_match_data(&client->dev);
+
+ ret = rt1711h_check_revision(client, chip);
if (ret < 0) {
dev_err(&client->dev, "check vid/pid fail\n");
return ret;
}
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
chip->data.regmap = devm_regmap_init_i2c(client,
&rt1711h_regmap_config);
if (IS_ERR(chip->data.regmap))
@@ -245,7 +362,12 @@ static int rt1711h_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ chip->vbus = devm_regulator_get(&client->dev, "vbus");
+ if (IS_ERR(chip->vbus))
+ return PTR_ERR(chip->vbus);
+
chip->data.init = rt1711h_init;
+ chip->data.set_vbus = rt1711h_set_vbus;
chip->data.set_vconn = rt1711h_set_vconn;
chip->data.start_drp_toggling = rt1711h_start_drp_toggling;
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
@@ -272,13 +394,15 @@ static void rt1711h_remove(struct i2c_client *client)
static const struct i2c_device_id rt1711h_id[] = {
{ "rt1711h", 0 },
+ { "rt1715", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt1711h_id);
#ifdef CONFIG_OF
static const struct of_device_id rt1711h_of_match[] = {
- { .compatible = "richtek,rt1711h", },
+ { .compatible = "richtek,rt1711h", .data = (void *)RT1711H_DID },
+ { .compatible = "richtek,rt1715", .data = (void *)RT1715_DID },
{},
};
MODULE_DEVICE_TABLE(of, rt1711h_of_match);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 6364f0d467ea..74fb5a4c6f21 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1067,11 +1067,9 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
cap->fwnode = ucsi_find_fwnode(con);
con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
- if (IS_ERR(con->usb_role_sw)) {
- dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
- con->num);
- return PTR_ERR(con->usb_role_sw);
- }
+ if (IS_ERR(con->usb_role_sw))
+ return dev_err_probe(ucsi->dev, PTR_ERR(con->usb_role_sw),
+ "con%d: failed to get usb role switch\n", con->num);
/* Delay other interactions with the con until registration is complete */
mutex_lock(&con->lock);
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 349756335362..835f1c4372ba 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -125,6 +125,11 @@ struct version_format {
#define CCG_FW_BUILD_NVIDIA (('n' << 8) | 'v')
#define CCG_OLD_FW_VERSION (CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))
+/* Firmware for Tegra doesn't support the UCSI ALT command; builds
+ * for NVIDIA have a known issue of reporting wrong capability info.
+ */
+#define CCG_FW_BUILD_NVIDIA_TEGRA (('g' << 8) | 'n')
+
/* Altmode offset for NVIDIA Function Test Board (FTB) */
#define NVIDIA_FTB_DP_OFFSET (2)
#define NVIDIA_FTB_DBG_OFFSET (3)
@@ -513,6 +518,7 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
+ struct ucsi_capability *cap;
struct ucsi_altmode *alt;
int ret;
@@ -536,6 +542,12 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
ucsi_ccg_nvidia_altmode(uc, alt);
}
break;
+ case UCSI_GET_CAPABILITY:
+ if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
+ cap = val;
+ cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
+ }
+ break;
default:
break;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index 16289ff583b4..7b92f0c8de70 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -599,7 +599,7 @@ static int ucsi_stm32g0_probe_bootloader(struct ucsi *ucsi)
g0->i2c_bl = i2c_new_dummy_device(g0->client->adapter, STM32G0_I2C_BL_ADDR);
if (IS_ERR(g0->i2c_bl)) {
ret = dev_err_probe(g0->dev, PTR_ERR(g0->i2c_bl),
- "Failed to register booloader I2C address\n");
+ "Failed to register bootloader I2C address\n");
return ret;
}
}
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 77a5b3f8736a..e8c3131a8543 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -100,7 +100,7 @@ static int add_match_busid(char *busid)
for (i = 0; i < MAX_BUSID; i++) {
spin_lock(&busid_table[i].busid_lock);
if (!busid_table[i].name[0]) {
- strlcpy(busid_table[i].name, busid, BUSID_SIZE);
+ strscpy(busid_table[i].name, busid, BUSID_SIZE);
if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
(busid_table[i].status != STUB_BUSID_REMOV))
busid_table[i].status = STUB_BUSID_ADDED;
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 5dd41e8215e0..fc01b31bbb87 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -464,7 +464,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
int nents;
int num_urbs = 1;
int pipe = get_pipe(sdev, pdu);
- int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG;
+ int use_sg = pdu->u.cmd_submit.transfer_flags & USBIP_URB_DMA_MAP_SG;
int support_sg = 1;
int np = 0;
int ret, i;
@@ -514,7 +514,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
num_urbs = nents;
priv->completed_urbs = 0;
pdu->u.cmd_submit.transfer_flags &=
- ~URB_DMA_MAP_SG;
+ ~USBIP_URB_DMA_MAP_SG;
}
} else {
buffer = kzalloc(buf_len, GFP_KERNEL);
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 2ab99244bc31..053a2bca4c47 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -344,6 +344,91 @@ static unsigned int tweak_transfer_flags(unsigned int flags)
return flags;
}
+/*
+ * The USBIP driver packs URB transfer flags into the PDUs exchanged
+ * between the server (usbip_host) and the client (vhci_hcd). URB_* flags
+ * are internal to the kernel and may change, whereas the USBIP_URB_* flags
+ * carried in PDUs are part of the USBIP user API and must not change.
+ *
+ * The USBIP_URB_* flags are exported as an explicit API, and the client
+ * and server map kernel flags to and from USBIP_URB_*. Details as follows:
+ *
+ * Client tx path (USBIP_CMD_SUBMIT):
+ * - Maps URB_* to USBIP_URB_* when it sends USBIP_CMD_SUBMIT packet.
+ *
+ * Server rx path (USBIP_CMD_SUBMIT):
+ * - Maps USBIP_URB_* to URB_* when it receives USBIP_CMD_SUBMIT packet.
+ *
+ * Flags aren't included in USBIP_CMD_UNLINK and USBIP_RET_SUBMIT packets
+ * and no special handling is needed for them in the following cases:
+ * - Server rx path (USBIP_CMD_UNLINK)
+ * - Client rx path & Server tx path (USBIP_RET_SUBMIT)
+ *
+ * Code paths:
+ * usbip_pack_pdu() is the common routine that handles packing pdu from
+ * urb and unpack pdu to an urb.
+ *
+ * usbip_pack_cmd_submit() and usbip_pack_ret_submit() handle
+ * USBIP_CMD_SUBMIT and USBIP_RET_SUBMIT respectively.
+ *
+ * usbip_map_urb_to_usbip() and usbip_map_usbip_to_urb() are used
+ * by usbip_pack_cmd_submit() and usbip_pack_ret_submit() to map
+ * flags.
+ */
+
+struct urb_to_usbip_flags {
+ u32 urb_flag;
+ u32 usbip_flag;
+};
+
+#define NUM_USBIP_FLAGS 17
+
+static const struct urb_to_usbip_flags flag_map[NUM_USBIP_FLAGS] = {
+ {URB_SHORT_NOT_OK, USBIP_URB_SHORT_NOT_OK},
+ {URB_ISO_ASAP, USBIP_URB_ISO_ASAP},
+ {URB_NO_TRANSFER_DMA_MAP, USBIP_URB_NO_TRANSFER_DMA_MAP},
+ {URB_ZERO_PACKET, USBIP_URB_ZERO_PACKET},
+ {URB_NO_INTERRUPT, USBIP_URB_NO_INTERRUPT},
+ {URB_FREE_BUFFER, USBIP_URB_FREE_BUFFER},
+ {URB_DIR_IN, USBIP_URB_DIR_IN},
+ {URB_DIR_OUT, USBIP_URB_DIR_OUT},
+ {URB_DIR_MASK, USBIP_URB_DIR_MASK},
+ {URB_DMA_MAP_SINGLE, USBIP_URB_DMA_MAP_SINGLE},
+ {URB_DMA_MAP_PAGE, USBIP_URB_DMA_MAP_PAGE},
+ {URB_DMA_MAP_SG, USBIP_URB_DMA_MAP_SG},
+ {URB_MAP_LOCAL, USBIP_URB_MAP_LOCAL},
+ {URB_SETUP_MAP_SINGLE, USBIP_URB_SETUP_MAP_SINGLE},
+ {URB_SETUP_MAP_LOCAL, USBIP_URB_SETUP_MAP_LOCAL},
+ {URB_DMA_SG_COMBINED, USBIP_URB_DMA_SG_COMBINED},
+ {URB_ALIGNED_TEMP_BUFFER, USBIP_URB_ALIGNED_TEMP_BUFFER},
+};
+
+static unsigned int urb_to_usbip(unsigned int flags)
+{
+ unsigned int map_flags = 0;
+ int loop;
+
+ for (loop = 0; loop < NUM_USBIP_FLAGS; loop++) {
+ if (flags & flag_map[loop].urb_flag)
+ map_flags |= flag_map[loop].usbip_flag;
+ }
+
+ return map_flags;
+}
+
+static unsigned int usbip_to_urb(unsigned int flags)
+{
+ unsigned int map_flags = 0;
+ int loop;
+
+ for (loop = 0; loop < NUM_USBIP_FLAGS; loop++) {
+ if (flags & flag_map[loop].usbip_flag)
+ map_flags |= flag_map[loop].urb_flag;
+ }
+
+ return map_flags;
+}
+
static void usbip_pack_cmd_submit(struct usbip_header *pdu, struct urb *urb,
int pack)
{
@@ -354,14 +439,14 @@ static void usbip_pack_cmd_submit(struct usbip_header *pdu, struct urb *urb,
* will be discussed when usbip is ported to other operating systems.
*/
if (pack) {
- spdu->transfer_flags =
- tweak_transfer_flags(urb->transfer_flags);
+ /* map after tweaking the urb flags */
+ spdu->transfer_flags = urb_to_usbip(tweak_transfer_flags(urb->transfer_flags));
spdu->transfer_buffer_length = urb->transfer_buffer_length;
spdu->start_frame = urb->start_frame;
spdu->number_of_packets = urb->number_of_packets;
spdu->interval = urb->interval;
} else {
- urb->transfer_flags = spdu->transfer_flags;
+ urb->transfer_flags = usbip_to_urb(spdu->transfer_flags);
urb->transfer_buffer_length = spdu->transfer_buffer_length;
urb->start_frame = spdu->start_frame;
urb->number_of_packets = spdu->number_of_packets;
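[Illustrative sketch, not part of the patch.] The effect of the new mapping is that only the fixed USBIP_URB_* values ever go on the wire, so a flag set should round-trip through the two helpers unchanged even if the kernel's internal URB_* bit assignments move between releases:

	/* Illustration only: wire flags are decoupled from kernel URB_* values. */
	u32 wire = urb_to_usbip(URB_DIR_IN | URB_ZERO_PACKET);
	u32 kern = usbip_to_urb(wire);
	/* kern == (URB_DIR_IN | URB_ZERO_PACKET): the PDU carries the stable
	 * USBIP_URB_* bits, and each side translates to its own URB_* values. */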
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 7badf5777597..febdc99b51a7 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -600,6 +600,11 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
}
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
}
+ if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
+ config.device_features =
+ nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
+ }
/* Skip checking capability if user didn't prefer to configure any
* device networking attributes. It is likely that user might have used
@@ -799,51 +804,76 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
return msg->len;
}
-static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
- struct sk_buff *msg, u64 features,
+static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
const struct virtio_net_config *config)
{
u16 val_u16;
- if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
+ if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
+ (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
return 0;
- val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
+ val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);
+
return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}
+static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_net_config *config)
+{
+ u16 val_u16;
+
+ if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
+ return 0;
+
+ val_u16 = __virtio16_to_cpu(true, config->mtu);
+
+ return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
+}
+
+static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_net_config *config)
+{
+ if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
+ return 0;
+
+ return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
+ sizeof(config->mac), config->mac);
+}
+
static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
struct virtio_net_config config = {};
- u64 features;
+ u64 features_device;
u16 val_u16;
- vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
-
- if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
- config.mac))
- return -EMSGSIZE;
+ vdev->config->get_config(vdev, 0, &config, sizeof(config));
val_u16 = __virtio16_to_cpu(true, config.status);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
return -EMSGSIZE;
- val_u16 = __virtio16_to_cpu(true, config.mtu);
- if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
- return -EMSGSIZE;
+ features_device = vdev->config->get_device_features(vdev);
- features = vdev->config->get_driver_features(vdev);
- if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
VDPA_ATTR_PAD))
return -EMSGSIZE;
- return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
+ if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
int flags, struct netlink_ext_ack *extack)
{
+ u64 features_driver;
+ u8 status = 0;
u32 device_id;
void *hdr;
int err;
@@ -867,6 +897,17 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
goto msg_err;
}
+ /* only read driver features after the feature negotiation is done */
+ status = vdev->config->get_status(vdev);
+ if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
+ features_driver = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
+ VDPA_ATTR_PAD)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+ }
+
switch (device_id) {
case VIRTIO_ID_NET:
err = vdpa_dev_net_config_fill(vdev, msg);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 225b7f5d8be3..b071f0d842fb 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -18,6 +18,7 @@
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>
+#include <uapi/linux/vdpa.h>
#include "vdpa_sim.h"
@@ -245,13 +246,22 @@ static const struct dma_map_ops vdpasim_dma_ops = {
static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;
-struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
+ const struct vdpa_dev_set_config *config)
{
const struct vdpa_config_ops *ops;
struct vdpasim *vdpasim;
struct device *dev;
int i, ret = -ENOMEM;
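+	/*
+	 * A provisioned feature set, if any, must be a subset of what the
+	 * simulator supports; it then replaces the advertised feature set.
+	 */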
+ if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
+ if (config->device_features &
+ ~dev_attr->supported_features)
+ return ERR_PTR(-EINVAL);
+ dev_attr->supported_features =
+ config->device_features;
+ }
+
if (batch_mapping)
ops = &vdpasim_batch_config_ops;
else
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 061986f30911..0e78737dcc16 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -71,7 +71,8 @@ struct vdpasim {
spinlock_t iommu_lock;
};
-struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *attr);
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *attr,
+ const struct vdpa_dev_set_config *config);
/* TODO: cross-endian support */
static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index c8bfea3b7db2..c6db1a1baf76 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -383,7 +383,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.work_fn = vdpasim_blk_work;
dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;
- simdev = vdpasim_create(&dev_attr);
+ simdev = vdpasim_create(&dev_attr, config);
if (IS_ERR(simdev))
return PTR_ERR(simdev);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 886449e88502..c3cb225ea469 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -254,7 +254,7 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.work_fn = vdpasim_net_work;
dev_attr.buffer_size = PAGE_SIZE;
- simdev = vdpasim_create(&dev_attr);
+ simdev = vdpasim_create(&dev_attr, config);
if (IS_ERR(simdev))
return PTR_ERR(simdev);
@@ -294,7 +294,8 @@ static struct vdpa_mgmt_dev mgmt_dev = {
.id_table = id_table,
.ops = &vdpasim_net_mgmtdev_ops,
.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
- 1 << VDPA_ATTR_DEV_NET_CFG_MTU),
+ 1 << VDPA_ATTR_DEV_NET_CFG_MTU |
+ 1 << VDPA_ATTR_DEV_FEATURES),
.max_supported_vqs = VDPASIM_NET_VQ_NUM,
.supported_features = VDPASIM_NET_FEATURES,
};
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 04522077735b..d448db0c4de3 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -17,6 +17,7 @@
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>
+#include <uapi/linux/vdpa.h>
#define VP_VDPA_QUEUE_MAX 256
#define VP_VDPA_DRIVER_NAME "vp_vdpa"
@@ -35,6 +36,7 @@ struct vp_vdpa {
struct virtio_pci_modern_device *mdev;
struct vp_vring *vring;
struct vdpa_callback config_cb;
+ u64 device_features;
char msix_name[VP_VDPA_NAME_SIZE];
int config_irq;
int queues;
@@ -66,9 +68,9 @@ static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
- struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- return vp_modern_get_features(mdev);
+ return vp_vdpa->device_features;
}
static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
@@ -475,6 +477,7 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
struct pci_dev *pdev = mdev->pci_dev;
struct device *dev = &pdev->dev;
struct vp_vdpa *vp_vdpa = NULL;
+ u64 device_features;
int ret, i;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
@@ -491,6 +494,20 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
vp_vdpa->queues = vp_modern_get_num_queues(mdev);
vp_vdpa->mdev = mdev;
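+	/*
+	 * Start from the feature set exposed by the physical device; if
+	 * userspace provisioned a feature set via VDPA_ATTR_DEV_FEATURES,
+	 * validate it against the device and use that instead.
+	 */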
+ device_features = vp_modern_get_features(mdev);
+ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
+ if (add_config->device_features & ~device_features) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Try to provision features "
+ "that are not supported by the device: "
+ "device_features 0x%llx provisioned 0x%llx\n",
+ device_features, add_config->device_features);
+ goto err;
+ }
+ device_features = add_config->device_features;
+ }
+ vp_vdpa->device_features = device_features;
+
ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
if (ret) {
dev_err(&pdev->dev,
@@ -599,6 +616,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mgtdev->id_table = mdev_id;
mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
mgtdev->supported_features = vp_modern_get_features(mdev);
+ mgtdev->config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
pci_set_master(pdev);
pci_set_drvdata(pdev, vp_vdpa_mgtdev);
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 6130d00252ed..86c381ceb9a1 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -3,6 +3,7 @@ menuconfig VFIO
tristate "VFIO Non-Privileged userspace driver framework"
select IOMMU_API
select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
+ select INTERVAL_TREE
help
VFIO provides a framework for secure userspace device drivers.
See Documentation/driver-api/vfio.rst for more details.
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 1a32357592e3..b693a1169286 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
vfio_virqfd-y := virqfd.o
-vfio-y += vfio_main.o
-
obj-$(CONFIG_VFIO) += vfio.o
+
+vfio-y += vfio_main.o \
+ iova_bitmap.o \
+ container.o
+
obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
diff --git a/drivers/vfio/container.c b/drivers/vfio/container.c
new file mode 100644
index 000000000000..d74164abbf40
--- /dev/null
+++ b/drivers/vfio/container.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ *
+ * VFIO container (/dev/vfio/vfio)
+ */
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/capability.h>
+#include <linux/iommu.h>
+#include <linux/miscdevice.h>
+#include <linux/vfio.h>
+#include <uapi/linux/vfio.h>
+
+#include "vfio.h"
+
+struct vfio_container {
+ struct kref kref;
+ struct list_head group_list;
+ struct rw_semaphore group_lock;
+ struct vfio_iommu_driver *iommu_driver;
+ void *iommu_data;
+ bool noiommu;
+};
+
+static struct vfio {
+ struct list_head iommu_drivers_list;
+ struct mutex iommu_drivers_lock;
+} vfio;
+
+#ifdef CONFIG_VFIO_NOIOMMU
+bool vfio_noiommu __read_mostly;
+module_param_named(enable_unsafe_noiommu_mode,
+ vfio_noiommu, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
+#endif
+
+static void *vfio_noiommu_open(unsigned long arg)
+{
+ if (arg != VFIO_NOIOMMU_IOMMU)
+ return ERR_PTR(-EINVAL);
+ if (!capable(CAP_SYS_RAWIO))
+ return ERR_PTR(-EPERM);
+
+ return NULL;
+}
+
+static void vfio_noiommu_release(void *iommu_data)
+{
+}
+
+static long vfio_noiommu_ioctl(void *iommu_data,
+ unsigned int cmd, unsigned long arg)
+{
+ if (cmd == VFIO_CHECK_EXTENSION)
+ return vfio_noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;
+
+ return -ENOTTY;
+}
+
+static int vfio_noiommu_attach_group(void *iommu_data,
+ struct iommu_group *iommu_group, enum vfio_group_type type)
+{
+ return 0;
+}
+
+static void vfio_noiommu_detach_group(void *iommu_data,
+ struct iommu_group *iommu_group)
+{
+}
+
+static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
+ .name = "vfio-noiommu",
+ .owner = THIS_MODULE,
+ .open = vfio_noiommu_open,
+ .release = vfio_noiommu_release,
+ .ioctl = vfio_noiommu_ioctl,
+ .attach_group = vfio_noiommu_attach_group,
+ .detach_group = vfio_noiommu_detach_group,
+};
+
+/*
+ * Only noiommu containers can use vfio-noiommu and noiommu containers can only
+ * use vfio-noiommu.
+ */
+static bool vfio_iommu_driver_allowed(struct vfio_container *container,
+ const struct vfio_iommu_driver *driver)
+{
+ if (!IS_ENABLED(CONFIG_VFIO_NOIOMMU))
+ return true;
+ return container->noiommu == (driver->ops == &vfio_noiommu_ops);
+}
+
+/*
+ * IOMMU driver registration
+ */
+int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
+{
+ struct vfio_iommu_driver *driver, *tmp;
+
+ if (WARN_ON(!ops->register_device != !ops->unregister_device))
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ return -ENOMEM;
+
+ driver->ops = ops;
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+
+ /* Check for duplicates */
+ list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
+ if (tmp->ops == ops) {
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ kfree(driver);
+ return -EINVAL;
+ }
+ }
+
+ list_add(&driver->vfio_next, &vfio.iommu_drivers_list);
+
+ mutex_unlock(&vfio.iommu_drivers_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
+
+void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
+{
+ struct vfio_iommu_driver *driver;
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
+ if (driver->ops == ops) {
+ list_del(&driver->vfio_next);
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ kfree(driver);
+ return;
+ }
+ }
+ mutex_unlock(&vfio.iommu_drivers_lock);
+}
+EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
+
+/*
+ * Container objects - containers are created when /dev/vfio/vfio is
+ * opened, but their lifecycle extends until the last user is done, so
+ * it's freed via kref. Must support container/group/device being
+ * closed in any order.
+ */
+static void vfio_container_release(struct kref *kref)
+{
+ struct vfio_container *container;
+ container = container_of(kref, struct vfio_container, kref);
+
+ kfree(container);
+}
+
+static void vfio_container_get(struct vfio_container *container)
+{
+ kref_get(&container->kref);
+}
+
+static void vfio_container_put(struct vfio_container *container)
+{
+ kref_put(&container->kref, vfio_container_release);
+}
+
+void vfio_device_container_register(struct vfio_device *device)
+{
+ struct vfio_iommu_driver *iommu_driver =
+ device->group->container->iommu_driver;
+
+ if (iommu_driver && iommu_driver->ops->register_device)
+ iommu_driver->ops->register_device(
+ device->group->container->iommu_data, device);
+}
+
+void vfio_device_container_unregister(struct vfio_device *device)
+{
+ struct vfio_iommu_driver *iommu_driver =
+ device->group->container->iommu_driver;
+
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
+}
+
+long vfio_container_ioctl_check_extension(struct vfio_container *container,
+ unsigned long arg)
+{
+ struct vfio_iommu_driver *driver;
+ long ret = 0;
+
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+
+ switch (arg) {
+ /* No base extensions yet */
+ default:
+ /*
+ * If no driver is set, poll all registered drivers for
+ * extensions and return the first positive result. If
+ * a driver is already set, further queries will be passed
+ * only to that driver.
+ */
+ if (!driver) {
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list,
+ vfio_next) {
+
+ if (!list_empty(&container->group_list) &&
+ !vfio_iommu_driver_allowed(container,
+ driver))
+ continue;
+ if (!try_module_get(driver->ops->owner))
+ continue;
+
+ ret = driver->ops->ioctl(NULL,
+ VFIO_CHECK_EXTENSION,
+ arg);
+ module_put(driver->ops->owner);
+ if (ret > 0)
+ break;
+ }
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ } else
+ ret = driver->ops->ioctl(container->iommu_data,
+ VFIO_CHECK_EXTENSION, arg);
+ }
+
+ up_read(&container->group_lock);
+
+ return ret;
+}
+
+/* hold write lock on container->group_lock */
+static int __vfio_container_attach_groups(struct vfio_container *container,
+ struct vfio_iommu_driver *driver,
+ void *data)
+{
+ struct vfio_group *group;
+ int ret = -ENODEV;
+
+ list_for_each_entry(group, &container->group_list, container_next) {
+ ret = driver->ops->attach_group(data, group->iommu_group,
+ group->type);
+ if (ret)
+ goto unwind;
+ }
+
+ return ret;
+
+unwind:
+ list_for_each_entry_continue_reverse(group, &container->group_list,
+ container_next) {
+ driver->ops->detach_group(data, group->iommu_group);
+ }
+
+ return ret;
+}
+
+static long vfio_ioctl_set_iommu(struct vfio_container *container,
+ unsigned long arg)
+{
+ struct vfio_iommu_driver *driver;
+ long ret = -ENODEV;
+
+ down_write(&container->group_lock);
+
+ /*
+ * The container is designed to be an unprivileged interface while
+ * the group can be assigned to specific users. Therefore, only by
+ * adding a group to a container does the user get the privilege of
+ * enabling the iommu, which may allocate finite resources. There
+ * is no unset_iommu, but by removing all the groups from a container,
+ * the container is deprivileged and returns to an unset state.
+ */
+ if (list_empty(&container->group_list) || container->iommu_driver) {
+ up_write(&container->group_lock);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
+ void *data;
+
+ if (!vfio_iommu_driver_allowed(container, driver))
+ continue;
+ if (!try_module_get(driver->ops->owner))
+ continue;
+
+ /*
+ * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
+ * so test which iommu driver reported support for this
+ * extension and call open on them. We also pass them the
+ * magic, allowing a single driver to support multiple
+ * interfaces if they'd like.
+ */
+ if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
+ module_put(driver->ops->owner);
+ continue;
+ }
+
+ data = driver->ops->open(arg);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ module_put(driver->ops->owner);
+ continue;
+ }
+
+ ret = __vfio_container_attach_groups(container, driver, data);
+ if (ret) {
+ driver->ops->release(data);
+ module_put(driver->ops->owner);
+ continue;
+ }
+
+ container->iommu_driver = driver;
+ container->iommu_data = data;
+ break;
+ }
+
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ up_write(&container->group_lock);
+
+ return ret;
+}
+
+static long vfio_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver;
+ void *data;
+ long ret = -EINVAL;
+
+ if (!container)
+ return ret;
+
+ switch (cmd) {
+ case VFIO_GET_API_VERSION:
+ ret = VFIO_API_VERSION;
+ break;
+ case VFIO_CHECK_EXTENSION:
+ ret = vfio_container_ioctl_check_extension(container, arg);
+ break;
+ case VFIO_SET_IOMMU:
+ ret = vfio_ioctl_set_iommu(container, arg);
+ break;
+ default:
+ driver = container->iommu_driver;
+ data = container->iommu_data;
+
+ if (driver) /* passthrough all unrecognized ioctls */
+ ret = driver->ops->ioctl(data, cmd, arg);
+ }
+
+ return ret;
+}
+
+static int vfio_fops_open(struct inode *inode, struct file *filep)
+{
+ struct vfio_container *container;
+
+ container = kzalloc(sizeof(*container), GFP_KERNEL);
+ if (!container)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&container->group_list);
+ init_rwsem(&container->group_lock);
+ kref_init(&container->kref);
+
+ filep->private_data = container;
+
+ return 0;
+}
+
+static int vfio_fops_release(struct inode *inode, struct file *filep)
+{
+ struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver = container->iommu_driver;
+
+ if (driver && driver->ops->notify)
+ driver->ops->notify(container->iommu_data,
+ VFIO_IOMMU_CONTAINER_CLOSE);
+
+ filep->private_data = NULL;
+
+ vfio_container_put(container);
+
+ return 0;
+}
+
+static const struct file_operations vfio_fops = {
+ .owner = THIS_MODULE,
+ .open = vfio_fops_open,
+ .release = vfio_fops_release,
+ .unlocked_ioctl = vfio_fops_unl_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+struct vfio_container *vfio_container_from_file(struct file *file)
+{
+ struct vfio_container *container;
+
+ /* Sanity check, is this really our fd? */
+ if (file->f_op != &vfio_fops)
+ return NULL;
+
+ container = file->private_data;
+ WARN_ON(!container); /* fget ensures we don't race vfio_release */
+ return container;
+}
+
+static struct miscdevice vfio_dev = {
+ .minor = VFIO_MINOR,
+ .name = "vfio",
+ .fops = &vfio_fops,
+ .nodename = "vfio/vfio",
+ .mode = S_IRUGO | S_IWUGO,
+};
+
+int vfio_container_attach_group(struct vfio_container *container,
+ struct vfio_group *group)
+{
+ struct vfio_iommu_driver *driver;
+ int ret = 0;
+
+ lockdep_assert_held(&group->group_lock);
+
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ down_write(&container->group_lock);
+
+ /* Real groups and fake groups cannot mix */
+ if (!list_empty(&container->group_list) &&
+ container->noiommu != (group->type == VFIO_NO_IOMMU)) {
+ ret = -EPERM;
+ goto out_unlock_container;
+ }
+
+ if (group->type == VFIO_IOMMU) {
+ ret = iommu_group_claim_dma_owner(group->iommu_group, group);
+ if (ret)
+ goto out_unlock_container;
+ }
+
+ driver = container->iommu_driver;
+ if (driver) {
+ ret = driver->ops->attach_group(container->iommu_data,
+ group->iommu_group,
+ group->type);
+ if (ret) {
+ if (group->type == VFIO_IOMMU)
+ iommu_group_release_dma_owner(
+ group->iommu_group);
+ goto out_unlock_container;
+ }
+ }
+
+ group->container = container;
+ group->container_users = 1;
+ container->noiommu = (group->type == VFIO_NO_IOMMU);
+ list_add(&group->container_next, &container->group_list);
+
+ /* Get a reference on the container and mark a user within the group */
+ vfio_container_get(container);
+
+out_unlock_container:
+ up_write(&container->group_lock);
+ return ret;
+}
+
+void vfio_group_detach_container(struct vfio_group *group)
+{
+ struct vfio_container *container = group->container;
+ struct vfio_iommu_driver *driver;
+
+ lockdep_assert_held(&group->group_lock);
+ WARN_ON(group->container_users != 1);
+
+ down_write(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (driver)
+ driver->ops->detach_group(container->iommu_data,
+ group->iommu_group);
+
+ if (group->type == VFIO_IOMMU)
+ iommu_group_release_dma_owner(group->iommu_group);
+
+ group->container = NULL;
+ group->container_users = 0;
+ list_del(&group->container_next);
+
+ /* Detaching the last group deprivileges a container, remove iommu */
+ if (driver && list_empty(&container->group_list)) {
+ driver->ops->release(container->iommu_data);
+ module_put(driver->ops->owner);
+ container->iommu_driver = NULL;
+ container->iommu_data = NULL;
+ }
+
+ up_write(&container->group_lock);
+
+ vfio_container_put(container);
+}
+
+int vfio_device_assign_container(struct vfio_device *device)
+{
+ struct vfio_group *group = device->group;
+
+ lockdep_assert_held(&group->group_lock);
+
+ if (!group->container || !group->container->iommu_driver ||
+ WARN_ON(!group->container_users))
+ return -EINVAL;
+
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ get_file(group->opened_file);
+ group->container_users++;
+ return 0;
+}
+
+void vfio_device_unassign_container(struct vfio_device *device)
+{
+ mutex_lock(&device->group->group_lock);
+ WARN_ON(device->group->container_users <= 1);
+ device->group->container_users--;
+ fput(device->group->opened_file);
+ mutex_unlock(&device->group->group_lock);
+}
+
+/*
+ * Pin contiguous user pages and return their associated host pages for local
+ * domain only.
+ * @device [in] : device
+ * @iova [in] : starting IOVA of user pages to be pinned.
+ * @npage [in] : count of pages to be pinned. This count should not
+ * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @prot [in] : protection flags
+ * @pages[out] : array of host pages
+ * Return error or number of pages pinned.
+ *
+ * A driver may only call this function if the vfio_device was created
+ * by vfio_register_emulated_iommu_dev().
+ */
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages)
+{
+ struct vfio_container *container;
+ struct vfio_group *group = device->group;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ if (!pages || !npage || !vfio_assert_device_open(device))
+ return -EINVAL;
+
+ if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ return -E2BIG;
+
+ /* group->container cannot change while a vfio device is open */
+ container = group->container;
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->pin_pages))
+ ret = driver->ops->pin_pages(container->iommu_data,
+ group->iommu_group, iova,
+ npage, prot, pages);
+ else
+ ret = -ENOTTY;
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_pin_pages);
+
+/*
+ * Unpin contiguous host pages for local domain only.
+ * @device [in] : device
+ * @iova [in] : starting address of user pages to be unpinned.
+ * @npage [in] : count of pages to be unpinned. This count should not
+ * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
+ */
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+
+ if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
+ return;
+
+ if (WARN_ON(!vfio_assert_device_open(device)))
+ return;
+
+ /* group->container cannot change while a vfio device is open */
+ container = device->group->container;
+ driver = container->iommu_driver;
+
+ driver->ops->unpin_pages(container->iommu_data, iova, npage);
+}
+EXPORT_SYMBOL(vfio_unpin_pages);
+
+/*
+ * This interface allows the CPUs to perform some sort of virtual DMA on
+ * behalf of the device.
+ *
+ * CPUs read/write from/into a range of IOVAs pointing to user space memory
+ * into/from a kernel buffer.
+ *
+ * As the read/write of user space memory is conducted via the CPUs and is
+ * not a real device DMA, it is not necessary to pin the user space memory.
+ *
+ * @device [in] : VFIO device
+ * @iova [in] : base IOVA of a user space buffer
+ * @data [in] : pointer to kernel buffer
+ * @len [in] : kernel buffer length
+ * @write : indicate read or write
+ * Return error code on failure or 0 on success.
+ */
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
+ size_t len, bool write)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret = 0;
+
+ if (!data || len <= 0 || !vfio_assert_device_open(device))
+ return -EINVAL;
+
+ /* group->container cannot change while a vfio device is open */
+ container = device->group->container;
+ driver = container->iommu_driver;
+
+ if (likely(driver && driver->ops->dma_rw))
+ ret = driver->ops->dma_rw(container->iommu_data,
+ iova, data, len, write);
+ else
+ ret = -ENOTTY;
+ return ret;
+}
+EXPORT_SYMBOL(vfio_dma_rw);
+
+int __init vfio_container_init(void)
+{
+ int ret;
+
+ mutex_init(&vfio.iommu_drivers_lock);
+ INIT_LIST_HEAD(&vfio.iommu_drivers_list);
+
+ ret = misc_register(&vfio_dev);
+ if (ret) {
+ pr_err("vfio: misc device register failed\n");
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_VFIO_NOIOMMU)) {
+ ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
+ if (ret)
+ goto err_misc;
+ }
+ return 0;
+
+err_misc:
+ misc_deregister(&vfio_dev);
+ return ret;
+}
+
+void vfio_container_cleanup(void)
+{
+ if (IS_ENABLED(CONFIG_VFIO_NOIOMMU))
+ vfio_unregister_iommu_driver(&vfio_noiommu_ops);
+ misc_deregister(&vfio_dev);
+ mutex_destroy(&vfio.iommu_drivers_lock);
+}
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 3feff729f3ce..b16874e913e4 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -108,9 +108,9 @@ static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
/* reset the device before cleaning up the interrupts */
ret = vfio_fsl_mc_reset_device(vdev);
- if (WARN_ON(ret))
+ if (ret)
dev_warn(&mc_cont->dev,
- "VFIO_FLS_MC: reset device has failed (%d)\n", ret);
+ "VFIO_FSL_MC: reset device has failed (%d)\n", ret);
vfio_fsl_mc_irqs_cleanup(vdev);
@@ -418,16 +418,7 @@ static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}
-static const struct vfio_device_ops vfio_fsl_mc_ops = {
- .name = "vfio-fsl-mc",
- .open_device = vfio_fsl_mc_open_device,
- .close_device = vfio_fsl_mc_close_device,
- .ioctl = vfio_fsl_mc_ioctl,
- .read = vfio_fsl_mc_read,
- .write = vfio_fsl_mc_write,
- .mmap = vfio_fsl_mc_mmap,
-};
-
+static const struct vfio_device_ops vfio_fsl_mc_ops;
static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -518,35 +509,43 @@ static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
}
-static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+static int vfio_fsl_mc_init_dev(struct vfio_device *core_vdev)
{
- struct vfio_fsl_mc_device *vdev;
- struct device *dev = &mc_dev->dev;
+ struct vfio_fsl_mc_device *vdev =
+ container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(core_vdev->dev);
int ret;
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
-
- vfio_init_group_dev(&vdev->vdev, dev, &vfio_fsl_mc_ops);
vdev->mc_dev = mc_dev;
mutex_init(&vdev->igate);
if (is_fsl_mc_bus_dprc(mc_dev))
- ret = vfio_assign_device_set(&vdev->vdev, &mc_dev->dev);
+ ret = vfio_assign_device_set(core_vdev, &mc_dev->dev);
else
- ret = vfio_assign_device_set(&vdev->vdev, mc_dev->dev.parent);
- if (ret)
- goto out_uninit;
+ ret = vfio_assign_device_set(core_vdev, mc_dev->dev.parent);
- ret = vfio_fsl_mc_init_device(vdev);
if (ret)
- goto out_uninit;
+ return ret;
+
+ /* device_set is released by vfio core if @init fails */
+ return vfio_fsl_mc_init_device(vdev);
+}
+
+static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+{
+ struct vfio_fsl_mc_device *vdev;
+ struct device *dev = &mc_dev->dev;
+ int ret;
+
+ vdev = vfio_alloc_device(vfio_fsl_mc_device, vdev, dev,
+ &vfio_fsl_mc_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
ret = vfio_register_group_dev(&vdev->vdev);
if (ret) {
dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
- goto out_device;
+ goto out_put_vdev;
}
ret = vfio_fsl_mc_scan_container(mc_dev);
@@ -557,30 +556,44 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
out_group_dev:
vfio_unregister_group_dev(&vdev->vdev);
-out_device:
- vfio_fsl_uninit_device(vdev);
-out_uninit:
- vfio_uninit_group_dev(&vdev->vdev);
- kfree(vdev);
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
return ret;
}
+static void vfio_fsl_mc_release_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_fsl_mc_device *vdev =
+ container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
+
+ vfio_fsl_uninit_device(vdev);
+ mutex_destroy(&vdev->igate);
+ vfio_free_device(core_vdev);
+}
+
static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
struct vfio_fsl_mc_device *vdev = dev_get_drvdata(dev);
vfio_unregister_group_dev(&vdev->vdev);
- mutex_destroy(&vdev->igate);
-
dprc_remove_devices(mc_dev, NULL, 0);
- vfio_fsl_uninit_device(vdev);
-
- vfio_uninit_group_dev(&vdev->vdev);
- kfree(vdev);
+ vfio_put_device(&vdev->vdev);
return 0;
}
+static const struct vfio_device_ops vfio_fsl_mc_ops = {
+ .name = "vfio-fsl-mc",
+ .init = vfio_fsl_mc_init_dev,
+ .release = vfio_fsl_mc_release_dev,
+ .open_device = vfio_fsl_mc_open_device,
+ .close_device = vfio_fsl_mc_close_device,
+ .ioctl = vfio_fsl_mc_ioctl,
+ .read = vfio_fsl_mc_read,
+ .write = vfio_fsl_mc_write,
+ .mmap = vfio_fsl_mc_mmap,
+};
+
static struct fsl_mc_driver vfio_fsl_mc_driver = {
.probe = vfio_fsl_mc_probe,
.remove = vfio_fsl_mc_remove,
diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
new file mode 100644
index 000000000000..6631e8befe1b
--- /dev/null
+++ b/drivers/vfio/iova_bitmap.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#include <linux/iova_bitmap.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
+
+/*
+ * struct iova_bitmap_map - A bitmap representing an IOVA range
+ *
+ * Main data structure for tracking mapped user pages of bitmap data.
+ *
+ * For example, for something recording dirty IOVAs, it will be provided a
+ * struct iova_bitmap structure, as a general structure for iterating the
+ * total IOVA range. The struct iova_bitmap_map, though, represents the
+ * subset of said IOVA space that is pinned by its parent structure (struct
+ * iova_bitmap).
+ *
+ * The user does not need to know the exact location of the bits in the
+ * bitmap. From the user's perspective the only API available is
+ * iova_bitmap_set(), which records an IOVA *range* in the bitmap by
+ * setting the corresponding bits.
+ *
+ * The bitmap is an array of u64 where each bit represents an IOVA range
+ * of (1 << pgshift). Thus the formula for the bitmap data to be set is:
+ *
+ *   data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
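+ *
+ * As a worked example with hypothetical numbers, assuming page_size = 4K:
+ * iova = 0x201000 gives iova / page_size = 0x201 = 513, so the bit lives
+ * in word data[513 / 64] = data[8], at bit position 513 % 64 = 1.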
+ */
+struct iova_bitmap_map {
+ /* base IOVA representing bit 0 of the first page */
+ unsigned long iova;
+
+ /* page size order (shift) that each bit represents */
+ unsigned long pgshift;
+
+ /* page offset of the first user page pinned */
+ unsigned long pgoff;
+
+ /* number of pages pinned */
+ unsigned long npages;
+
+ /* pinned pages representing the bitmap data */
+ struct page **pages;
+};
+
+/*
+ * struct iova_bitmap - The IOVA bitmap object
+ *
+ * Main data structure for iterating over the bitmap data.
+ *
+ * Abstracts the pinning work and iterates in IOVA ranges.
+ * It uses a windowing scheme and pins the bitmap in relatively
+ * big ranges e.g.
+ *
+ * The bitmap object uses one base page to store all the pinned page
+ * pointers related to the bitmap. For sizeof(struct page *) == 8 it stores
+ * 512 struct page pointers, which with a 4K base page size means that 2M
+ * of bitmap data is pinned at a time. If the iova_bitmap page size is
+ * also 4K then the range window to iterate is 64G.
+ *
+ * For example iterating on a total IOVA range of 4G..128G, it will walk
+ * through this set of ranges:
+ *
+ * 4G - 68G-1 (64G)
+ * 68G - 128G-1 (64G)
+ *
+ * An example of how to use the APIs to iterate over the IOVA bitmap:
+ *
+ * bitmap = iova_bitmap_alloc(iova, length, page_size, data);
+ * if (IS_ERR(bitmap))
+ * return PTR_ERR(bitmap);
+ *
+ * ret = iova_bitmap_for_each(bitmap, arg, dirty_reporter_fn);
+ *
+ * iova_bitmap_free(bitmap);
+ *
+ * Each iteration of the @dirty_reporter_fn is called with a unique @iova
+ * and @length argument, indicating the current range available through the
+ * iova_bitmap. The @dirty_reporter_fn uses iova_bitmap_set() to mark dirty
+ * areas (@iova_length) within that provided range, as follows:
+ *
+ * iova_bitmap_set(bitmap, iova, iova_length);
+ *
+ * Internally, the object uses an index @mapped_base_index that tracks
+ * which u64 word of the bitmap is currently mapped, up to @mapped_total_index.
+ * The index keeps being incremented until @mapped_total_index is reached,
+ * while mapping at most PAGE_SIZE / sizeof(struct page *) pages at a time.
+ *
+ * The IOVA bitmap usually lives alongside whatever tracks DMA mapped
+ * ranges, or some other form of IOVA range tracking that correlates with
+ * the user passed bitmap.
+ */
+struct iova_bitmap {
+ /* IOVA range representing the currently mapped bitmap data */
+ struct iova_bitmap_map mapped;
+
+ /* userspace address of the bitmap */
+ u64 __user *bitmap;
+
+ /* u64 index that @mapped points to */
+ unsigned long mapped_base_index;
+
+ /* how many u64 can we walk in total */
+ unsigned long mapped_total_index;
+
+ /* base IOVA of the whole bitmap */
+ unsigned long iova;
+
+ /* length of the IOVA range for the whole bitmap */
+ size_t length;
+};
+
+/*
+ * Converts a relative IOVA to a bitmap index.
+ * This function provides the index into the u64 array (bitmap::bitmap)
+ * for a given IOVA offset.
+ * Relative IOVA means relative to the bitmap::mapped base IOVA
+ * (stored in mapped::iova). All computations in this file are done using
+ * relative IOVAs and thus avoid an extra subtraction against mapped::iova.
+ * The user API iova_bitmap_set() always uses absolute IOVAs.
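+ *
+ * As an illustrative example (hypothetical numbers, assuming 4K pages):
+ * a relative IOVA of 1G yields index 1G / (64 * 4K) = 4096.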
+ */
+static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
+ unsigned long iova)
+{
+ unsigned long pgsize = 1 << bitmap->mapped.pgshift;
+
+ return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
+}
+
+/*
+ * Converts a bitmap index to a *relative* IOVA.
+ */
+static unsigned long iova_bitmap_index_to_offset(struct iova_bitmap *bitmap,
+ unsigned long index)
+{
+ unsigned long pgshift = bitmap->mapped.pgshift;
+
+ return (index * BITS_PER_TYPE(*bitmap->bitmap)) << pgshift;
+}
+
+/*
+ * Returns the base IOVA of the mapped range.
+ */
+static unsigned long iova_bitmap_mapped_iova(struct iova_bitmap *bitmap)
+{
+ unsigned long skip = bitmap->mapped_base_index;
+
+ return bitmap->iova + iova_bitmap_index_to_offset(bitmap, skip);
+}
+
+/*
+ * Pins the bitmap user pages for the current range window.
+ * This is internal to IOVA bitmap and called when advancing the
+ * index (@mapped_base_index) or allocating the bitmap.
+ */
+static int iova_bitmap_get(struct iova_bitmap *bitmap)
+{
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+ unsigned long npages;
+ u64 __user *addr;
+ long ret;
+
+ /*
+ * @mapped_base_index is the index of the currently mapped u64 words
+	 * that we have access to. Anything before @mapped_base_index is not
+ * mapped. The range @mapped_base_index .. @mapped_total_index-1 is
+ * mapped but capped at a maximum number of pages.
+ */
+ npages = DIV_ROUND_UP((bitmap->mapped_total_index -
+ bitmap->mapped_base_index) *
+ sizeof(*bitmap->bitmap), PAGE_SIZE);
+
+ /*
+	 * We always cap at the maximum number of 'struct page' pointers a base
+	 * page can fit. On x86, for example, this means at most 2M of bitmap data.
+ */
+ npages = min(npages, PAGE_SIZE / sizeof(struct page *));
+
+ /*
+ * Bitmap address to be pinned is calculated via pointer arithmetic
+ * with bitmap u64 word index.
+ */
+ addr = bitmap->bitmap + bitmap->mapped_base_index;
+
+ ret = pin_user_pages_fast((unsigned long)addr, npages,
+ FOLL_WRITE, mapped->pages);
+ if (ret <= 0)
+ return -EFAULT;
+
+ mapped->npages = (unsigned long)ret;
+ /* Base IOVA where @pages point to i.e. bit 0 of the first page */
+ mapped->iova = iova_bitmap_mapped_iova(bitmap);
+
+ /*
+	 * Byte offset into the first pinned page where bit 0 of the mapped
+	 * bitmap data is located. This handles the case where the bitmap
+	 * is not PAGE_SIZE aligned.
+ */
+ mapped->pgoff = offset_in_page(addr);
+ return 0;
+}
+
+/*
+ * Unpins the bitmap user pages and clears @npages.
+ * (Un)pinning is abstracted from the API user and is done when advancing
+ * the index or freeing the bitmap.
+ */
+static void iova_bitmap_put(struct iova_bitmap *bitmap)
+{
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+
+ if (mapped->npages) {
+ unpin_user_pages(mapped->pages, mapped->npages);
+ mapped->npages = 0;
+ }
+}
+
+/**
+ * iova_bitmap_alloc() - Allocates an IOVA bitmap object
+ * @iova: Start address of the IOVA range
+ * @length: Length of the IOVA range
+ * @page_size: Page size of the IOVA bitmap. It defines what each bit
+ * granularity represents
+ * @data: Userspace address of the bitmap
+ *
+ * Allocates an IOVA object and initializes all its fields including the
+ * first user pages of @data.
+ *
+ * Return: A pointer to a newly allocated struct iova_bitmap
+ * or ERR_PTR() on error.
+ */
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ unsigned long page_size, u64 __user *data)
+{
+ struct iova_bitmap_map *mapped;
+ struct iova_bitmap *bitmap;
+ int rc;
+
+ bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
+ if (!bitmap)
+ return ERR_PTR(-ENOMEM);
+
+ mapped = &bitmap->mapped;
+ mapped->pgshift = __ffs(page_size);
+ bitmap->bitmap = data;
+ bitmap->mapped_total_index =
+ iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ bitmap->iova = iova;
+ bitmap->length = length;
+ mapped->iova = iova;
+ mapped->pages = (struct page **)__get_free_page(GFP_KERNEL);
+ if (!mapped->pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = iova_bitmap_get(bitmap);
+ if (rc)
+ goto err;
+ return bitmap;
+
+err:
+ iova_bitmap_free(bitmap);
+ return ERR_PTR(rc);
+}
+
+/**
+ * iova_bitmap_free() - Frees an IOVA bitmap object
+ * @bitmap: IOVA bitmap to free
+ *
+ * It unpins and releases pages array memory and clears any leftover
+ * state.
+ */
+void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+
+ iova_bitmap_put(bitmap);
+
+ if (mapped->pages) {
+ free_page((unsigned long)mapped->pages);
+ mapped->pages = NULL;
+ }
+
+ kfree(bitmap);
+}
+
+/*
+ * Returns how many bitmap indexes remain to be processed, capped to what
+ * the currently pinned bitmap pages can cover.
+ */
+static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
+{
+ unsigned long remaining;
+
+ remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
+ remaining = min_t(unsigned long, remaining,
+ (bitmap->mapped.npages << PAGE_SHIFT) / sizeof(*bitmap->bitmap));
+
+ return remaining;
+}
+
+/*
+ * Returns the length of the mapped IOVA range.
+ */
+static unsigned long iova_bitmap_mapped_length(struct iova_bitmap *bitmap)
+{
+ unsigned long max_iova = bitmap->iova + bitmap->length - 1;
+ unsigned long iova = iova_bitmap_mapped_iova(bitmap);
+ unsigned long remaining;
+
+ /*
+	 * iova_bitmap_mapped_remaining() returns the number of indexes which,
+	 * when converted to IOVA, gives the maximum length that the pinned
+	 * bitmap data can cover. That is then capped so that it only covers
+	 * the IOVA range in @bitmap::iova .. @bitmap::length.
+ */
+ remaining = iova_bitmap_index_to_offset(bitmap,
+ iova_bitmap_mapped_remaining(bitmap));
+
+ if (iova + remaining - 1 > max_iova)
+ remaining -= ((iova + remaining - 1) - max_iova);
+
+ return remaining;
+}
+
+/*
+ * Returns true if there is no more data to iterate.
+ */
+static bool iova_bitmap_done(struct iova_bitmap *bitmap)
+{
+ return bitmap->mapped_base_index >= bitmap->mapped_total_index;
+}
+
+/*
+ * Advances to the next range, releases the current pinned
+ * pages and pins the next set of bitmap pages.
+ * Returns 0 on success or a negative errno otherwise.
+ */
+static int iova_bitmap_advance(struct iova_bitmap *bitmap)
+{
+ unsigned long iova = iova_bitmap_mapped_length(bitmap) - 1;
+ unsigned long count = iova_bitmap_offset_to_index(bitmap, iova) + 1;
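+	/* count is the number of u64 words spanned by the mapped window */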
+
+ bitmap->mapped_base_index += count;
+
+ iova_bitmap_put(bitmap);
+ if (iova_bitmap_done(bitmap))
+ return 0;
+
+ /* When advancing the index we pin the next set of bitmap pages */
+ return iova_bitmap_get(bitmap);
+}
+
+/**
+ * iova_bitmap_for_each() - Iterates over the bitmap
+ * @bitmap: IOVA bitmap to iterate
+ * @opaque: Additional argument to pass to the callback
+ * @fn: Function that gets called for each IOVA range
+ *
+ * Helper function to iterate over bitmap data representing a portion of IOVA
+ * space. It hides the complexity of iterating bitmaps and translating the
+ * mapped bitmap user pages into IOVA ranges to process.
+ *
+ * Return: 0 on success, and an error on failure either upon
+ * iteration or when the callback returns an error.
+ */
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn)
+{
+ int ret = 0;
+
+ for (; !iova_bitmap_done(bitmap) && !ret;
+ ret = iova_bitmap_advance(bitmap)) {
+ ret = fn(bitmap, iova_bitmap_mapped_iova(bitmap),
+ iova_bitmap_mapped_length(bitmap), opaque);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * iova_bitmap_set() - Records an IOVA range in bitmap
+ * @bitmap: IOVA bitmap
+ * @iova: IOVA to start
+ * @length: IOVA range length
+ *
+ * Set the bits corresponding to the range [iova .. iova+length-1] in
+ * the user bitmap.
+ */
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length)
+{
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+ unsigned long offset = (iova - mapped->iova) >> mapped->pgshift;
+ unsigned long nbits = max_t(unsigned long, 1, length >> mapped->pgshift);
+ unsigned long page_idx = offset / BITS_PER_PAGE;
+ unsigned long page_offset = mapped->pgoff;
+ void *kaddr;
+
+ offset = offset % BITS_PER_PAGE;
+
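+	/*
+	 * The range may span several pinned pages; bits are set page by page,
+	 * and only the first page honours the user bitmap's sub-page offset
+	 * (mapped->pgoff).
+	 */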
+ do {
+ unsigned long size = min(BITS_PER_PAGE - offset, nbits);
+
+ kaddr = kmap_local_page(mapped->pages[page_idx]);
+ bitmap_set(kaddr + page_offset, offset, size);
+ kunmap_local(kaddr);
+ page_offset = offset = 0;
+ nbits -= size;
+ page_idx++;
+ } while (nbits > 0);
+}
+EXPORT_SYMBOL_GPL(iova_bitmap_set);
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index b8b9e7911e55..58f91b3bd670 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -8,9 +8,7 @@
*/
#include <linux/module.h>
-#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
@@ -20,71 +18,11 @@
#define DRIVER_AUTHOR "NVIDIA Corporation"
#define DRIVER_DESC "Mediated device Core Driver"
-static LIST_HEAD(parent_list);
-static DEFINE_MUTEX(parent_list_lock);
static struct class_compat *mdev_bus_compat_class;
static LIST_HEAD(mdev_list);
static DEFINE_MUTEX(mdev_list_lock);
-struct device *mdev_parent_dev(struct mdev_device *mdev)
-{
- return mdev->type->parent->dev;
-}
-EXPORT_SYMBOL(mdev_parent_dev);
-
-/*
- * Return the index in supported_type_groups that this mdev_device was created
- * from.
- */
-unsigned int mdev_get_type_group_id(struct mdev_device *mdev)
-{
- return mdev->type->type_group_id;
-}
-EXPORT_SYMBOL(mdev_get_type_group_id);
-
-/*
- * Used in mdev_type_attribute sysfs functions to return the index in the
- * supported_type_groups that the sysfs is called from.
- */
-unsigned int mtype_get_type_group_id(struct mdev_type *mtype)
-{
- return mtype->type_group_id;
-}
-EXPORT_SYMBOL(mtype_get_type_group_id);
-
-/*
- * Used in mdev_type_attribute sysfs functions to return the parent struct
- * device
- */
-struct device *mtype_get_parent_dev(struct mdev_type *mtype)
-{
- return mtype->parent->dev;
-}
-EXPORT_SYMBOL(mtype_get_parent_dev);
-
-/* Should be called holding parent_list_lock */
-static struct mdev_parent *__find_parent_device(struct device *dev)
-{
- struct mdev_parent *parent;
-
- list_for_each_entry(parent, &parent_list, next) {
- if (parent->dev == dev)
- return parent;
- }
- return NULL;
-}
-
-void mdev_release_parent(struct kref *kref)
-{
- struct mdev_parent *parent = container_of(kref, struct mdev_parent,
- ref);
- struct device *dev = parent->dev;
-
- kfree(parent);
- put_device(dev);
-}
-
/* Caller must hold parent unreg_sem read or write lock */
static void mdev_device_remove_common(struct mdev_device *mdev)
{
@@ -99,145 +37,96 @@ static void mdev_device_remove_common(struct mdev_device *mdev)
static int mdev_device_remove_cb(struct device *dev, void *data)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
-
- if (mdev)
- mdev_device_remove_common(mdev);
+ if (dev->bus == &mdev_bus_type)
+ mdev_device_remove_common(to_mdev_device(dev));
return 0;
}
/*
- * mdev_register_device : Register a device
+ * mdev_register_parent: Register a device as parent for mdevs
+ * @parent: parent structure registered
* @dev: device structure representing parent device.
* @mdev_driver: Device driver to bind to the newly created mdev
+ * @types: Array of supported mdev types
+ * @nr_types: Number of entries in @types
+ *
+ * Registers the @parent structure as a parent for mdev types and thus mdev
+ * devices. The caller needs to hold a reference on @dev that must not be
+ * released until after the call to mdev_unregister_parent().
*
- * Add device to list of registered parent devices.
* Returns a negative value on error, otherwise 0.
*/
-int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver)
+int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ struct mdev_driver *mdev_driver, struct mdev_type **types,
+ unsigned int nr_types)
{
- int ret;
- struct mdev_parent *parent;
char *env_string = "MDEV_STATE=registered";
char *envp[] = { env_string, NULL };
+ int ret;
- /* check for mandatory ops */
- if (!mdev_driver->supported_type_groups)
- return -EINVAL;
-
- dev = get_device(dev);
- if (!dev)
- return -EINVAL;
-
- mutex_lock(&parent_list_lock);
-
- /* Check for duplicate */
- parent = __find_parent_device(dev);
- if (parent) {
- parent = NULL;
- ret = -EEXIST;
- goto add_dev_err;
- }
-
- parent = kzalloc(sizeof(*parent), GFP_KERNEL);
- if (!parent) {
- ret = -ENOMEM;
- goto add_dev_err;
- }
-
- kref_init(&parent->ref);
+ memset(parent, 0, sizeof(*parent));
init_rwsem(&parent->unreg_sem);
-
parent->dev = dev;
parent->mdev_driver = mdev_driver;
+ parent->types = types;
+ parent->nr_types = nr_types;
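+	/*
+	 * If the driver does not implement ->get_available, instance
+	 * accounting is done by the core via this counter.
+	 */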
+ atomic_set(&parent->available_instances, mdev_driver->max_instances);
if (!mdev_bus_compat_class) {
mdev_bus_compat_class = class_compat_register("mdev_bus");
- if (!mdev_bus_compat_class) {
- ret = -ENOMEM;
- goto add_dev_err;
- }
+ if (!mdev_bus_compat_class)
+ return -ENOMEM;
}
ret = parent_create_sysfs_files(parent);
if (ret)
- goto add_dev_err;
+ return ret;
ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL);
if (ret)
dev_warn(dev, "Failed to create compatibility class link\n");
- list_add(&parent->next, &parent_list);
- mutex_unlock(&parent_list_lock);
-
dev_info(dev, "MDEV: Registered\n");
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
-
return 0;
-
-add_dev_err:
- mutex_unlock(&parent_list_lock);
- if (parent)
- mdev_put_parent(parent);
- else
- put_device(dev);
- return ret;
}
-EXPORT_SYMBOL(mdev_register_device);
+EXPORT_SYMBOL(mdev_register_parent);
/*
- * mdev_unregister_device : Unregister a parent device
- * @dev: device structure representing parent device.
- *
- * Remove device from list of registered parent devices. Give a chance to free
- * existing mediated devices for given device.
+ * mdev_unregister_parent : Unregister a parent device
+ * @parent: parent structure to unregister
*/
-
-void mdev_unregister_device(struct device *dev)
+void mdev_unregister_parent(struct mdev_parent *parent)
{
- struct mdev_parent *parent;
char *env_string = "MDEV_STATE=unregistered";
char *envp[] = { env_string, NULL };
- mutex_lock(&parent_list_lock);
- parent = __find_parent_device(dev);
-
- if (!parent) {
- mutex_unlock(&parent_list_lock);
- return;
- }
- dev_info(dev, "MDEV: Unregistering\n");
-
- list_del(&parent->next);
- mutex_unlock(&parent_list_lock);
+ dev_info(parent->dev, "MDEV: Unregistering\n");
down_write(&parent->unreg_sem);
-
- class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
-
- device_for_each_child(dev, NULL, mdev_device_remove_cb);
-
+ class_compat_remove_link(mdev_bus_compat_class, parent->dev, NULL);
+ device_for_each_child(parent->dev, NULL, mdev_device_remove_cb);
parent_remove_sysfs_files(parent);
up_write(&parent->unreg_sem);
- mdev_put_parent(parent);
-
- /* We still have the caller's reference to use for the uevent */
- kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&parent->dev->kobj, KOBJ_CHANGE, envp);
}
-EXPORT_SYMBOL(mdev_unregister_device);
+EXPORT_SYMBOL(mdev_unregister_parent);
static void mdev_device_release(struct device *dev)
{
struct mdev_device *mdev = to_mdev_device(dev);
-
- /* Pairs with the get in mdev_device_create() */
- kobject_put(&mdev->type->kobj);
+ struct mdev_parent *parent = mdev->type->parent;
mutex_lock(&mdev_list_lock);
list_del(&mdev->next);
+ if (!parent->mdev_driver->get_available)
+ atomic_inc(&parent->available_instances);
mutex_unlock(&mdev_list_lock);
+ /* Pairs with the get in mdev_device_create() */
+ kobject_put(&mdev->type->kobj);
+
dev_dbg(&mdev->dev, "MDEV: destroying\n");
kfree(mdev);
}
@@ -259,6 +148,18 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
}
}
+ if (!drv->get_available) {
+ /*
+		 * Note that the non-atomic read and dec are fine here because
+ * all modifications are under mdev_list_lock.
+ */
+ if (!atomic_read(&parent->available_instances)) {
+ mutex_unlock(&mdev_list_lock);
+ return -EUSERS;
+ }
+ atomic_dec(&parent->available_instances);
+ }
+
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
mutex_unlock(&mdev_list_lock);
diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c
index 9c2af59809e2..7825d83a55f8 100644
--- a/drivers/vfio/mdev/mdev_driver.c
+++ b/drivers/vfio/mdev/mdev_driver.c
@@ -7,7 +7,6 @@
* Kirti Wankhede <kwankhede@nvidia.com>
*/
-#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/mdev.h>
@@ -47,7 +46,6 @@ struct bus_type mdev_bus_type = {
.remove = mdev_remove,
.match = mdev_match,
};
-EXPORT_SYMBOL_GPL(mdev_bus_type);
/**
* mdev_register_driver - register a new MDEV driver
@@ -57,10 +55,11 @@ EXPORT_SYMBOL_GPL(mdev_bus_type);
**/
int mdev_register_driver(struct mdev_driver *drv)
{
+ if (!drv->device_api)
+ return -EINVAL;
+
/* initialize common driver fields */
drv->driver.bus = &mdev_bus_type;
-
- /* register with core */
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(mdev_register_driver);
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 7c9fc79f3d83..af457b27f607 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -13,25 +13,7 @@
int mdev_bus_register(void);
void mdev_bus_unregister(void);
-struct mdev_parent {
- struct device *dev;
- struct mdev_driver *mdev_driver;
- struct kref ref;
- struct list_head next;
- struct kset *mdev_types_kset;
- struct list_head type_list;
- /* Synchronize device creation/removal with parent unregistration */
- struct rw_semaphore unreg_sem;
-};
-
-struct mdev_type {
- struct kobject kobj;
- struct kobject *devices_kobj;
- struct mdev_parent *parent;
- struct list_head next;
- unsigned int type_group_id;
-};
-
+extern struct bus_type mdev_bus_type;
extern const struct attribute_group *mdev_device_groups[];
#define to_mdev_type_attr(_attr) \
@@ -48,16 +30,4 @@ void mdev_remove_sysfs_files(struct mdev_device *mdev);
int mdev_device_create(struct mdev_type *kobj, const guid_t *uuid);
int mdev_device_remove(struct mdev_device *dev);
-void mdev_release_parent(struct kref *kref);
-
-static inline void mdev_get_parent(struct mdev_parent *parent)
-{
- kref_get(&parent->ref);
-}
-
-static inline void mdev_put_parent(struct mdev_parent *parent)
-{
- kref_put(&parent->ref, mdev_release_parent);
-}
-
#endif /* MDEV_PRIVATE_H */
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 0ccfeb3dda24..abe3359dd477 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -9,14 +9,24 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
-#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include "mdev_private.h"
-/* Static functions */
+struct mdev_type_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf);
+ ssize_t (*store)(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, const char *buf,
+ size_t count);
+};
+
+#define MDEV_TYPE_ATTR_RO(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
+#define MDEV_TYPE_ATTR_WO(_name) \
+ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
static ssize_t mdev_type_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
@@ -74,152 +84,156 @@ static ssize_t create_store(struct mdev_type *mtype,
return count;
}
-
static MDEV_TYPE_ATTR_WO(create);
+static ssize_t device_api_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", mtype->parent->mdev_driver->device_api);
+}
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static ssize_t name_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n",
+ mtype->pretty_name ? mtype->pretty_name : mtype->sysfs_name);
+}
+
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t available_instances_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
+{
+ struct mdev_driver *drv = mtype->parent->mdev_driver;
+
+ if (drv->get_available)
+ return sysfs_emit(buf, "%u\n", drv->get_available(mtype));
+ return sysfs_emit(buf, "%u\n",
+ atomic_read(&mtype->parent->available_instances));
+}
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static ssize_t description_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
+{
+ return mtype->parent->mdev_driver->show_description(mtype, buf);
+}
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *mdev_types_core_attrs[] = {
+ &mdev_type_attr_create.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_name.attr,
+ &mdev_type_attr_available_instances.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static umode_t mdev_types_core_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (attr == &mdev_type_attr_description.attr &&
+ !to_mdev_type(kobj)->parent->mdev_driver->show_description)
+ return 0;
+ return attr->mode;
+}
+
+static struct attribute_group mdev_type_core_group = {
+ .attrs = mdev_types_core_attrs,
+ .is_visible = mdev_types_core_is_visible,
+};
+
+static const struct attribute_group *mdev_type_groups[] = {
+ &mdev_type_core_group,
+ NULL,
+};
+
static void mdev_type_release(struct kobject *kobj)
{
struct mdev_type *type = to_mdev_type(kobj);
pr_debug("Releasing group %s\n", kobj->name);
/* Pairs with the get in add_mdev_supported_type() */
- mdev_put_parent(type->parent);
- kfree(type);
+ put_device(type->parent->dev);
}
static struct kobj_type mdev_type_ktype = {
- .sysfs_ops = &mdev_type_sysfs_ops,
- .release = mdev_type_release,
+ .sysfs_ops = &mdev_type_sysfs_ops,
+ .release = mdev_type_release,
+ .default_groups = mdev_type_groups,
};
-static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
- unsigned int type_group_id)
+static int mdev_type_add(struct mdev_parent *parent, struct mdev_type *type)
{
- struct mdev_type *type;
- struct attribute_group *group =
- parent->mdev_driver->supported_type_groups[type_group_id];
int ret;
- if (!group->name) {
- pr_err("%s: Type name empty!\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- type = kzalloc(sizeof(*type), GFP_KERNEL);
- if (!type)
- return ERR_PTR(-ENOMEM);
-
type->kobj.kset = parent->mdev_types_kset;
type->parent = parent;
/* Pairs with the put in mdev_type_release() */
- mdev_get_parent(parent);
- type->type_group_id = type_group_id;
+ get_device(parent->dev);
ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
"%s-%s", dev_driver_string(parent->dev),
- group->name);
+ type->sysfs_name);
if (ret) {
kobject_put(&type->kobj);
- return ERR_PTR(ret);
+ return ret;
}
- ret = sysfs_create_file(&type->kobj, &mdev_type_attr_create.attr);
- if (ret)
- goto attr_create_failed;
-
type->devices_kobj = kobject_create_and_add("devices", &type->kobj);
if (!type->devices_kobj) {
ret = -ENOMEM;
goto attr_devices_failed;
}
- ret = sysfs_create_files(&type->kobj,
- (const struct attribute **)group->attrs);
- if (ret) {
- ret = -ENOMEM;
- goto attrs_failed;
- }
- return type;
+ return 0;
-attrs_failed:
- kobject_put(type->devices_kobj);
attr_devices_failed:
- sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr);
-attr_create_failed:
kobject_del(&type->kobj);
kobject_put(&type->kobj);
- return ERR_PTR(ret);
+ return ret;
}
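+/*
+ * Note: the get_device() above pairs with the put_device() in
+ * mdev_type_release(), keeping the parent device alive for as long as any
+ * of its type kobjects (and their sysfs directories) exist.
+ */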
-static void remove_mdev_supported_type(struct mdev_type *type)
+static void mdev_type_remove(struct mdev_type *type)
{
- struct attribute_group *group =
- type->parent->mdev_driver->supported_type_groups[type->type_group_id];
-
- sysfs_remove_files(&type->kobj,
- (const struct attribute **)group->attrs);
kobject_put(type->devices_kobj);
- sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr);
kobject_del(&type->kobj);
kobject_put(&type->kobj);
}
-static int add_mdev_supported_type_groups(struct mdev_parent *parent)
-{
- int i;
-
- for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) {
- struct mdev_type *type;
-
- type = add_mdev_supported_type(parent, i);
- if (IS_ERR(type)) {
- struct mdev_type *ltype, *tmp;
-
- list_for_each_entry_safe(ltype, tmp, &parent->type_list,
- next) {
- list_del(&ltype->next);
- remove_mdev_supported_type(ltype);
- }
- return PTR_ERR(type);
- }
- list_add(&type->next, &parent->type_list);
- }
- return 0;
-}
-
/* mdev sysfs functions */
void parent_remove_sysfs_files(struct mdev_parent *parent)
{
- struct mdev_type *type, *tmp;
-
- list_for_each_entry_safe(type, tmp, &parent->type_list, next) {
- list_del(&type->next);
- remove_mdev_supported_type(type);
- }
+ int i;
+ for (i = 0; i < parent->nr_types; i++)
+ mdev_type_remove(parent->types[i]);
kset_unregister(parent->mdev_types_kset);
}
int parent_create_sysfs_files(struct mdev_parent *parent)
{
- int ret;
+ int ret, i;
parent->mdev_types_kset = kset_create_and_add("mdev_supported_types",
NULL, &parent->dev->kobj);
-
if (!parent->mdev_types_kset)
return -ENOMEM;
- INIT_LIST_HEAD(&parent->type_list);
-
- ret = add_mdev_supported_type_groups(parent);
- if (ret)
- goto create_err;
+ for (i = 0; i < parent->nr_types; i++) {
+ ret = mdev_type_add(parent, parent->types[i]);
+ if (ret)
+ goto out_err;
+ }
return 0;
-create_err:
- kset_unregister(parent->mdev_types_kset);
- return ret;
+out_err:
+ while (--i >= 0)
+ mdev_type_remove(parent->types[i]);
+ return ret;
}
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index ea762e28c1cc..39eeca18a0f7 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -16,7 +16,7 @@
#include "hisi_acc_vfio_pci.h"
-/* return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
+/* Return 0 when the VM acc device is ready, or -ETIMEDOUT on hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
u32 val;
@@ -189,7 +189,7 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
struct device *dev = &qm->pdev->dev;
int ret;
- /* check VF state */
+ /* Check VF state */
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
return -EBUSY;
@@ -337,16 +337,7 @@ static int vf_qm_cache_wb(struct hisi_qm *qm)
return 0;
}
-static struct hisi_acc_vf_core_device *hssi_acc_drvdata(struct pci_dev *pdev)
-{
- struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
-
- return container_of(core_device, struct hisi_acc_vf_core_device,
- core_device);
-}
-
-static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev,
- struct hisi_qm *qm)
+static void vf_qm_fun_reset(struct hisi_qm *qm)
{
int i;
@@ -382,7 +373,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return -EINVAL;
}
- /* vf qp num check */
+ /* VF qp num check */
ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
if (ret <= 0) {
dev_err(dev, "failed to get vft qp nums\n");
@@ -396,7 +387,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
vf_qm->qp_num = ret;
- /* vf isolation state check */
+ /* VF isolation state check */
ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
if (ret) {
dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
@@ -405,7 +396,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
if (vf_data->que_iso_cfg != que_iso_state) {
dev_err(dev, "failed to match isolation state\n");
- return ret;
+ return -EINVAL;
}
ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
@@ -427,10 +418,10 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
int ret;
vf_data->acc_magic = ACC_DEV_MAGIC;
- /* save device id */
+ /* Save device id */
vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
- /* vf qp num save from PF */
+ /* VF qp num save from PF */
ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
if (ret <= 0) {
dev_err(dev, "failed to get vft qp nums!\n");
@@ -474,19 +465,19 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
ret = qm_set_regs(qm, vf_data);
if (ret) {
- dev_err(dev, "Set VF regs failed\n");
+ dev_err(dev, "set VF regs failed\n");
return ret;
}
ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
if (ret) {
- dev_err(dev, "Set sqc failed\n");
+ dev_err(dev, "set sqc failed\n");
return ret;
}
ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
if (ret) {
- dev_err(dev, "Set cqc failed\n");
+ dev_err(dev, "set cqc failed\n");
return ret;
}
@@ -528,12 +519,12 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return -EINVAL;
/* Every reg is 32 bit, the dma address is 64 bit. */
- vf_data->eqe_dma = vf_data->qm_eqc_dw[2];
+ vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
- vf_data->eqe_dma |= vf_data->qm_eqc_dw[1];
- vf_data->aeqe_dma = vf_data->qm_aeqc_dw[2];
+ vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
+ vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
- vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1];
+ vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];
/* Through SQC_BT/CQC_BT to get sqc and cqc address */
ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
@@ -552,6 +543,14 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return 0;
}
+static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+ return container_of(core_device, struct hisi_acc_vf_core_device,
+ core_device);
+}
+
/* Check the PF's RAS state and Function INT state */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
@@ -662,7 +661,10 @@ static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vd
if (hisi_acc_vdev->vf_qm_state != QM_READY)
return;
- vf_qm_fun_reset(hisi_acc_vdev, vf_qm);
+ /* Make sure the device is enabled */
+ qm_dev_cmd_init(vf_qm);
+
+ vf_qm_fun_reset(vf_qm);
}
static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
@@ -970,7 +972,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
- struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev);
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
if (hisi_acc_vdev->core_device.vdev.migration_flags !=
VFIO_MIGRATION_STOP_COPY)
@@ -1213,8 +1215,28 @@ static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
.migration_get_state = hisi_acc_vfio_pci_get_device_state,
};
+static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
+ struct hisi_acc_vf_core_device, core_device.vdev);
+ struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
+ struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);
+
+ hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
+ hisi_acc_vdev->pf_qm = pf_qm;
+ hisi_acc_vdev->vf_dev = pdev;
+ mutex_init(&hisi_acc_vdev->state_mutex);
+
+ core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY;
+ core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;
+
+ return vfio_pci_core_init_dev(core_vdev);
+}
+
static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.name = "hisi-acc-vfio-pci-migration",
+ .init = hisi_acc_vfio_pci_migrn_init_dev,
+ .release = vfio_pci_core_release_dev,
.open_device = hisi_acc_vfio_pci_open_device,
.close_device = hisi_acc_vfio_pci_close_device,
.ioctl = hisi_acc_vfio_pci_ioctl,
@@ -1228,6 +1250,8 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
.name = "hisi-acc-vfio-pci",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
.open_device = hisi_acc_vfio_pci_open_device,
.close_device = vfio_pci_core_close_device,
.ioctl = vfio_pci_core_ioctl,
@@ -1239,73 +1263,45 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
.match = vfio_pci_core_match,
};
-static int
-hisi_acc_vfio_pci_migrn_init(struct hisi_acc_vf_core_device *hisi_acc_vdev,
- struct pci_dev *pdev, struct hisi_qm *pf_qm)
-{
- int vf_id;
-
- vf_id = pci_iov_vf_id(pdev);
- if (vf_id < 0)
- return vf_id;
-
- hisi_acc_vdev->vf_id = vf_id + 1;
- hisi_acc_vdev->core_device.vdev.migration_flags =
- VFIO_MIGRATION_STOP_COPY;
- hisi_acc_vdev->pf_qm = pf_qm;
- hisi_acc_vdev->vf_dev = pdev;
- mutex_init(&hisi_acc_vdev->state_mutex);
-
- return 0;
-}
-
static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_acc_vf_core_device *hisi_acc_vdev;
+ const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
struct hisi_qm *pf_qm;
+ int vf_id;
int ret;
- hisi_acc_vdev = kzalloc(sizeof(*hisi_acc_vdev), GFP_KERNEL);
- if (!hisi_acc_vdev)
- return -ENOMEM;
-
pf_qm = hisi_acc_get_pf_qm(pdev);
if (pf_qm && pf_qm->ver >= QM_HW_V3) {
- ret = hisi_acc_vfio_pci_migrn_init(hisi_acc_vdev, pdev, pf_qm);
- if (!ret) {
- vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
- &hisi_acc_vfio_pci_migrn_ops);
- hisi_acc_vdev->core_device.vdev.mig_ops =
- &hisi_acc_vfio_pci_migrn_state_ops;
- } else {
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id >= 0)
+ ops = &hisi_acc_vfio_pci_migrn_ops;
+ else
pci_warn(pdev, "migration support failed, continue with generic interface\n");
- vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
- &hisi_acc_vfio_pci_ops);
- }
- } else {
- vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
- &hisi_acc_vfio_pci_ops);
}
+ hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
+ core_device.vdev, &pdev->dev, ops);
+ if (IS_ERR(hisi_acc_vdev))
+ return PTR_ERR(hisi_acc_vdev);
+
dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
if (ret)
- goto out_free;
+ goto out_put_vdev;
return 0;
-out_free:
- vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
- kfree(hisi_acc_vdev);
+out_put_vdev:
+ vfio_put_device(&hisi_acc_vdev->core_device.vdev);
return ret;
}
static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
- struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev);
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
- vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
- kfree(hisi_acc_vdev);
+ vfio_put_device(&hisi_acc_vdev->core_device.vdev);
}
static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index 5494f4983bbe..67343325b320 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -16,7 +16,6 @@
#define SEC_CORE_INT_STATUS 0x301008
#define HPRE_HAC_INT_STATUS 0x301800
#define HZIP_CORE_INT_STATUS 0x3010AC
-#define QM_QUE_ISO_CFG 0x301154
#define QM_VFT_CFG_RDY 0x10006c
#define QM_VFT_CFG_OP_WR 0x100058
@@ -80,7 +79,7 @@ struct acc_vf_data {
/* QM reserved 5 regs */
u32 qm_rsv_regs[5];
u32 padding;
- /* qm memory init information */
+ /* QM memory init information */
u64 eqe_dma;
u64 aeqe_dma;
u64 sqc_dma;
@@ -99,7 +98,7 @@ struct hisi_acc_vf_migration_file {
struct hisi_acc_vf_core_device {
struct vfio_pci_core_device core_device;
u8 deferred_reset:1;
- /* for migration state */
+ /* For migration state */
struct mutex state_mutex;
enum vfio_device_mig_state mig_state;
struct pci_dev *pf_dev;
@@ -108,7 +107,7 @@ struct hisi_acc_vf_core_device {
struct hisi_qm vf_qm;
u32 vf_qm_state;
int vf_id;
- /* for reset handler */
+ /* For reset handler */
spinlock_t reset_lock;
struct hisi_acc_vf_migration_file *resuming_migf;
struct hisi_acc_vf_migration_file *saving_migf;
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index dd5d7bfe0a49..c604b70437a5 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -5,8 +5,12 @@
#include "cmd.h"
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
u16 *vhca_id);
+static void
+_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{
@@ -66,25 +70,35 @@ int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
return 0;
}
+static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
+{
+ /* Mark the tracker under an error and wake it up if it's running */
+ mvdev->tracker.is_err = true;
+ complete(&mvdev->tracker_comp);
+}
+
static int mlx5fv_vf_event(struct notifier_block *nb,
unsigned long event, void *data)
{
struct mlx5vf_pci_core_device *mvdev =
container_of(nb, struct mlx5vf_pci_core_device, nb);
- mutex_lock(&mvdev->state_mutex);
switch (event) {
case MLX5_PF_NOTIFY_ENABLE_VF:
+ mutex_lock(&mvdev->state_mutex);
mvdev->mdev_detach = false;
+ mlx5vf_state_mutex_unlock(mvdev);
break;
case MLX5_PF_NOTIFY_DISABLE_VF:
- mlx5vf_disable_fds(mvdev);
+ mlx5vf_cmd_close_migratable(mvdev);
+ mutex_lock(&mvdev->state_mutex);
mvdev->mdev_detach = true;
+ mlx5vf_state_mutex_unlock(mvdev);
break;
default:
break;
}
- mlx5vf_state_mutex_unlock(mvdev);
+
return 0;
}
@@ -93,8 +107,11 @@ void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
if (!mvdev->migrate_cap)
return;
+ /* Must be done outside the lock to let it progress */
+ set_tracker_error(mvdev);
mutex_lock(&mvdev->state_mutex);
mlx5vf_disable_fds(mvdev);
+ _mlx5vf_free_page_tracker_resources(mvdev);
mlx5vf_state_mutex_unlock(mvdev);
}
@@ -109,7 +126,8 @@ void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
}
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
- const struct vfio_migration_ops *mig_ops)
+ const struct vfio_migration_ops *mig_ops,
+ const struct vfio_log_ops *log_ops)
{
struct pci_dev *pdev = mvdev->core_device.pdev;
int ret;
@@ -151,6 +169,9 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
VFIO_MIGRATION_STOP_COPY |
VFIO_MIGRATION_P2P;
mvdev->core_device.vdev.mig_ops = mig_ops;
+ init_completion(&mvdev->tracker_comp);
+ if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
+ mvdev->core_device.vdev.log_ops = log_ops;
end:
mlx5_vf_put_core_dev(mvdev->mdev);
@@ -188,11 +209,13 @@ err_exec:
return ret;
}
-static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_vf_migration_file *migf, u32 *mkey)
+static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_vf_migration_file *migf,
+ struct mlx5_vhca_recv_buf *recv_buf,
+ u32 *mkey)
{
- size_t npages = DIV_ROUND_UP(migf->total_length, PAGE_SIZE);
- struct sg_dma_page_iter dma_iter;
+ size_t npages = migf ? DIV_ROUND_UP(migf->total_length, PAGE_SIZE) :
+ recv_buf->npages;
int err = 0, inlen;
__be64 *mtt;
void *mkc;
@@ -209,8 +232,17 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
DIV_ROUND_UP(npages, 2));
mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0)
- *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
+ if (migf) {
+ struct sg_dma_page_iter dma_iter;
+
+ for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0)
+ *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
+ } else {
+ int i;
+
+ for (i = 0; i < npages; i++)
+ *mtt++ = cpu_to_be64(recv_buf->dma_addrs[i]);
+ }
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
@@ -223,7 +255,8 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
- MLX5_SET64(mkc, mkc, len, migf->total_length);
+ MLX5_SET64(mkc, mkc, len,
+ migf ? migf->total_length : (npages * PAGE_SIZE));
err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
kvfree(in);
return err;
@@ -297,7 +330,7 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
if (err)
goto err_dma_map;
- err = _create_state_mkey(mdev, pdn, migf, &mkey);
+ err = _create_mkey(mdev, pdn, migf, NULL, &mkey);
if (err)
goto err_create_mkey;
@@ -369,7 +402,7 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
if (err)
goto err_reg;
- err = _create_state_mkey(mdev, pdn, migf, &mkey);
+ err = _create_mkey(mdev, pdn, migf, NULL, &mkey);
if (err)
goto err_mkey;
@@ -391,3 +424,939 @@ end:
mutex_unlock(&migf->lock);
return err;
}
+
+static void combine_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ u32 req_nodes)
+{
+ struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
+ unsigned long min_gap;
+ unsigned long curr_gap;
+
+ /* Special shortcut when a single range is required */
+ if (req_nodes == 1) {
+ unsigned long last;
+
+ curr = comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ last = curr->last;
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ if (prev != comb_start)
+ interval_tree_remove(prev, root);
+ }
+ comb_start->last = last;
+ return;
+ }
+
+ /* Combine ranges which have the smallest gap */
+ while (cur_nodes > req_nodes) {
+ prev = NULL;
+ min_gap = ULONG_MAX;
+ curr = interval_tree_iter_first(root, 0, ULONG_MAX);
+ while (curr) {
+ if (prev) {
+ curr_gap = curr->start - prev->last;
+ if (curr_gap < min_gap) {
+ min_gap = curr_gap;
+ comb_start = prev;
+ comb_end = curr;
+ }
+ }
+ prev = curr;
+ curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+ }
+ comb_start->last = comb_end->last;
+ interval_tree_remove(comb_end, root);
+ cur_nodes--;
+ }
+}
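+/*
+ * Worked example (illustrative numbers): with tracked ranges
+ * [0x0000, 0x0fff], [0x3000, 0x3fff], [0x4000, 0x4fff] and req_nodes == 2,
+ * the smallest gap (one byte) lies between the last two ranges, so they are
+ * merged into [0x3000, 0x4fff], leaving exactly req_nodes interval nodes.
+ */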
+
+static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
+ struct mlx5vf_pci_core_device *mvdev,
+ struct rb_root_cached *ranges, u32 nnodes)
+{
+ int max_num_range =
+ MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_max_num_range);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ int record_size = MLX5_ST_SZ_BYTES(page_track_range);
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ struct interval_tree_node *node = NULL;
+ u64 total_ranges_len = 0;
+ u32 num_ranges = nnodes;
+ u8 log_addr_space_size;
+ void *range_list_ptr;
+ void *obj_context;
+ void *cmd_hdr;
+ int inlen;
+ void *in;
+ int err;
+ int i;
+
+ if (num_ranges > max_num_range) {
+ combine_ranges(ranges, nnodes, max_num_range);
+ num_ranges = max_num_range;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_page_track_obj_in) +
+ record_size * num_ranges;
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cmd_hdr = MLX5_ADDR_OF(create_page_track_obj_in, in,
+ general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type,
+ MLX5_OBJ_TYPE_PAGE_TRACK);
+ obj_context = MLX5_ADDR_OF(create_page_track_obj_in, in, obj_context);
+ MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id);
+ MLX5_SET(page_track, obj_context, track_type, 1);
+ MLX5_SET(page_track, obj_context, log_page_size,
+ ilog2(tracker->host_qp->tracked_page_size));
+ MLX5_SET(page_track, obj_context, log_msg_size,
+ ilog2(tracker->host_qp->max_msg_size));
+ MLX5_SET(page_track, obj_context, reporting_qpn, tracker->fw_qp->qpn);
+ MLX5_SET(page_track, obj_context, num_ranges, num_ranges);
+
+ range_list_ptr = MLX5_ADDR_OF(page_track, obj_context, track_range);
+ node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
+ for (i = 0; i < num_ranges; i++) {
+ void *addr_range_i_base = range_list_ptr + record_size * i;
+ unsigned long length = node->last - node->start + 1;
+
+ MLX5_SET64(page_track_range, addr_range_i_base, start_address,
+ node->start);
+ MLX5_SET64(page_track_range, addr_range_i_base, length, length);
+ total_ranges_len += length;
+ node = interval_tree_iter_next(node, 0, ULONG_MAX);
+ }
+
+ WARN_ON(node);
+ log_addr_space_size = ilog2(total_ranges_len);
+ if (log_addr_space_size <
+ (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_min_addr_space)) ||
+ log_addr_space_size >
+ (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_addr_space))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ MLX5_SET(page_track, obj_context, log_addr_space_size,
+ log_addr_space_size);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto out;
+
+ tracker->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ kfree(in);
+ return err;
+}
+
+static int mlx5vf_cmd_destroy_tracker(struct mlx5_core_dev *mdev,
+ u32 tracker_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, tracker_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5vf_cmd_modify_tracker(struct mlx5_core_dev *mdev,
+ u32 tracker_id, unsigned long iova,
+ unsigned long length, u32 tracker_state)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_page_track_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ void *obj_context;
+ void *cmd_hdr;
+
+ cmd_hdr = MLX5_ADDR_OF(modify_page_track_obj_in, in, general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, tracker_id);
+
+ obj_context = MLX5_ADDR_OF(modify_page_track_obj_in, in, obj_context);
+ MLX5_SET64(page_track, obj_context, modify_field_select, 0x3);
+ MLX5_SET64(page_track, obj_context, range_start_address, iova);
+ MLX5_SET64(page_track, obj_context, length, length);
+ MLX5_SET(page_track, obj_context, state, tracker_state);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int alloc_cq_frag_buf(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_cq_buf *buf, int nent,
+ int cqe_size)
+{
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+ u8 log_wq_sz = ilog2(cqe_size);
+ int err;
+
+ err = mlx5_frag_buf_alloc_node(mdev, nent * cqe_size, frag_buf,
+ mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+ buf->cqe_size = cqe_size;
+ buf->nent = nent;
+ return 0;
+}
+
+static void init_cq_frag_buf(struct mlx5_vhca_cq_buf *buf)
+{
+ struct mlx5_cqe64 *cqe64;
+ void *cqe;
+ int i;
+
+ for (i = 0; i < buf->nent; i++) {
+ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
+ cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
+ }
+}
+
+static void mlx5vf_destroy_cq(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_cq *cq)
+{
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+ mlx5_frag_buf_free(mdev, &cq->buf.frag_buf);
+ mlx5_db_free(mdev, &cq->db);
+}
+
+static void mlx5vf_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
+{
+ if (type != MLX5_EVENT_TYPE_CQ_ERROR)
+ return;
+
+ set_tracker_error(container_of(mcq, struct mlx5vf_pci_core_device,
+ tracker.cq.mcq));
+}
+
+static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type,
+ void *data)
+{
+ struct mlx5_vhca_page_tracker *tracker =
+ mlx5_nb_cof(nb, struct mlx5_vhca_page_tracker, nb);
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ tracker, struct mlx5vf_pci_core_device, tracker);
+ struct mlx5_eqe *eqe = data;
+ u8 event_type = (u8)type;
+ u8 queue_type;
+ int qp_num;
+
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ queue_type = eqe->data.qp_srq.type;
+ if (queue_type != MLX5_EVENT_QUEUE_TYPE_QP)
+ break;
+ qp_num = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ if (qp_num != tracker->host_qp->qpn &&
+ qp_num != tracker->fw_qp->qpn)
+ break;
+ set_tracker_error(mvdev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ struct mlx5vf_pci_core_device *mvdev =
+ container_of(mcq, struct mlx5vf_pci_core_device,
+ tracker.cq.mcq);
+
+ complete(&mvdev->tracker_comp);
+}
+
+static int mlx5vf_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_page_tracker *tracker,
+ size_t ncqe)
+{
+ int cqe_size = cache_line_size() == 128 ? 128 : 64;
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_vhca_cq *cq;
+ int inlen, err, eqn;
+ void *cqc, *in;
+ __be64 *pas;
+ int vector;
+
+ cq = &tracker->cq;
+ ncqe = roundup_pow_of_two(ncqe);
+ err = mlx5_db_alloc_node(mdev, &cq->db, mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ cq->ncqe = ncqe;
+ cq->mcq.set_ci_db = cq->db.db;
+ cq->mcq.arm_db = cq->db.db + 1;
+ cq->mcq.cqe_sz = cqe_size;
+ err = alloc_cq_frag_buf(mdev, &cq->buf, ncqe, cqe_size);
+ if (err)
+ goto err_db_free;
+
+ init_cq_frag_buf(&cq->buf);
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+ cq->buf.frag_buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_buff;
+ }
+
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ err = mlx5_vector2eqn(mdev, vector, &eqn);
+ if (err)
+ goto err_vec;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, tracker->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->buf.frag_buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
+ cq->mcq.comp = mlx5vf_cq_complete;
+ cq->mcq.event = mlx5vf_cq_event;
+ err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
+ if (err)
+ goto err_vec;
+
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
+ cq->mcq.cons_index);
+ kvfree(in);
+ return 0;
+
+err_vec:
+ kvfree(in);
+err_buff:
+ mlx5_frag_buf_free(mdev, &cq->buf.frag_buf);
+err_db_free:
+ mlx5_db_free(mdev, &cq->db);
+ return err;
+}
+
+static struct mlx5_vhca_qp *
+mlx5vf_create_rc_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_page_tracker *tracker, u32 max_recv_wr)
+{
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct mlx5_vhca_qp *qp;
+ u8 log_rq_stride;
+ u8 log_rq_sz;
+ void *qpc;
+ int inlen;
+ void *in;
+ int err;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->rq.wqe_cnt = roundup_pow_of_two(max_recv_wr);
+ log_rq_stride = ilog2(MLX5_SEND_WQE_DS);
+ log_rq_sz = ilog2(qp->rq.wqe_cnt);
+ err = mlx5_db_alloc_node(mdev, &qp->db, mdev->priv.numa_node);
+ if (err)
+ goto err_free;
+
+ if (max_recv_wr) {
+ err = mlx5_frag_buf_alloc_node(mdev,
+ wq_get_byte_sz(log_rq_sz, log_rq_stride),
+ &qp->buf, mdev->priv.numa_node);
+ if (err)
+ goto err_db_free;
+ mlx5_init_fbc(qp->buf.frags, log_rq_stride, log_rq_sz, &qp->rq.fbc);
+ }
+
+ qp->rq.db = &qp->db.db[MLX5_RCV_DBR];
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+ qp->buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, tracker->pdn);
+ MLX5_SET(qpc, qpc, uar_page, tracker->uar->index);
+ MLX5_SET(qpc, qpc, log_page_size,
+ qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
+ if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+ MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ if (max_recv_wr) {
+ MLX5_SET(qpc, qpc, cqn_rcv, tracker->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, log_rq_stride, log_rq_stride - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, log_rq_sz);
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+ mlx5_fill_page_frag_array(&qp->buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in,
+ in, pas));
+ } else {
+ MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
+ }
+
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ kvfree(in);
+ if (err)
+ goto err_in;
+
+ qp->qpn = MLX5_GET(create_qp_out, out, qpn);
+ return qp;
+
+err_in:
+ if (max_recv_wr)
+ mlx5_frag_buf_free(mdev, &qp->buf);
+err_db_free:
+ mlx5_db_free(mdev, &qp->db);
+err_free:
+ kfree(qp);
+ return ERR_PTR(err);
+}
+
+static void mlx5vf_post_recv(struct mlx5_vhca_qp *qp)
+{
+ struct mlx5_wqe_data_seg *data;
+ unsigned int ix;
+
+ WARN_ON(qp->rq.pc - qp->rq.cc >= qp->rq.wqe_cnt);
+ ix = qp->rq.pc & (qp->rq.wqe_cnt - 1);
+ data = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ix);
+ data->byte_count = cpu_to_be32(qp->max_msg_size);
+ data->lkey = cpu_to_be32(qp->recv_buf.mkey);
+ data->addr = cpu_to_be64(qp->recv_buf.next_rq_offset);
+ qp->rq.pc++;
+ /* Make sure that descriptors are written before doorbell record. */
+ dma_wmb();
+ *qp->rq.db = cpu_to_be32(qp->rq.pc & 0xffff);
+}
+
+static int mlx5vf_activate_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp, u32 remote_qpn,
+ bool host_qp)
+{
+ u32 init_in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+ u32 rtr_in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+ u32 rts_in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+ void *qpc;
+ int ret;
+
+ /* Init */
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, init_in, qpc);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
+ MLX5_SET(qpc, qpc, rre, 1);
+ MLX5_SET(qpc, qpc, rwe, 1);
+ MLX5_SET(rst2init_qp_in, init_in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, init_in, qpn, qp->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rst2init_qp, init_in);
+ if (ret)
+ return ret;
+
+ if (host_qp) {
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+ int i;
+
+ for (i = 0; i < qp->rq.wqe_cnt; i++) {
+ mlx5vf_post_recv(qp);
+ recv_buf->next_rq_offset += qp->max_msg_size;
+ }
+ }
+
+ /* RTR */
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, rtr_in, qpc);
+ MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn);
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
+ MLX5_SET(qpc, qpc, log_msg_max, MLX5_CAP_GEN(mdev, log_max_msg));
+ MLX5_SET(qpc, qpc, remote_qpn, remote_qpn);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
+ MLX5_SET(qpc, qpc, min_rnr_nak, 1);
+ MLX5_SET(init2rtr_qp_in, rtr_in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn);
+ ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, rtr_in);
+ if (ret || host_qp)
+ return ret;
+
+ /* RTS */
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, rts_in, qpc);
+ MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn);
+ MLX5_SET(qpc, qpc, retry_count, 7);
+ MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
+ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
+ MLX5_SET(rtr2rts_qp_in, rts_in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, rts_in);
+}
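+/*
+ * Note: the helper above walks the usual RC QP state machine. Both QPs go
+ * RESET -> INIT -> RTR, but only the firmware-side QP continues to RTS:
+ * the host QP only posts receive buffers for dirty-page reports, while the
+ * firmware QP is the sender.
+ */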
+
+static void mlx5vf_destroy_qp(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
+
+ mlx5_frag_buf_free(mdev, &qp->buf);
+ mlx5_db_free(mdev, &qp->db);
+ kfree(qp);
+}
+
+static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i;
+
+ /* Undo alloc_pages_bulk_array() */
+ for (i = 0; i < recv_buf->npages; i++)
+ __free_page(recv_buf->page_list[i]);
+
+ kvfree(recv_buf->page_list);
+}
+
+static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
+ unsigned int npages)
+{
+ unsigned int filled = 0, done = 0;
+ int i;
+
+ recv_buf->page_list = kvcalloc(npages, sizeof(*recv_buf->page_list),
+ GFP_KERNEL);
+ if (!recv_buf->page_list)
+ return -ENOMEM;
+
+ for (;;) {
+ filled = alloc_pages_bulk_array(GFP_KERNEL, npages - done,
+ recv_buf->page_list + done);
+ if (!filled)
+ goto err;
+
+ done += filled;
+ if (done == npages)
+ break;
+ }
+
+ recv_buf->npages = npages;
+ return 0;
+
+err:
+ for (i = 0; i < npages; i++) {
+ if (recv_buf->page_list[i])
+ __free_page(recv_buf->page_list[i]);
+ }
+
+ kvfree(recv_buf->page_list);
+ return -ENOMEM;
+}
+
+static int register_dma_recv_pages(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i, j;
+
+ recv_buf->dma_addrs = kvcalloc(recv_buf->npages,
+ sizeof(*recv_buf->dma_addrs),
+ GFP_KERNEL);
+ if (!recv_buf->dma_addrs)
+ return -ENOMEM;
+
+ for (i = 0; i < recv_buf->npages; i++) {
+ recv_buf->dma_addrs[i] = dma_map_page(mdev->device,
+ recv_buf->page_list[i],
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mdev->device, recv_buf->dma_addrs[i]))
+ goto error;
+ }
+ return 0;
+
+error:
+ for (j = 0; j < i; j++)
+ dma_unmap_single(mdev->device, recv_buf->dma_addrs[j],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ kvfree(recv_buf->dma_addrs);
+ return -ENOMEM;
+}
+
+static void unregister_dma_recv_pages(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_recv_buf *recv_buf)
+{
+ int i;
+
+ for (i = 0; i < recv_buf->npages; i++)
+ dma_unmap_single(mdev->device, recv_buf->dma_addrs[i],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ kvfree(recv_buf->dma_addrs);
+}
+
+static void mlx5vf_free_qp_recv_resources(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp)
+{
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+
+ mlx5_core_destroy_mkey(mdev, recv_buf->mkey);
+ unregister_dma_recv_pages(mdev, recv_buf);
+ free_recv_pages(&qp->recv_buf);
+}
+
+static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
+ struct mlx5_vhca_qp *qp, u32 pdn,
+ u64 rq_size)
+{
+ unsigned int npages = DIV_ROUND_UP_ULL(rq_size, PAGE_SIZE);
+ struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
+ int err;
+
+ err = alloc_recv_pages(recv_buf, npages);
+ if (err < 0)
+ return err;
+
+ err = register_dma_recv_pages(mdev, recv_buf);
+ if (err)
+ goto end;
+
+ err = _create_mkey(mdev, pdn, NULL, recv_buf, &recv_buf->mkey);
+ if (err)
+ goto err_create_mkey;
+
+ return 0;
+
+err_create_mkey:
+ unregister_dma_recv_pages(mdev, recv_buf);
+end:
+ free_recv_pages(recv_buf);
+ return err;
+}
+
+static void
+_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev)
+{
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ struct mlx5_core_dev *mdev = mvdev->mdev;
+
+ lockdep_assert_held(&mvdev->state_mutex);
+
+ if (!mvdev->log_active)
+ return;
+
+ WARN_ON(mvdev->mdev_detach);
+
+ mlx5_eq_notifier_unregister(mdev, &tracker->nb);
+ mlx5vf_cmd_destroy_tracker(mdev, tracker->id);
+ mlx5vf_destroy_qp(mdev, tracker->fw_qp);
+ mlx5vf_free_qp_recv_resources(mdev, tracker->host_qp);
+ mlx5vf_destroy_qp(mdev, tracker->host_qp);
+ mlx5vf_destroy_cq(mdev, &tracker->cq);
+ mlx5_core_dealloc_pd(mdev, tracker->pdn);
+ mlx5_put_uars_page(mdev, tracker->uar);
+ mvdev->log_active = false;
+}
+
+int mlx5vf_stop_page_tracker(struct vfio_device *vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mutex_lock(&mvdev->state_mutex);
+ if (!mvdev->log_active)
+ goto end;
+
+ _mlx5vf_free_page_tracker_resources(mvdev);
+ mvdev->log_active = false;
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+}
+
+int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ struct rb_root_cached *ranges, u32 nnodes,
+ u64 *page_size)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ u8 log_tracked_page = ilog2(*page_size);
+ struct mlx5_vhca_qp *host_qp;
+ struct mlx5_vhca_qp *fw_qp;
+ struct mlx5_core_dev *mdev;
+ u32 max_msg_size = PAGE_SIZE;
+ u64 rq_size = SZ_2M;
+ u32 max_recv_wr;
+ int err;
+
+ mutex_lock(&mvdev->state_mutex);
+ if (mvdev->mdev_detach) {
+ err = -ENOTCONN;
+ goto end;
+ }
+
+ if (mvdev->log_active) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ mdev = mvdev->mdev;
+ memset(tracker, 0, sizeof(*tracker));
+ tracker->uar = mlx5_get_uars_page(mdev);
+ if (IS_ERR(tracker->uar)) {
+ err = PTR_ERR(tracker->uar);
+ goto end;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &tracker->pdn);
+ if (err)
+ goto err_uar;
+
+ max_recv_wr = DIV_ROUND_UP_ULL(rq_size, max_msg_size);
+ err = mlx5vf_create_cq(mdev, tracker, max_recv_wr);
+ if (err)
+ goto err_dealloc_pd;
+
+ host_qp = mlx5vf_create_rc_qp(mdev, tracker, max_recv_wr);
+ if (IS_ERR(host_qp)) {
+ err = PTR_ERR(host_qp);
+ goto err_cq;
+ }
+
+ host_qp->max_msg_size = max_msg_size;
+ if (log_tracked_page < MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_min_page_size)) {
+ log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_min_page_size);
+ } else if (log_tracked_page > MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_max_page_size)) {
+ log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev,
+ pg_track_log_max_page_size);
+ }
+
+ host_qp->tracked_page_size = (1ULL << log_tracked_page);
+ err = mlx5vf_alloc_qp_recv_resources(mdev, host_qp, tracker->pdn,
+ rq_size);
+ if (err)
+ goto err_host_qp;
+
+ fw_qp = mlx5vf_create_rc_qp(mdev, tracker, 0);
+ if (IS_ERR(fw_qp)) {
+ err = PTR_ERR(fw_qp);
+ goto err_recv_resources;
+ }
+
+ err = mlx5vf_activate_qp(mdev, host_qp, fw_qp->qpn, true);
+ if (err)
+ goto err_activate;
+
+ err = mlx5vf_activate_qp(mdev, fw_qp, host_qp->qpn, false);
+ if (err)
+ goto err_activate;
+
+ tracker->host_qp = host_qp;
+ tracker->fw_qp = fw_qp;
+ err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes);
+ if (err)
+ goto err_activate;
+
+ MLX5_NB_INIT(&tracker->nb, mlx5vf_event_notifier, NOTIFY_ANY);
+ mlx5_eq_notifier_register(mdev, &tracker->nb);
+ *page_size = host_qp->tracked_page_size;
+ mvdev->log_active = true;
+ mlx5vf_state_mutex_unlock(mvdev);
+ return 0;
+
+err_activate:
+ mlx5vf_destroy_qp(mdev, fw_qp);
+err_recv_resources:
+ mlx5vf_free_qp_recv_resources(mdev, host_qp);
+err_host_qp:
+ mlx5vf_destroy_qp(mdev, host_qp);
+err_cq:
+ mlx5vf_destroy_cq(mdev, &tracker->cq);
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, tracker->pdn);
+err_uar:
+ mlx5_put_uars_page(mdev, tracker->uar);
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return err;
+}
+
+static void
+set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp,
+ struct iova_bitmap *dirty)
+{
+ u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry);
+ u32 nent = size / entry_size;
+ struct page *page;
+ u64 addr;
+ u64 *buf;
+ int i;
+
+ if (WARN_ON(index >= qp->recv_buf.npages ||
+ (nent > qp->max_msg_size / entry_size)))
+ return;
+
+ page = qp->recv_buf.page_list[index];
+ buf = kmap_local_page(page);
+ for (i = 0; i < nent; i++) {
+ addr = MLX5_GET(page_track_report_entry, buf + i,
+ dirty_address_low);
+ addr |= (u64)MLX5_GET(page_track_report_entry, buf + i,
+ dirty_address_high) << 32;
+ iova_bitmap_set(dirty, addr, qp->tracked_page_size);
+ }
+ kunmap_local(buf);
+}
+
+static void
+mlx5vf_rq_cqe(struct mlx5_vhca_qp *qp, struct mlx5_cqe64 *cqe,
+ struct iova_bitmap *dirty, int *tracker_status)
+{
+ u32 size;
+ int ix;
+
+ qp->rq.cc++;
+ *tracker_status = be32_to_cpu(cqe->immediate) >> 28;
+ size = be32_to_cpu(cqe->byte_cnt);
+ ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1);
+
+ /* zero length CQE, no data */
+ WARN_ON(!size && *tracker_status == MLX5_PAGE_TRACK_STATE_REPORTING);
+ if (size)
+ set_report_output(size, ix, qp, dirty);
+
+ qp->recv_buf.next_rq_offset = ix * qp->max_msg_size;
+ mlx5vf_post_recv(qp);
+}
+
+static void *get_cqe(struct mlx5_vhca_cq *cq, int n)
+{
+ return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
+}
+
+static struct mlx5_cqe64 *get_sw_cqe(struct mlx5_vhca_cq *cq, int n)
+{
+ void *cqe = get_cqe(cq, n & (cq->ncqe - 1));
+ struct mlx5_cqe64 *cqe64;
+
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+
+ if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ncqe)))) {
+ return cqe64;
+ } else {
+ return NULL;
+ }
+}
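+/*
+ * Note: the ownership test above follows the usual mlx5 CQ convention:
+ * hardware flips the CQE owner bit on every pass around the ring, so a CQE
+ * belongs to software only when its owner bit matches the parity of
+ * cons_index / ncqe.
+ */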
+
+static int
+mlx5vf_cq_poll_one(struct mlx5_vhca_cq *cq, struct mlx5_vhca_qp *qp,
+ struct iova_bitmap *dirty, int *tracker_status)
+{
+ struct mlx5_cqe64 *cqe;
+ u8 opcode;
+
+ cqe = get_sw_cqe(cq, cq->mcq.cons_index);
+ if (!cqe)
+ return CQ_EMPTY;
+
+ ++cq->mcq.cons_index;
+ /*
+ * Make sure we read CQ entry contents after we've checked the
+ * ownership bit.
+ */
+ rmb();
+ opcode = get_cqe_opcode(cqe);
+ switch (opcode) {
+ case MLX5_CQE_RESP_SEND_IMM:
+ mlx5vf_rq_cqe(qp, cqe, dirty, tracker_status);
+ return CQ_OK;
+ default:
+ return CQ_POLL_ERR;
+ }
+}
+
+int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
+ unsigned long length,
+ struct iova_bitmap *dirty)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(
+ vdev, struct mlx5vf_pci_core_device, core_device.vdev);
+ struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
+ struct mlx5_vhca_cq *cq = &tracker->cq;
+ struct mlx5_core_dev *mdev;
+ int poll_err, err;
+
+ mutex_lock(&mvdev->state_mutex);
+ if (!mvdev->log_active) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ if (mvdev->mdev_detach) {
+ err = -ENOTCONN;
+ goto end;
+ }
+
+ mdev = mvdev->mdev;
+ err = mlx5vf_cmd_modify_tracker(mdev, tracker->id, iova, length,
+ MLX5_PAGE_TRACK_STATE_REPORTING);
+ if (err)
+ goto end;
+
+ tracker->status = MLX5_PAGE_TRACK_STATE_REPORTING;
+ while (tracker->status == MLX5_PAGE_TRACK_STATE_REPORTING &&
+ !tracker->is_err) {
+ poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp, dirty,
+ &tracker->status);
+ if (poll_err == CQ_EMPTY) {
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
+ cq->mcq.cons_index);
+ poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp,
+ dirty, &tracker->status);
+ if (poll_err == CQ_EMPTY) {
+ wait_for_completion(&mvdev->tracker_comp);
+ continue;
+ }
+ }
+ if (poll_err == CQ_POLL_ERR) {
+ err = -EIO;
+ goto end;
+ }
+ mlx5_cq_set_ci(&cq->mcq);
+ }
+
+ if (tracker->status == MLX5_PAGE_TRACK_STATE_ERROR)
+ tracker->is_err = true;
+
+ if (tracker->is_err)
+ err = -EIO;
+end:
+ mlx5vf_state_mutex_unlock(mvdev);
+ return err;
+}
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 8208f4701a90..921d5720a1e5 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -9,6 +9,8 @@
#include <linux/kernel.h>
#include <linux/vfio_pci_core.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
struct mlx5vf_async_data {
struct mlx5_async_work cb_work;
@@ -39,6 +41,56 @@ struct mlx5_vf_migration_file {
struct mlx5vf_async_data async_data;
};
+struct mlx5_vhca_cq_buf {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
+ int cqe_size;
+ int nent;
+};
+
+struct mlx5_vhca_cq {
+ struct mlx5_vhca_cq_buf buf;
+ struct mlx5_db db;
+ struct mlx5_core_cq mcq;
+ size_t ncqe;
+};
+
+struct mlx5_vhca_recv_buf {
+ u32 npages;
+ struct page **page_list;
+ dma_addr_t *dma_addrs;
+ u32 next_rq_offset;
+ u32 mkey;
+};
+
+struct mlx5_vhca_qp {
+ struct mlx5_frag_buf buf;
+ struct mlx5_db db;
+ struct mlx5_vhca_recv_buf recv_buf;
+ u32 tracked_page_size;
+ u32 max_msg_size;
+ u32 qpn;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int wqe_cnt;
+ __be32 *db;
+ struct mlx5_frag_buf_ctrl fbc;
+ } rq;
+};
+
+struct mlx5_vhca_page_tracker {
+ u32 id;
+ u32 pdn;
+ u8 is_err:1;
+ struct mlx5_uars_page *uar;
+ struct mlx5_vhca_cq cq;
+ struct mlx5_vhca_qp *host_qp;
+ struct mlx5_vhca_qp *fw_qp;
+ struct mlx5_nb nb;
+ int status;
+};
+
struct mlx5vf_pci_core_device {
struct vfio_pci_core_device core_device;
int vf_id;
@@ -46,6 +98,8 @@ struct mlx5vf_pci_core_device {
u8 migrate_cap:1;
u8 deferred_reset:1;
u8 mdev_detach:1;
+ u8 log_active:1;
+ struct completion tracker_comp;
/* protect migration state */
struct mutex state_mutex;
enum vfio_device_mig_state mig_state;
@@ -53,6 +107,7 @@ struct mlx5vf_pci_core_device {
spinlock_t reset_lock;
struct mlx5_vf_migration_file *resuming_migf;
struct mlx5_vf_migration_file *saving_migf;
+ struct mlx5_vhca_page_tracker tracker;
struct workqueue_struct *cb_wq;
struct notifier_block nb;
struct mlx5_core_dev *mdev;
@@ -63,7 +118,8 @@ int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size);
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
- const struct vfio_migration_ops *mig_ops);
+ const struct vfio_migration_ops *mig_ops,
+ const struct vfio_log_ops *log_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
@@ -73,4 +129,9 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
+int mlx5vf_start_page_tracker(struct vfio_device *vdev,
+ struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
+int mlx5vf_stop_page_tracker(struct vfio_device *vdev);
+int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
+ unsigned long length, struct iova_bitmap *dirty);
#endif /* MLX5_VFIO_CMD_H */
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index a9b63d15c5d3..fd6ccb8454a2 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -579,8 +579,41 @@ static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
.migration_get_state = mlx5vf_pci_get_device_state,
};
+static const struct vfio_log_ops mlx5vf_pci_log_ops = {
+ .log_start = mlx5vf_start_page_tracker,
+ .log_stop = mlx5vf_stop_page_tracker,
+ .log_read_and_clear = mlx5vf_tracker_read_and_clear,
+};
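+/*
+ * Note: these callbacks implement the VFIO dirty-page tracking interface:
+ * log_start arms the device tracker over the given IOVA ranges, log_stop
+ * tears it down, and log_read_and_clear drains reported dirty pages into
+ * the caller-supplied iova_bitmap.
+ */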
+
+static int mlx5vf_pci_init_dev(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev,
+ struct mlx5vf_pci_core_device, core_device.vdev);
+ int ret;
+
+ ret = vfio_pci_core_init_dev(core_vdev);
+ if (ret)
+ return ret;
+
+ mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops,
+ &mlx5vf_pci_log_ops);
+
+ return 0;
+}
+
+static void mlx5vf_pci_release_dev(struct vfio_device *core_vdev)
+{
+ struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev,
+ struct mlx5vf_pci_core_device, core_device.vdev);
+
+ mlx5vf_cmd_remove_migratable(mvdev);
+ vfio_pci_core_release_dev(core_vdev);
+}
+
static const struct vfio_device_ops mlx5vf_pci_ops = {
.name = "mlx5-vfio-pci",
+ .init = mlx5vf_pci_init_dev,
+ .release = mlx5vf_pci_release_dev,
.open_device = mlx5vf_pci_open_device,
.close_device = mlx5vf_pci_close_device,
.ioctl = vfio_pci_core_ioctl,
@@ -598,21 +631,19 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
struct mlx5vf_pci_core_device *mvdev;
int ret;
- mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
- if (!mvdev)
- return -ENOMEM;
- vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
- mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops);
+ mvdev = vfio_alloc_device(mlx5vf_pci_core_device, core_device.vdev,
+ &pdev->dev, &mlx5vf_pci_ops);
+ if (IS_ERR(mvdev))
+ return PTR_ERR(mvdev);
+
dev_set_drvdata(&pdev->dev, &mvdev->core_device);
ret = vfio_pci_core_register_device(&mvdev->core_device);
if (ret)
- goto out_free;
+ goto out_put_vdev;
return 0;
-out_free:
- mlx5vf_cmd_remove_migratable(mvdev);
- vfio_pci_core_uninit_device(&mvdev->core_device);
- kfree(mvdev);
+out_put_vdev:
+ vfio_put_device(&mvdev->core_device.vdev);
return ret;
}
@@ -621,9 +652,7 @@ static void mlx5vf_pci_remove(struct pci_dev *pdev)
struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
vfio_pci_core_unregister_device(&mvdev->core_device);
- mlx5vf_cmd_remove_migratable(mvdev);
- vfio_pci_core_uninit_device(&mvdev->core_device);
- kfree(mvdev);
+ vfio_put_device(&mvdev->core_device.vdev);
}
static const struct pci_device_id mlx5vf_pci_table[] = {
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 4d1a97415a27..1d4919edfbde 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -25,7 +25,7 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
@@ -127,6 +127,8 @@ static int vfio_pci_open_device(struct vfio_device *core_vdev)
static const struct vfio_device_ops vfio_pci_ops = {
.name = "vfio-pci",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
.open_device = vfio_pci_open_device,
.close_device = vfio_pci_core_close_device,
.ioctl = vfio_pci_core_ioctl,
@@ -146,20 +148,19 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (vfio_pci_is_denylisted(pdev))
return -EINVAL;
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
- vfio_pci_core_init_device(vdev, pdev, &vfio_pci_ops);
+ vdev = vfio_alloc_device(vfio_pci_core_device, vdev, &pdev->dev,
+ &vfio_pci_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
dev_set_drvdata(&pdev->dev, vdev);
ret = vfio_pci_core_register_device(vdev);
if (ret)
- goto out_free;
+ goto out_put_vdev;
return 0;
-out_free:
- vfio_pci_core_uninit_device(vdev);
- kfree(vdev);
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
return ret;
}
@@ -168,8 +169,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
vfio_pci_core_unregister_device(vdev);
- vfio_pci_core_uninit_device(vdev);
- kfree(vdev);
+ vfio_put_device(&vdev->vdev);
}
static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 442d3ba4122b..4a350421c5f6 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -26,7 +26,7 @@
#include <linux/vfio.h>
#include <linux/slab.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
/* Fake capability ID for standard config space */
#define PCI_CAP_ID_BASIC 0
@@ -1166,7 +1166,7 @@ static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
flags = le16_to_cpu(*pflags);
/* MSI is enabled via ioctl */
- if (!is_msi(vdev))
+ if (vdev->irq_type != VFIO_PCI_MSI_IRQ_INDEX)
flags &= ~PCI_MSI_FLAGS_ENABLE;
/* Check queue size */
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index c8d3b0450fb3..badc9d828cac 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -28,7 +28,7 @@
#include <linux/nospec.h>
#include <linux/sched/mm.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "core driver for VFIO based PCI devices"
@@ -41,6 +41,23 @@ static bool disable_idle_d3;
static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
static LIST_HEAD(vfio_pci_sriov_pfs);
+struct vfio_pci_dummy_resource {
+ struct resource resource;
+ int index;
+ struct list_head res_next;
+};
+
+struct vfio_pci_vf_token {
+ struct mutex lock;
+ uuid_t uuid;
+ int users;
+};
+
+struct vfio_pci_mmap_vma {
+ struct vm_area_struct *vma;
+ struct list_head vma_next;
+};
+
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -260,16 +277,189 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat
return ret;
}
+static int vfio_pci_runtime_pm_entry(struct vfio_pci_core_device *vdev,
+ struct eventfd_ctx *efdctx)
+{
+ /*
+ * The vdev power-related flags are protected by the 'memory_lock'
+ * semaphore.
+ */
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ if (vdev->pm_runtime_engaged) {
+ up_write(&vdev->memory_lock);
+ return -EINVAL;
+ }
+
+ vdev->pm_runtime_engaged = true;
+ vdev->pm_wake_eventfd_ctx = efdctx;
+ pm_runtime_put_noidle(&vdev->pdev->dev);
+ up_write(&vdev->memory_lock);
+
+ return 0;
+}
+
+static int vfio_pci_core_pm_entry(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
+ if (ret != 1)
+ return ret;
+
+ /*
+ * Inside vfio_pci_runtime_pm_entry(), only the runtime PM usage count
+ * will be decremented. The pm_runtime_put() will be invoked again
+ * while returning from the ioctl and then the device can go into
+ * runtime suspended state.
+ */
+ return vfio_pci_runtime_pm_entry(vdev, NULL);
+}
+
+static int vfio_pci_core_pm_entry_with_wakeup(
+ struct vfio_device *device, u32 flags,
+ struct vfio_device_low_power_entry_with_wakeup __user *arg,
+ size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ struct vfio_device_low_power_entry_with_wakeup entry;
+ struct eventfd_ctx *efdctx;
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
+ sizeof(entry));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&entry, arg, sizeof(entry)))
+ return -EFAULT;
+
+ if (entry.wakeup_eventfd < 0)
+ return -EINVAL;
+
+ efdctx = eventfd_ctx_fdget(entry.wakeup_eventfd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ ret = vfio_pci_runtime_pm_entry(vdev, efdctx);
+ if (ret)
+ eventfd_ctx_put(efdctx);
+
+ return ret;
+}
+
+static void __vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
+{
+ if (vdev->pm_runtime_engaged) {
+ vdev->pm_runtime_engaged = false;
+ pm_runtime_get_noresume(&vdev->pdev->dev);
+
+ if (vdev->pm_wake_eventfd_ctx) {
+ eventfd_ctx_put(vdev->pm_wake_eventfd_ctx);
+ vdev->pm_wake_eventfd_ctx = NULL;
+ }
+ }
+}
+
+static void vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
+{
+ /*
+ * The vdev power-related flags are protected by the 'memory_lock'
+ * semaphore.
+ */
+ down_write(&vdev->memory_lock);
+ __vfio_pci_runtime_pm_exit(vdev);
+ up_write(&vdev->memory_lock);
+}
+
+static int vfio_pci_core_pm_exit(struct vfio_device *device, u32 flags,
+ void __user *arg, size_t argsz)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(device, struct vfio_pci_core_device, vdev);
+ int ret;
+
+ ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
+ if (ret != 1)
+ return ret;
+
+ /*
+ * The device is always in the active state here due to pm wrappers
+ * around ioctls. If the device had entered a low power state and
+ * pm_wake_eventfd_ctx is valid, vfio_pci_core_runtime_resume() has
+ * already signaled the eventfd and exited low power mode itself.
+ * pm_runtime_engaged protects the redundant call here.
+ */
+ vfio_pci_runtime_pm_exit(vdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int vfio_pci_core_runtime_suspend(struct device *dev)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
+
+ down_write(&vdev->memory_lock);
+ /*
+ * The user can move the device into D3hot state before invoking
+ * the power management IOCTL. Move the device into D0 state here and then
+ * the pci-driver core runtime PM suspend function will move the device
+ * into the low power state. Also, for the devices which have
+ * NoSoftRst-, it will help in restoring the original state
+ * (saved locally in 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ up_write(&vdev->memory_lock);
+
+ /*
+ * If INTx is enabled, mask INTx before going into the runtime-suspended
+ * state and unmask it again on runtime resume. If INTx has already been
+ * masked by the user, vfio_pci_intx_mask() returns false, and in that
+ * case INTx must not be unmasked on runtime resume.
+ */
+ vdev->pm_intx_masked = ((vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) &&
+ vfio_pci_intx_mask(vdev));
+
+ return 0;
+}
+
+static int vfio_pci_core_runtime_resume(struct device *dev)
+{
+ struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);
+
+ /*
+ * A resume with a valid pm_wake_eventfd_ctx signals the eventfd and
+ * exits low power mode.
+ */
+ down_write(&vdev->memory_lock);
+ if (vdev->pm_wake_eventfd_ctx) {
+ eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
+ __vfio_pci_runtime_pm_exit(vdev);
+ }
+ up_write(&vdev->memory_lock);
+
+ if (vdev->pm_intx_masked)
+ vfio_pci_intx_unmask(vdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
/*
- * The dev_pm_ops needs to be provided to make pci-driver runtime PM working,
- * so use structure without any callbacks.
- *
* The pci-driver core runtime PM routines always save the device state
* before going into the suspended state. If the device is going into a low
* power state with only runtime PM ops, then no explicit handling is needed
* for the devices which have NoSoftRst-.
*/
-static const struct dev_pm_ops vfio_pci_core_pm_ops = { };
+static const struct dev_pm_ops vfio_pci_core_pm_ops = {
+ SET_RUNTIME_PM_OPS(vfio_pci_core_runtime_suspend,
+ vfio_pci_core_runtime_resume,
+ NULL)
+};
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
{
@@ -371,6 +561,18 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
/*
* This function can be invoked while the power state is non-D0.
+ * This non-D0 power state can be with or without runtime PM.
+ * vfio_pci_runtime_pm_exit() internally increments the usage count
+ * corresponding to the pm_runtime_put() called during low power feature
+ * entry, and pm_runtime_resume() then wakes up the device if it has
+ * already gone into the suspended state. Otherwise,
+ * vfio_pci_set_power_state() changes the device power state to D0.
+ */
+ vfio_pci_runtime_pm_exit(vdev);
+ pm_runtime_resume(&pdev->dev);
+
+ /*
* This function calls __pci_reset_function_locked() which internally
* can use pci_pm_reset() for the function reset. pci_pm_reset() will
* fail if the power state is non-D0. Also, for the devices which
@@ -645,10 +847,10 @@ static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
return vfio_info_add_capability(caps, &header, sizeof(header));
}
-int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
- unsigned int type, unsigned int subtype,
- const struct vfio_pci_regops *ops,
- size_t size, u32 flags, void *data)
+int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data)
{
struct vfio_pci_region *region;
@@ -670,508 +872,532 @@ int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
return 0;
}
-EXPORT_SYMBOL_GPL(vfio_pci_register_dev_region);
+EXPORT_SYMBOL_GPL(vfio_pci_core_register_dev_region);
-long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
- unsigned long arg)
+static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
+ struct vfio_device_info __user *arg)
{
- struct vfio_pci_core_device *vdev =
- container_of(core_vdev, struct vfio_pci_core_device, vdev);
- unsigned long minsz;
-
- if (cmd == VFIO_DEVICE_GET_INFO) {
- struct vfio_device_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- unsigned long capsz;
- int ret;
-
- minsz = offsetofend(struct vfio_device_info, num_irqs);
+ unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
+ struct vfio_device_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ unsigned long capsz;
+ int ret;
- /* For backward compatibility, cannot require this */
- capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+ /* For backward compatibility, cannot require this */
+ capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
- if (info.argsz < minsz)
- return -EINVAL;
+ if (info.argsz < minsz)
+ return -EINVAL;
- if (info.argsz >= capsz) {
- minsz = capsz;
- info.cap_offset = 0;
- }
+ if (info.argsz >= capsz) {
+ minsz = capsz;
+ info.cap_offset = 0;
+ }
- info.flags = VFIO_DEVICE_FLAGS_PCI;
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
- if (vdev->reset_works)
- info.flags |= VFIO_DEVICE_FLAGS_RESET;
+ if (vdev->reset_works)
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
- info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
- info.num_irqs = VFIO_PCI_NUM_IRQS;
+ info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
- ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
- if (ret && ret != -ENODEV) {
- pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
- return ret;
- }
+ ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
+ if (ret && ret != -ENODEV) {
+ pci_warn(vdev->pdev,
+ "Failed to setup zPCI info capabilities\n");
+ return ret;
+ }
- if (caps.size) {
- info.flags |= VFIO_DEVICE_FLAGS_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
+ if (caps.size) {
+ info.flags |= VFIO_DEVICE_FLAGS_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user(arg + 1, caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
}
-
- kfree(caps.buf);
+ info.cap_offset = sizeof(*arg);
}
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
+ kfree(caps.buf);
+ }
- } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
- struct pci_dev *pdev = vdev->pdev;
- struct vfio_region_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- int i, ret;
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
- minsz = offsetofend(struct vfio_region_info, offset);
+static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
+ struct vfio_region_info __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_region_info, offset);
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
- if (info.argsz < minsz)
- return -EINVAL;
+ if (info.argsz < minsz)
+ return -EINVAL;
- switch (info.index) {
- case VFIO_PCI_CONFIG_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = pdev->cfg_size;
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pdev->cfg_size;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ info.flags = 0;
break;
- case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = pci_resource_len(pdev, info.index);
- if (!info.size) {
- info.flags = 0;
- break;
- }
+ }
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- if (vdev->bar_mmap_supported[info.index]) {
- info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
- if (info.index == vdev->msix_bar) {
- ret = msix_mmappable_cap(vdev, &caps);
- if (ret)
- return ret;
- }
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ if (vdev->bar_mmap_supported[info.index]) {
+ info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
+ if (info.index == vdev->msix_bar) {
+ ret = msix_mmappable_cap(vdev, &caps);
+ if (ret)
+ return ret;
}
+ }
- break;
- case VFIO_PCI_ROM_REGION_INDEX:
- {
- void __iomem *io;
- size_t size;
- u16 cmd;
-
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.flags = 0;
+ break;
+ case VFIO_PCI_ROM_REGION_INDEX: {
+ void __iomem *io;
+ size_t size;
+ u16 cmd;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = 0;
+
+ /* Report the BAR size, not the ROM size */
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ /* Shadow ROMs appear as PCI option ROMs */
+ if (pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW)
+ info.size = 0x20000;
+ else
+ break;
+ }
- /* Report the BAR size, not the ROM size */
- info.size = pci_resource_len(pdev, info.index);
- if (!info.size) {
- /* Shadow ROMs appear as PCI option ROMs */
- if (pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW)
- info.size = 0x20000;
- else
- break;
- }
+ /*
+ * Is it really there? Enable memory decode for implicit access
+ * in pci_map_rom().
+ */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ io = pci_map_rom(pdev, &size);
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
+ info.size = 0;
+ }
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
- /*
- * Is it really there? Enable memory decode for
- * implicit access in pci_map_rom().
- */
- cmd = vfio_pci_memory_lock_and_enable(vdev);
- io = pci_map_rom(pdev, &size);
- if (io) {
- info.flags = VFIO_REGION_INFO_FLAG_READ;
- pci_unmap_rom(pdev, io);
- } else {
- info.size = 0;
- }
- vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ break;
+ }
+ case VFIO_PCI_VGA_REGION_INDEX:
+ if (!vdev->has_vga)
+ return -EINVAL;
- break;
- }
- case VFIO_PCI_VGA_REGION_INDEX:
- if (!vdev->has_vga)
- return -EINVAL;
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0xc0000;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0xc0000;
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ default: {
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1
+ };
- break;
- default:
- {
- struct vfio_region_info_cap_type cap_type = {
- .header.id = VFIO_REGION_INFO_CAP_TYPE,
- .header.version = 1 };
+ if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+ info.index = array_index_nospec(
+ info.index, VFIO_PCI_NUM_REGIONS + vdev->num_regions);
- if (info.index >=
- VFIO_PCI_NUM_REGIONS + vdev->num_regions)
- return -EINVAL;
- info.index = array_index_nospec(info.index,
- VFIO_PCI_NUM_REGIONS +
- vdev->num_regions);
+ i = info.index - VFIO_PCI_NUM_REGIONS;
- i = info.index - VFIO_PCI_NUM_REGIONS;
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vdev->region[i].size;
- info.flags = vdev->region[i].flags;
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
- cap_type.type = vdev->region[i].type;
- cap_type.subtype = vdev->region[i].subtype;
+ ret = vfio_info_add_capability(&caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
- ret = vfio_info_add_capability(&caps, &cap_type.header,
- sizeof(cap_type));
+ if (vdev->region[i].ops->add_capability) {
+ ret = vdev->region[i].ops->add_capability(
+ vdev, &vdev->region[i], &caps);
if (ret)
return ret;
-
- if (vdev->region[i].ops->add_capability) {
- ret = vdev->region[i].ops->add_capability(vdev,
- &vdev->region[i], &caps);
- if (ret)
- return ret;
- }
- }
}
+ }
+ }
- if (caps.size) {
- info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- info.cap_offset = 0;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
+ if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user(arg + 1, caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
}
-
- kfree(caps.buf);
+ info.cap_offset = sizeof(*arg);
}
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
+ kfree(caps.buf);
+ }
- } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
- struct vfio_irq_info info;
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
- minsz = offsetofend(struct vfio_irq_info, count);
+static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
+ struct vfio_irq_info __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_irq_info, count);
+ struct vfio_irq_info info;
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
- if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
- return -EINVAL;
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
- switch (info.index) {
- case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
- case VFIO_PCI_REQ_IRQ_INDEX:
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ if (pci_is_pcie(vdev->pdev))
break;
- case VFIO_PCI_ERR_IRQ_INDEX:
- if (pci_is_pcie(vdev->pdev))
- break;
- fallthrough;
- default:
- return -EINVAL;
- }
-
- info.flags = VFIO_IRQ_INFO_EVENTFD;
-
- info.count = vfio_pci_get_irq_count(vdev, info.index);
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
- if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
- info.flags |= (VFIO_IRQ_INFO_MASKABLE |
- VFIO_IRQ_INFO_AUTOMASKED);
- else
- info.flags |= VFIO_IRQ_INFO_NORESIZE;
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
+ info.count = vfio_pci_get_irq_count(vdev, info.index);
- } else if (cmd == VFIO_DEVICE_SET_IRQS) {
- struct vfio_irq_set hdr;
- u8 *data = NULL;
- int max, ret = 0;
- size_t data_size = 0;
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |=
+ (VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
- minsz = offsetofend(struct vfio_irq_set, count);
+ return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
+}
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
+static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
+ struct vfio_irq_set __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_irq_set, count);
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int max, ret = 0;
+ size_t data_size = 0;
- max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
- ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
- VFIO_PCI_NUM_IRQS, &data_size);
- if (ret)
- return ret;
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
- if (data_size) {
- data = memdup_user((void __user *)(arg + minsz),
- data_size);
- if (IS_ERR(data))
- return PTR_ERR(data);
- }
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS,
+ &data_size);
+ if (ret)
+ return ret;
- mutex_lock(&vdev->igate);
+ if (data_size) {
+ data = memdup_user(&arg->data, data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
- ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
- hdr.start, hdr.count, data);
+ mutex_lock(&vdev->igate);
- mutex_unlock(&vdev->igate);
- kfree(data);
+ ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start,
+ hdr.count, data);
- return ret;
+ mutex_unlock(&vdev->igate);
+ kfree(data);
- } else if (cmd == VFIO_DEVICE_RESET) {
- int ret;
+ return ret;
+}
- if (!vdev->reset_works)
- return -EINVAL;
+static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
+ void __user *arg)
+{
+ int ret;
- vfio_pci_zap_and_down_write_memory_lock(vdev);
+ if (!vdev->reset_works)
+ return -EINVAL;
- /*
- * This function can be invoked while the power state is non-D0.
- * If pci_try_reset_function() has been called while the power
- * state is non-D0, then pci_try_reset_function() will
- * internally set the power state to D0 without vfio driver
- * involvement. For the devices which have NoSoftRst-, the
- * reset function can cause the PCI config space reset without
- * restoring the original state (saved locally in
- * 'vdev->pm_save').
- */
- vfio_pci_set_power_state(vdev, PCI_D0);
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
- ret = pci_try_reset_function(vdev->pdev);
- up_write(&vdev->memory_lock);
+ /*
+ * This function can be invoked while the power state is non-D0. If
+ * pci_try_reset_function() has been called while the power state is
+ * non-D0, then pci_try_reset_function() will internally set the power
+ * state to D0 without vfio driver involvement. For the devices which
+ * have NoSoftRst-, the reset function can cause the PCI config space
+ * reset without restoring the original state (saved locally in
+ * 'vdev->pm_save').
+ */
+ vfio_pci_set_power_state(vdev, PCI_D0);
- return ret;
+ ret = pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
- } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
- struct vfio_pci_hot_reset_info hdr;
- struct vfio_pci_fill_info fill = { 0 };
- struct vfio_pci_dependent_device *devices = NULL;
- bool slot = false;
- int ret = 0;
+ return ret;
+}
- minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
+static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ struct vfio_pci_core_device *vdev,
+ struct vfio_pci_hot_reset_info __user *arg)
+{
+ unsigned long minsz =
+ offsetofend(struct vfio_pci_hot_reset_info, count);
+ struct vfio_pci_hot_reset_info hdr;
+ struct vfio_pci_fill_info fill = { 0 };
+ struct vfio_pci_dependent_device *devices = NULL;
+ bool slot = false;
+ int ret = 0;
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
- if (hdr.argsz < minsz)
- return -EINVAL;
+ if (hdr.argsz < minsz)
+ return -EINVAL;
- hdr.flags = 0;
+ hdr.flags = 0;
- /* Can we do a slot or bus reset or neither? */
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return -ENODEV;
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
- /* How many devices are affected? */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_count_devs,
- &fill.max, slot);
- if (ret)
- return ret;
+ /* How many devices are affected? */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+ &fill.max, slot);
+ if (ret)
+ return ret;
- WARN_ON(!fill.max); /* Should always be at least one */
+ WARN_ON(!fill.max); /* Should always be at least one */
- /*
- * If there's enough space, fill it now, otherwise return
- * -ENOSPC and the number of devices affected.
- */
- if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
- ret = -ENOSPC;
- hdr.count = fill.max;
- goto reset_info_exit;
- }
+ /*
+ * If there's enough space, fill it now, otherwise return -ENOSPC and
+ * the number of devices affected.
+ */
+ if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
+ ret = -ENOSPC;
+ hdr.count = fill.max;
+ goto reset_info_exit;
+ }
- devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
- if (!devices)
- return -ENOMEM;
+ devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
+ if (!devices)
+ return -ENOMEM;
- fill.devices = devices;
+ fill.devices = devices;
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_fill_devs,
- &fill, slot);
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
+ &fill, slot);
- /*
- * If a device was removed between counting and filling,
- * we may come up short of fill.max. If a device was
- * added, we'll have a return of -EAGAIN above.
- */
- if (!ret)
- hdr.count = fill.cur;
+ /*
+ * If a device was removed between counting and filling, we may come up
+ * short of fill.max. If a device was added, we'll have a return of
+ * -EAGAIN above.
+ */
+ if (!ret)
+ hdr.count = fill.cur;
reset_info_exit:
- if (copy_to_user((void __user *)arg, &hdr, minsz))
+ if (copy_to_user(arg, &hdr, minsz))
+ ret = -EFAULT;
+
+ if (!ret) {
+ if (copy_to_user(&arg->devices, devices,
+ hdr.count * sizeof(*devices)))
ret = -EFAULT;
+ }
- if (!ret) {
- if (copy_to_user((void __user *)(arg + minsz), devices,
- hdr.count * sizeof(*devices)))
- ret = -EFAULT;
- }
+ kfree(devices);
+ return ret;
+}
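
For reference, the -ENOSPC handling above supports the usual two-call pattern for this ioctl: probe with a header-sized buffer to learn the count, then retry with a buffer large enough for the dependent device array. A hedged userspace sketch of that pattern (not part of this patch, error handling trimmed) is shown below.

/*
 * Hypothetical userspace sketch: query VFIO_DEVICE_GET_PCI_HOT_RESET_INFO
 * in two passes.  Caller frees the returned buffer.
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static struct vfio_pci_hot_reset_info *query_hot_reset_info(int device_fd)
{
	struct vfio_pci_hot_reset_info probe = { .argsz = sizeof(probe) };
	struct vfio_pci_hot_reset_info *info;
	size_t sz;

	/* First pass: deliberately too small, the kernel reports the count. */
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &probe) == 0)
		return NULL;	/* unexpected: at least one device is affected */
	if (errno != ENOSPC)
		return NULL;

	sz = sizeof(*info) +
	     probe.count * sizeof(struct vfio_pci_dependent_device);
	info = calloc(1, sz);
	if (!info)
		return NULL;
	info->argsz = sz;

	/* Second pass: large enough, devices[] gets filled in. */
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
		free(info);
		return NULL;
	}
	return info;
}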
- kfree(devices);
- return ret;
+static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
+ struct vfio_pci_hot_reset __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
+ struct vfio_pci_hot_reset hdr;
+ int32_t *group_fds;
+ struct file **files;
+ struct vfio_pci_group_info info;
+ bool slot = false;
+ int file_idx, count = 0, ret = 0;
- } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
- struct vfio_pci_hot_reset hdr;
- int32_t *group_fds;
- struct file **files;
- struct vfio_pci_group_info info;
- bool slot = false;
- int file_idx, count = 0, ret = 0;
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
- minsz = offsetofend(struct vfio_pci_hot_reset, count);
+ if (hdr.argsz < minsz || hdr.flags)
+ return -EINVAL;
- if (copy_from_user(&hdr, (void __user *)arg, minsz))
- return -EFAULT;
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
- if (hdr.argsz < minsz || hdr.flags)
- return -EINVAL;
+ /*
+ * We can't let userspace give us an arbitrarily large buffer to copy,
+ * so verify how many we think there could be. Note groups can have
+ * multiple devices so one group per device is the max.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+ &count, slot);
+ if (ret)
+ return ret;
- /* Can we do a slot or bus reset or neither? */
- if (!pci_probe_reset_slot(vdev->pdev->slot))
- slot = true;
- else if (pci_probe_reset_bus(vdev->pdev->bus))
- return -ENODEV;
+ /* Somewhere between 1 and count is OK */
+ if (!hdr.count || hdr.count > count)
+ return -EINVAL;
- /*
- * We can't let userspace give us an arbitrarily large
- * buffer to copy, so verify how many we think there
- * could be. Note groups can have multiple devices so
- * one group per device is the max.
- */
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_count_devs,
- &count, slot);
- if (ret)
- return ret;
+ group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
+ files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+ if (!group_fds || !files) {
+ kfree(group_fds);
+ kfree(files);
+ return -ENOMEM;
+ }
- /* Somewhere between 1 and count is OK */
- if (!hdr.count || hdr.count > count)
- return -EINVAL;
+ if (copy_from_user(group_fds, arg->group_fds,
+ hdr.count * sizeof(*group_fds))) {
+ kfree(group_fds);
+ kfree(files);
+ return -EFAULT;
+ }
- group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
- files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
- if (!group_fds || !files) {
- kfree(group_fds);
- kfree(files);
- return -ENOMEM;
- }
+ /*
+ * For each group_fd, get the group through the vfio external user
+ * interface and store the group and iommu ID. This ensures the group
+ * is held across the reset.
+ */
+ for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+ struct file *file = fget(group_fds[file_idx]);
- if (copy_from_user(group_fds, (void __user *)(arg + minsz),
- hdr.count * sizeof(*group_fds))) {
- kfree(group_fds);
- kfree(files);
- return -EFAULT;
+ if (!file) {
+ ret = -EBADF;
+ break;
}
- /*
- * For each group_fd, get the group through the vfio external
- * user interface and store the group and iommu ID. This
- * ensures the group is held across the reset.
- */
- for (file_idx = 0; file_idx < hdr.count; file_idx++) {
- struct file *file = fget(group_fds[file_idx]);
-
- if (!file) {
- ret = -EBADF;
- break;
- }
-
- /* Ensure the FD is a vfio group FD.*/
- if (!vfio_file_iommu_group(file)) {
- fput(file);
- ret = -EINVAL;
- break;
- }
-
- files[file_idx] = file;
+ /* Ensure the FD is a vfio group FD. */
+ if (!vfio_file_is_group(file)) {
+ fput(file);
+ ret = -EINVAL;
+ break;
}
- kfree(group_fds);
+ files[file_idx] = file;
+ }
- /* release reference to groups on error */
- if (ret)
- goto hot_reset_release;
+ kfree(group_fds);
+
+ /* release reference to groups on error */
+ if (ret)
+ goto hot_reset_release;
- info.count = hdr.count;
- info.files = files;
+ info.count = hdr.count;
+ info.files = files;
- ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
+ ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
hot_reset_release:
- for (file_idx--; file_idx >= 0; file_idx--)
- fput(files[file_idx]);
+ for (file_idx--; file_idx >= 0; file_idx--)
+ fput(files[file_idx]);
- kfree(files);
- return ret;
- } else if (cmd == VFIO_DEVICE_IOEVENTFD) {
- struct vfio_device_ioeventfd ioeventfd;
- int count;
+ kfree(files);
+ return ret;
+}
- minsz = offsetofend(struct vfio_device_ioeventfd, fd);
+static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
+ struct vfio_device_ioeventfd __user *arg)
+{
+ unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd);
+ struct vfio_device_ioeventfd ioeventfd;
+ int count;
- if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
- return -EFAULT;
+ if (copy_from_user(&ioeventfd, arg, minsz))
+ return -EFAULT;
- if (ioeventfd.argsz < minsz)
- return -EINVAL;
+ if (ioeventfd.argsz < minsz)
+ return -EINVAL;
- if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
- return -EINVAL;
+ if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
+ return -EINVAL;
- count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
+ count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
- if (hweight8(count) != 1 || ioeventfd.fd < -1)
- return -EINVAL;
+ if (hweight8(count) != 1 || ioeventfd.fd < -1)
+ return -EINVAL;
- return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
- ioeventfd.data, count, ioeventfd.fd);
+ return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count,
+ ioeventfd.fd);
+}
+
+long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ void __user *uarg = (void __user *)arg;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ return vfio_pci_ioctl_get_info(vdev, uarg);
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ return vfio_pci_ioctl_get_irq_info(vdev, uarg);
+ case VFIO_DEVICE_GET_PCI_HOT_RESET_INFO:
+ return vfio_pci_ioctl_get_pci_hot_reset_info(vdev, uarg);
+ case VFIO_DEVICE_GET_REGION_INFO:
+ return vfio_pci_ioctl_get_region_info(vdev, uarg);
+ case VFIO_DEVICE_IOEVENTFD:
+ return vfio_pci_ioctl_ioeventfd(vdev, uarg);
+ case VFIO_DEVICE_PCI_HOT_RESET:
+ return vfio_pci_ioctl_pci_hot_reset(vdev, uarg);
+ case VFIO_DEVICE_RESET:
+ return vfio_pci_ioctl_reset(vdev, uarg);
+ case VFIO_DEVICE_SET_IRQS:
+ return vfio_pci_ioctl_set_irqs(vdev, uarg);
+ default:
+ return -ENOTTY;
}
- return -ENOTTY;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
- void __user *arg, size_t argsz)
+ uuid_t __user *arg, size_t argsz)
{
struct vfio_pci_core_device *vdev =
container_of(device, struct vfio_pci_core_device, vdev);
@@ -1202,6 +1428,13 @@ int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
void __user *arg, size_t argsz)
{
switch (flags & VFIO_DEVICE_FEATURE_MASK) {
+ case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY:
+ return vfio_pci_core_pm_entry(device, flags, arg, argsz);
+ case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP:
+ return vfio_pci_core_pm_entry_with_wakeup(device, flags,
+ arg, argsz);
+ case VFIO_DEVICE_FEATURE_LOW_POWER_EXIT:
+ return vfio_pci_core_pm_exit(device, flags, arg, argsz);
case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
return vfio_pci_core_feature_token(device, flags, arg, argsz);
default:
@@ -1214,31 +1447,47 @@ static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ int ret;
if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
+ ret = pm_runtime_resume_and_get(&vdev->pdev->dev);
+ if (ret) {
+ pci_info_ratelimited(vdev->pdev, "runtime resume failed %d\n",
+ ret);
+ return -EIO;
+ }
+
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
- return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+ ret = vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+ break;
case VFIO_PCI_ROM_REGION_INDEX:
if (iswrite)
- return -EINVAL;
- return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+ ret = -EINVAL;
+ else
+ ret = vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+ break;
case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
- return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+ ret = vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+ break;
case VFIO_PCI_VGA_REGION_INDEX:
- return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ ret = vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ break;
+
default:
index -= VFIO_PCI_NUM_REGIONS;
- return vdev->region[index].ops->rw(vdev, buf,
+ ret = vdev->region[index].ops->rw(vdev, buf,
count, ppos, iswrite);
+ break;
}
- return -EINVAL;
+ pm_runtime_put(&vdev->pdev->dev);
+ return ret;
}
ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
@@ -1433,7 +1682,11 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
mutex_lock(&vdev->vma_lock);
down_read(&vdev->memory_lock);
- if (!__vfio_pci_memory_enabled(vdev)) {
+ /*
+ * The memory region cannot be accessed if the low power feature is
+ * engaged or if memory access is disabled.
+ */
+ if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
ret = VM_FAULT_SIGBUS;
goto up_out;
}
@@ -1825,12 +2078,12 @@ static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
VGA_RSRC_LEGACY_MEM);
}
-void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
- struct pci_dev *pdev,
- const struct vfio_device_ops *vfio_pci_ops)
+int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
{
- vfio_init_group_dev(&vdev->vdev, &pdev->dev, vfio_pci_ops);
- vdev->pdev = pdev;
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
+ vdev->pdev = to_pci_dev(core_vdev->dev);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
@@ -1841,19 +2094,24 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
INIT_LIST_HEAD(&vdev->vma_list);
INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
+EXPORT_SYMBOL_GPL(vfio_pci_core_init_dev);
-void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev)
+void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
mutex_destroy(&vdev->igate);
mutex_destroy(&vdev->ioeventfds_lock);
mutex_destroy(&vdev->vma_lock);
- vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev->region);
kfree(vdev->pm_save);
+ vfio_free_device(core_vdev);
}
-EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device);
+EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
{
@@ -1875,6 +2133,11 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
return -EINVAL;
}
+ if (vdev->vdev.log_ops && !(vdev->vdev.log_ops->log_start &&
+ vdev->vdev.log_ops->log_stop &&
+ vdev->vdev.log_ops->log_read_and_clear))
+ return -EINVAL;
+
/*
* Prevent binding to PFs with VFs enabled, the VFs might be in use
* by the host or other users. We cannot capture the VFs if they
@@ -2148,6 +2411,15 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
goto err_unlock;
}
+ /*
+ * Some of the devices in the dev_set can be in the runtime-suspended
+ * state. Increment the usage count for all the devices in the dev_set
+ * before the reset and decrement it again afterwards.
+ */
+ ret = vfio_pci_dev_set_pm_runtime_get(dev_set);
+ if (ret)
+ goto err_unlock;
+
list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
/*
* Test whether all the affected devices are contained by the
@@ -2203,6 +2475,9 @@ err_undo:
else
mutex_unlock(&cur->vma_lock);
}
+
+ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+ pm_runtime_put(&cur->pdev->dev);
err_unlock:
mutex_unlock(&dev_set->lock);
return ret;
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 352c725ccf18..5e6ca5926954 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -15,7 +15,7 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define OPREGION_SIZE (8 * 1024)
@@ -257,7 +257,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
}
}
- ret = vfio_pci_register_dev_region(vdev,
+ ret = vfio_pci_core_register_dev_region(vdev,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
@@ -402,7 +402,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
return -EINVAL;
}
- ret = vfio_pci_register_dev_region(vdev,
+ ret = vfio_pci_core_register_dev_region(vdev,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
@@ -422,7 +422,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
return -EINVAL;
}
- ret = vfio_pci_register_dev_region(vdev,
+ ret = vfio_pci_core_register_dev_region(vdev,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 6069a11fb51a..40c3d7cf163f 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -20,7 +20,33 @@
#include <linux/wait.h>
#include <linux/slab.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
+
+struct vfio_pci_irq_ctx {
+ struct eventfd_ctx *trigger;
+ struct virqfd *unmask;
+ struct virqfd *mask;
+ char *name;
+ bool masked;
+ struct irq_bypass_producer producer;
+};
+
+static bool irq_is(struct vfio_pci_core_device *vdev, int type)
+{
+ return vdev->irq_type == type;
+}
+
+static bool is_intx(struct vfio_pci_core_device *vdev)
+{
+ return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
+}
+
+static bool is_irq_none(struct vfio_pci_core_device *vdev)
+{
+ return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
+ vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
+ vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
+}
/*
* INTx
@@ -33,10 +59,12 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
eventfd_signal(vdev->ctx[0].trigger, 1);
}
-void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
+bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
+ bool masked_changed = false;
spin_lock_irqsave(&vdev->irqlock, flags);
@@ -60,9 +88,11 @@ void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
disable_irq_nosync(pdev->irq);
vdev->ctx[0].masked = true;
+ masked_changed = true;
}
spin_unlock_irqrestore(&vdev->irqlock, flags);
+ return masked_changed;
}
/*
diff --git a/drivers/vfio/pci/vfio_pci_priv.h b/drivers/vfio/pci/vfio_pci_priv.h
new file mode 100644
index 000000000000..5e4fa69aee16
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_priv.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef VFIO_PCI_PRIV_H
+#define VFIO_PCI_PRIV_H
+
+#include <linux/vfio_pci_core.h>
+
+/* Special capability IDs predefined access */
+#define PCI_CAP_ID_INVALID 0xFF /* default raw access */
+#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */
+
+/* Cap maximum number of ioeventfds per device (arbitrary) */
+#define VFIO_PCI_IOEVENTFD_MAX 1000
+
+struct vfio_pci_ioeventfd {
+ struct list_head next;
+ struct vfio_pci_core_device *vdev;
+ struct virqfd *virqfd;
+ void __iomem *addr;
+ uint64_t data;
+ loff_t pos;
+ int bar;
+ int count;
+ bool test_mem;
+};
+
+bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
+void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);
+
+int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
+ unsigned index, unsigned start, unsigned count,
+ void *data);
+
+ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+#ifdef CONFIG_VFIO_PCI_VGA
+ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+#else
+static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite)
+{
+ return -EINVAL;
+}
+#endif
+
+int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ uint64_t data, int count, int fd);
+
+int vfio_pci_init_perm_bits(void);
+void vfio_pci_uninit_perm_bits(void);
+
+int vfio_config_init(struct vfio_pci_core_device *vdev);
+void vfio_config_free(struct vfio_pci_core_device *vdev);
+
+int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev,
+ pci_power_t state);
+
+bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev);
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev);
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
+ u16 cmd);
+
+#ifdef CONFIG_VFIO_PCI_IGD
+int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
+#else
+static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_VFIO_PCI_ZDEV_KVM
+int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps);
+int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev);
+void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev);
+#else
+static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ return -ENODEV;
+}
+
+static inline int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
+{
+ return 0;
+}
+
+static inline void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
+{}
+#endif
+
+static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
+{
+ return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+}
+
+#endif
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 82ac1569deb0..e352a033b4ae 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -17,7 +17,7 @@
#include <linux/vfio.h>
#include <linux/vgaarb.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
#ifdef __LITTLE_ENDIAN
#define vfio_ioread64 ioread64
@@ -412,8 +412,8 @@ static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
}
-long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
- uint64_t data, int count, int fd)
+int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ uint64_t data, int count, int fd)
{
struct pci_dev *pdev = vdev->pdev;
loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index 0cbdcd14f1c8..0990fdb146b7 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -15,7 +15,7 @@
#include <asm/pci_clp.h>
#include <asm/pci_io.h>
-#include <linux/vfio_pci_core.h>
+#include "vfio_pci_priv.h"
/*
* Add the Base PCI Function information to the device info region.
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 1aaa4f721bd2..eaea63e5294c 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vfio.h>
+#include <linux/pm_runtime.h>
#include <linux/amba/bus.h>
#include "vfio_platform_private.h"
@@ -40,20 +41,16 @@ static int get_amba_irq(struct vfio_platform_device *vdev, int i)
return ret ? ret : -ENXIO;
}
-static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
+static int vfio_amba_init_dev(struct vfio_device *core_vdev)
{
- struct vfio_platform_device *vdev;
+ struct vfio_platform_device *vdev =
+ container_of(core_vdev, struct vfio_platform_device, vdev);
+ struct amba_device *adev = to_amba_device(core_vdev->dev);
int ret;
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
-
vdev->name = kasprintf(GFP_KERNEL, "vfio-amba-%08x", adev->periphid);
- if (!vdev->name) {
- kfree(vdev);
+ if (!vdev->name)
return -ENOMEM;
- }
vdev->opaque = (void *) adev;
vdev->flags = VFIO_DEVICE_FLAGS_AMBA;
@@ -61,26 +58,67 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
vdev->get_irq = get_amba_irq;
vdev->reset_required = false;
- ret = vfio_platform_probe_common(vdev, &adev->dev);
- if (ret) {
+ ret = vfio_platform_init_common(vdev);
+ if (ret)
kfree(vdev->name);
- kfree(vdev);
- return ret;
- }
+ return ret;
+}
+
+static const struct vfio_device_ops vfio_amba_ops;
+static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct vfio_platform_device *vdev;
+ int ret;
+
+ vdev = vfio_alloc_device(vfio_platform_device, vdev, &adev->dev,
+ &vfio_amba_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+ ret = vfio_register_group_dev(&vdev->vdev);
+ if (ret)
+ goto out_put_vdev;
+
+ pm_runtime_enable(&adev->dev);
dev_set_drvdata(&adev->dev, vdev);
return 0;
+
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
+ return ret;
+}
+
+static void vfio_amba_release_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_platform_device *vdev =
+ container_of(core_vdev, struct vfio_platform_device, vdev);
+
+ vfio_platform_release_common(vdev);
+ kfree(vdev->name);
+ vfio_free_device(core_vdev);
}
static void vfio_amba_remove(struct amba_device *adev)
{
struct vfio_platform_device *vdev = dev_get_drvdata(&adev->dev);
- vfio_platform_remove_common(vdev);
- kfree(vdev->name);
- kfree(vdev);
+ vfio_unregister_group_dev(&vdev->vdev);
+ pm_runtime_disable(vdev->device);
+ vfio_put_device(&vdev->vdev);
}
+static const struct vfio_device_ops vfio_amba_ops = {
+ .name = "vfio-amba",
+ .init = vfio_amba_init_dev,
+ .release = vfio_amba_release_dev,
+ .open_device = vfio_platform_open_device,
+ .close_device = vfio_platform_close_device,
+ .ioctl = vfio_platform_ioctl,
+ .read = vfio_platform_read,
+ .write = vfio_platform_write,
+ .mmap = vfio_platform_mmap,
+};
+
static const struct amba_id pl330_ids[] = {
{ 0, 0 },
};
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index 04f40c5acfd6..82cedcebfd90 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vfio.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include "vfio_platform_private.h"
@@ -36,14 +37,11 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i)
return platform_get_irq_optional(pdev, i);
}
-static int vfio_platform_probe(struct platform_device *pdev)
+static int vfio_platform_init_dev(struct vfio_device *core_vdev)
{
- struct vfio_platform_device *vdev;
- int ret;
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
+ struct vfio_platform_device *vdev =
+ container_of(core_vdev, struct vfio_platform_device, vdev);
+ struct platform_device *pdev = to_platform_device(core_vdev->dev);
vdev->opaque = (void *) pdev;
vdev->name = pdev->name;
@@ -52,24 +50,64 @@ static int vfio_platform_probe(struct platform_device *pdev)
vdev->get_irq = get_platform_irq;
vdev->reset_required = reset_required;
- ret = vfio_platform_probe_common(vdev, &pdev->dev);
- if (ret) {
- kfree(vdev);
- return ret;
- }
+ return vfio_platform_init_common(vdev);
+}
+
+static const struct vfio_device_ops vfio_platform_ops;
+static int vfio_platform_probe(struct platform_device *pdev)
+{
+ struct vfio_platform_device *vdev;
+ int ret;
+
+ vdev = vfio_alloc_device(vfio_platform_device, vdev, &pdev->dev,
+ &vfio_platform_ops);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+
+ ret = vfio_register_group_dev(&vdev->vdev);
+ if (ret)
+ goto out_put_vdev;
+
+ pm_runtime_enable(&pdev->dev);
dev_set_drvdata(&pdev->dev, vdev);
return 0;
+
+out_put_vdev:
+ vfio_put_device(&vdev->vdev);
+ return ret;
+}
+
+static void vfio_platform_release_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_platform_device *vdev =
+ container_of(core_vdev, struct vfio_platform_device, vdev);
+
+ vfio_platform_release_common(vdev);
+ vfio_free_device(core_vdev);
}
static int vfio_platform_remove(struct platform_device *pdev)
{
struct vfio_platform_device *vdev = dev_get_drvdata(&pdev->dev);
- vfio_platform_remove_common(vdev);
- kfree(vdev);
+ vfio_unregister_group_dev(&vdev->vdev);
+ pm_runtime_disable(vdev->device);
+ vfio_put_device(&vdev->vdev);
return 0;
}
+static const struct vfio_device_ops vfio_platform_ops = {
+ .name = "vfio-platform",
+ .init = vfio_platform_init_dev,
+ .release = vfio_platform_release_dev,
+ .open_device = vfio_platform_open_device,
+ .close_device = vfio_platform_close_device,
+ .ioctl = vfio_platform_ioctl,
+ .read = vfio_platform_read,
+ .write = vfio_platform_write,
+ .mmap = vfio_platform_mmap,
+};
+
static struct platform_driver vfio_platform_driver = {
.probe = vfio_platform_probe,
.remove = vfio_platform_remove,
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 256f55b84e70..55dc4f43c31e 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -218,7 +218,7 @@ static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
return -EINVAL;
}
-static void vfio_platform_close_device(struct vfio_device *core_vdev)
+void vfio_platform_close_device(struct vfio_device *core_vdev)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -236,8 +236,9 @@ static void vfio_platform_close_device(struct vfio_device *core_vdev)
vfio_platform_regions_cleanup(vdev);
vfio_platform_irq_cleanup(vdev);
}
+EXPORT_SYMBOL_GPL(vfio_platform_close_device);
-static int vfio_platform_open_device(struct vfio_device *core_vdev)
+int vfio_platform_open_device(struct vfio_device *core_vdev)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -273,9 +274,10 @@ err_irq:
vfio_platform_regions_cleanup(vdev);
return ret;
}
+EXPORT_SYMBOL_GPL(vfio_platform_open_device);
-static long vfio_platform_ioctl(struct vfio_device *core_vdev,
- unsigned int cmd, unsigned long arg)
+long vfio_platform_ioctl(struct vfio_device *core_vdev,
+ unsigned int cmd, unsigned long arg)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -382,6 +384,7 @@ static long vfio_platform_ioctl(struct vfio_device *core_vdev,
return -ENOTTY;
}
+EXPORT_SYMBOL_GPL(vfio_platform_ioctl);
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
char __user *buf, size_t count,
@@ -438,8 +441,8 @@ err:
return -EFAULT;
}
-static ssize_t vfio_platform_read(struct vfio_device *core_vdev,
- char __user *buf, size_t count, loff_t *ppos)
+ssize_t vfio_platform_read(struct vfio_device *core_vdev,
+ char __user *buf, size_t count, loff_t *ppos)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -460,6 +463,7 @@ static ssize_t vfio_platform_read(struct vfio_device *core_vdev,
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(vfio_platform_read);
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
const char __user *buf, size_t count,
@@ -515,8 +519,8 @@ err:
return -EFAULT;
}
-static ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf,
- size_t count, loff_t *ppos)
+ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf,
+ size_t count, loff_t *ppos)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -537,6 +541,7 @@ static ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __u
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(vfio_platform_write);
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
struct vm_area_struct *vma)
@@ -558,7 +563,7 @@ static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
req_len, vma->vm_page_prot);
}
-static int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
+int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
@@ -598,16 +603,7 @@ static int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_stru
return -EINVAL;
}
-
-static const struct vfio_device_ops vfio_platform_ops = {
- .name = "vfio-platform",
- .open_device = vfio_platform_open_device,
- .close_device = vfio_platform_close_device,
- .ioctl = vfio_platform_ioctl,
- .read = vfio_platform_read,
- .write = vfio_platform_write,
- .mmap = vfio_platform_mmap,
-};
+EXPORT_SYMBOL_GPL(vfio_platform_mmap);
static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
struct device *dev)
@@ -639,55 +635,34 @@ static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
* If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
* valid checks. We cannot claim that this system is DT.
*/
-int vfio_platform_probe_common(struct vfio_platform_device *vdev,
- struct device *dev)
+int vfio_platform_init_common(struct vfio_platform_device *vdev)
{
int ret;
-
- vfio_init_group_dev(&vdev->vdev, dev, &vfio_platform_ops);
+ struct device *dev = vdev->vdev.dev;
ret = vfio_platform_acpi_probe(vdev, dev);
if (ret)
ret = vfio_platform_of_probe(vdev, dev);
if (ret)
- goto out_uninit;
+ return ret;
vdev->device = dev;
+ mutex_init(&vdev->igate);
ret = vfio_platform_get_reset(vdev);
- if (ret && vdev->reset_required) {
+ if (ret && vdev->reset_required)
dev_err(dev, "No reset function found for device %s\n",
vdev->name);
- goto out_uninit;
- }
-
- ret = vfio_register_group_dev(&vdev->vdev);
- if (ret)
- goto put_reset;
-
- mutex_init(&vdev->igate);
-
- pm_runtime_enable(dev);
- return 0;
-
-put_reset:
- vfio_platform_put_reset(vdev);
-out_uninit:
- vfio_uninit_group_dev(&vdev->vdev);
return ret;
}
-EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
+EXPORT_SYMBOL_GPL(vfio_platform_init_common);
-void vfio_platform_remove_common(struct vfio_platform_device *vdev)
+void vfio_platform_release_common(struct vfio_platform_device *vdev)
{
- vfio_unregister_group_dev(&vdev->vdev);
-
- pm_runtime_disable(vdev->device);
vfio_platform_put_reset(vdev);
- vfio_uninit_group_dev(&vdev->vdev);
}
-EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
+EXPORT_SYMBOL_GPL(vfio_platform_release_common);
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index 691b43f4b2b2..8d8fab516849 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -78,9 +78,21 @@ struct vfio_platform_reset_node {
vfio_platform_reset_fn_t of_reset;
};
-int vfio_platform_probe_common(struct vfio_platform_device *vdev,
- struct device *dev);
-void vfio_platform_remove_common(struct vfio_platform_device *vdev);
+int vfio_platform_init_common(struct vfio_platform_device *vdev);
+void vfio_platform_release_common(struct vfio_platform_device *vdev);
+
+int vfio_platform_open_device(struct vfio_device *core_vdev);
+void vfio_platform_close_device(struct vfio_device *core_vdev);
+long vfio_platform_ioctl(struct vfio_device *core_vdev,
+ unsigned int cmd, unsigned long arg);
+ssize_t vfio_platform_read(struct vfio_device *core_vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos);
+ssize_t vfio_platform_write(struct vfio_device *core_vdev,
+ const char __user *buf,
+ size_t count, loff_t *ppos);
+int vfio_platform_mmap(struct vfio_device *core_vdev,
+ struct vm_area_struct *vma);
int vfio_platform_irq_init(struct vfio_platform_device *vdev);
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index 503bea6c843d..bcad54bbab08 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -3,6 +3,16 @@
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*/
+#ifndef __VFIO_VFIO_H__
+#define __VFIO_VFIO_H__
+
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+
+struct iommu_group;
+struct vfio_device;
+struct vfio_container;
enum vfio_group_type {
/*
@@ -28,6 +38,30 @@ enum vfio_group_type {
VFIO_NO_IOMMU,
};
+struct vfio_group {
+ struct device dev;
+ struct cdev cdev;
+ /*
+ * When drivers is non-zero a driver is attached to the struct device
+ * that provided the iommu_group and thus the iommu_group is a valid
+ * pointer. When drivers is 0 the driver is being detached. Once users
+ * reaches 0 then the iommu_group is invalid.
+ */
+ refcount_t drivers;
+ unsigned int container_users;
+ struct iommu_group *iommu_group;
+ struct vfio_container *container;
+ struct list_head device_list;
+ struct mutex device_lock;
+ struct list_head vfio_next;
+ struct list_head container_next;
+ enum vfio_group_type type;
+ struct mutex group_lock;
+ struct kvm *kvm;
+ struct file *opened_file;
+ struct blocking_notifier_head notifier;
+};
+
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
VFIO_IOMMU_CONTAINER_CLOSE = 0,
@@ -67,5 +101,33 @@ struct vfio_iommu_driver_ops {
enum vfio_iommu_notify_type event);
};
+struct vfio_iommu_driver {
+ const struct vfio_iommu_driver_ops *ops;
+ struct list_head vfio_next;
+};
+
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
+
+bool vfio_assert_device_open(struct vfio_device *device);
+
+struct vfio_container *vfio_container_from_file(struct file *filep);
+int vfio_device_assign_container(struct vfio_device *device);
+void vfio_device_unassign_container(struct vfio_device *device);
+int vfio_container_attach_group(struct vfio_container *container,
+ struct vfio_group *group);
+void vfio_group_detach_container(struct vfio_group *group);
+void vfio_device_container_register(struct vfio_device *device);
+void vfio_device_container_unregister(struct vfio_device *device);
+long vfio_container_ioctl_check_extension(struct vfio_container *container,
+ unsigned long arg);
+int __init vfio_container_init(void);
+void vfio_container_cleanup(void);
+
+#ifdef CONFIG_VFIO_NOIOMMU
+extern bool vfio_noiommu __read_mostly;
+#else
+enum { vfio_noiommu = false };
+#endif
+
+#endif
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 8706482665d1..23c24fe98c00 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -37,7 +37,6 @@
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
-#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>
#include "vfio.h"
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 7cb56c382c97..2d168793d4e1 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -32,6 +32,9 @@
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
+#include <linux/pm_runtime.h>
+#include <linux/interval_tree.h>
+#include <linux/iova_bitmap.h>
#include "vfio.h"
#define DRIVER_VERSION "0.3"
@@ -40,54 +43,14 @@
static struct vfio {
struct class *class;
- struct list_head iommu_drivers_list;
- struct mutex iommu_drivers_lock;
struct list_head group_list;
struct mutex group_lock; /* locks group_list */
struct ida group_ida;
dev_t group_devt;
+ struct class *device_class;
+ struct ida device_ida;
} vfio;
-struct vfio_iommu_driver {
- const struct vfio_iommu_driver_ops *ops;
- struct list_head vfio_next;
-};
-
-struct vfio_container {
- struct kref kref;
- struct list_head group_list;
- struct rw_semaphore group_lock;
- struct vfio_iommu_driver *iommu_driver;
- void *iommu_data;
- bool noiommu;
-};
-
-struct vfio_group {
- struct device dev;
- struct cdev cdev;
- refcount_t users;
- unsigned int container_users;
- struct iommu_group *iommu_group;
- struct vfio_container *container;
- struct list_head device_list;
- struct mutex device_lock;
- struct list_head vfio_next;
- struct list_head container_next;
- enum vfio_group_type type;
- unsigned int dev_counter;
- struct rw_semaphore group_rwsem;
- struct kvm *kvm;
- struct file *opened_file;
- struct blocking_notifier_head notifier;
-};
-
-#ifdef CONFIG_VFIO_NOIOMMU
-static bool noiommu __read_mostly;
-module_param_named(enable_unsafe_noiommu_mode,
- noiommu, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
-#endif
-
static DEFINE_XARRAY(vfio_device_set_xa);
static const struct file_operations vfio_group_fops;
@@ -162,146 +125,6 @@ static void vfio_release_device_set(struct vfio_device *device)
xa_unlock(&vfio_device_set_xa);
}
-#ifdef CONFIG_VFIO_NOIOMMU
-static void *vfio_noiommu_open(unsigned long arg)
-{
- if (arg != VFIO_NOIOMMU_IOMMU)
- return ERR_PTR(-EINVAL);
- if (!capable(CAP_SYS_RAWIO))
- return ERR_PTR(-EPERM);
-
- return NULL;
-}
-
-static void vfio_noiommu_release(void *iommu_data)
-{
-}
-
-static long vfio_noiommu_ioctl(void *iommu_data,
- unsigned int cmd, unsigned long arg)
-{
- if (cmd == VFIO_CHECK_EXTENSION)
- return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;
-
- return -ENOTTY;
-}
-
-static int vfio_noiommu_attach_group(void *iommu_data,
- struct iommu_group *iommu_group, enum vfio_group_type type)
-{
- return 0;
-}
-
-static void vfio_noiommu_detach_group(void *iommu_data,
- struct iommu_group *iommu_group)
-{
-}
-
-static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
- .name = "vfio-noiommu",
- .owner = THIS_MODULE,
- .open = vfio_noiommu_open,
- .release = vfio_noiommu_release,
- .ioctl = vfio_noiommu_ioctl,
- .attach_group = vfio_noiommu_attach_group,
- .detach_group = vfio_noiommu_detach_group,
-};
-
-/*
- * Only noiommu containers can use vfio-noiommu and noiommu containers can only
- * use vfio-noiommu.
- */
-static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
- const struct vfio_iommu_driver *driver)
-{
- return container->noiommu == (driver->ops == &vfio_noiommu_ops);
-}
-#else
-static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
- const struct vfio_iommu_driver *driver)
-{
- return true;
-}
-#endif /* CONFIG_VFIO_NOIOMMU */
-
-/*
- * IOMMU driver registration
- */
-int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
-{
- struct vfio_iommu_driver *driver, *tmp;
-
- if (WARN_ON(!ops->register_device != !ops->unregister_device))
- return -EINVAL;
-
- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
- if (!driver)
- return -ENOMEM;
-
- driver->ops = ops;
-
- mutex_lock(&vfio.iommu_drivers_lock);
-
- /* Check for duplicates */
- list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
- if (tmp->ops == ops) {
- mutex_unlock(&vfio.iommu_drivers_lock);
- kfree(driver);
- return -EINVAL;
- }
- }
-
- list_add(&driver->vfio_next, &vfio.iommu_drivers_list);
-
- mutex_unlock(&vfio.iommu_drivers_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
-
-void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
-{
- struct vfio_iommu_driver *driver;
-
- mutex_lock(&vfio.iommu_drivers_lock);
- list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
- if (driver->ops == ops) {
- list_del(&driver->vfio_next);
- mutex_unlock(&vfio.iommu_drivers_lock);
- kfree(driver);
- return;
- }
- }
- mutex_unlock(&vfio.iommu_drivers_lock);
-}
-EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
-
-static void vfio_group_get(struct vfio_group *group);
-
-/*
- * Container objects - containers are created when /dev/vfio/vfio is
- * opened, but their lifecycle extends until the last user is done, so
- * it's freed via kref. Must support container/group/device being
- * closed in any order.
- */
-static void vfio_container_get(struct vfio_container *container)
-{
- kref_get(&container->kref);
-}
-
-static void vfio_container_release(struct kref *kref)
-{
- struct vfio_container *container;
- container = container_of(kref, struct vfio_container, kref);
-
- kfree(container);
-}
-
-static void vfio_container_put(struct vfio_container *container)
-{
- kref_put(&container->kref, vfio_container_release);
-}
-
/*
* Group objects - create, release, get, put, search
*/
@@ -310,9 +133,13 @@ __vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
struct vfio_group *group;
+ /*
+ * group->iommu_group from the vfio.group_list cannot be NULL
+ * under the vfio.group_lock.
+ */
list_for_each_entry(group, &vfio.group_list, vfio_next) {
if (group->iommu_group == iommu_group) {
- vfio_group_get(group);
+ refcount_inc(&group->drivers);
return group;
}
}
@@ -335,7 +162,8 @@ static void vfio_group_release(struct device *dev)
struct vfio_group *group = container_of(dev, struct vfio_group, dev);
mutex_destroy(&group->device_lock);
- iommu_group_put(group->iommu_group);
+ mutex_destroy(&group->group_lock);
+ WARN_ON(group->iommu_group);
ida_free(&vfio.group_ida, MINOR(group->dev.devt));
kfree(group);
}
@@ -363,8 +191,8 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
cdev_init(&group->cdev, &vfio_group_fops);
group->cdev.owner = THIS_MODULE;
- refcount_set(&group->users, 1);
- init_rwsem(&group->group_rwsem);
+ refcount_set(&group->drivers, 1);
+ mutex_init(&group->group_lock);
INIT_LIST_HEAD(&group->device_list);
mutex_init(&group->device_lock);
group->iommu_group = iommu_group;
@@ -420,44 +248,64 @@ err_put:
return ret;
}
-static void vfio_group_put(struct vfio_group *group)
+static void vfio_device_remove_group(struct vfio_device *device)
{
- if (!refcount_dec_and_mutex_lock(&group->users, &vfio.group_lock))
+ struct vfio_group *group = device->group;
+ struct iommu_group *iommu_group;
+
+ if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
+ iommu_group_remove_device(device->dev);
+
+ /* Pairs with vfio_create_group() / vfio_group_get_from_iommu() */
+ if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
return;
+ list_del(&group->vfio_next);
/*
+ * We could concurrently probe another driver in the group that might
+ * race vfio_device_remove_group() with vfio_get_group(), so we have to
+ * ensure that the sysfs is all cleaned up under the lock, otherwise the
+ * cdev_device_add() will fail due to the name already existing.
+ */
+ cdev_device_del(&group->cdev, &group->dev);
+
+ mutex_lock(&group->group_lock);
+ /*
* These data structures all have paired operations that can only be
- * undone when the caller holds a live reference on the group. Since all
- * pairs must be undone these WARN_ON's indicate some caller did not
+ * undone when the caller holds a live reference on the device. Since
+ * all pairs must be undone these WARN_ON's indicate some caller did not
* properly hold the group reference.
*/
WARN_ON(!list_empty(&group->device_list));
- WARN_ON(group->container || group->container_users);
WARN_ON(group->notifier.head);
- list_del(&group->vfio_next);
- cdev_device_del(&group->cdev, &group->dev);
+ /*
+ * Revoke all users of group->iommu_group. At this point we know there
+ * are no devices active because we are unplugging the last one. Setting
+ * iommu_group to NULL blocks all new users.
+ */
+ if (group->container)
+ vfio_group_detach_container(group);
+ iommu_group = group->iommu_group;
+ group->iommu_group = NULL;
+ mutex_unlock(&group->group_lock);
mutex_unlock(&vfio.group_lock);
+ iommu_group_put(iommu_group);
put_device(&group->dev);
}
-static void vfio_group_get(struct vfio_group *group)
-{
- refcount_inc(&group->users);
-}
-
/*
* Device objects - create, release, get, put, search
*/
/* Device reference always implies a group reference */
-static void vfio_device_put(struct vfio_device *device)
+static void vfio_device_put_registration(struct vfio_device *device)
{
if (refcount_dec_and_test(&device->refcount))
complete(&device->comp);
}
-static bool vfio_device_try_get(struct vfio_device *device)
+static bool vfio_device_try_get_registration(struct vfio_device *device)
{
return refcount_inc_not_zero(&device->refcount);
}
@@ -469,7 +317,8 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
mutex_lock(&group->device_lock);
list_for_each_entry(device, &group->device_list, group_next) {
- if (device->dev == dev && vfio_device_try_get(device)) {
+ if (device->dev == dev &&
+ vfio_device_try_get_registration(device)) {
mutex_unlock(&group->device_lock);
return device;
}
@@ -481,20 +330,110 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
/*
* VFIO driver API
*/
-void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
- const struct vfio_device_ops *ops)
+/* Release helper called by vfio_put_device() */
+static void vfio_device_release(struct device *dev)
+{
+ struct vfio_device *device =
+ container_of(dev, struct vfio_device, device);
+
+ vfio_release_device_set(device);
+ ida_free(&vfio.device_ida, device->index);
+
+ /*
+ * kvfree() cannot be done here due to a life cycle mess in
+ * vfio-ccw. Before the ccw part is fixed all drivers are
+ * required to support @release and call vfio_free_device()
+ * from there.
+ */
+ device->ops->release(device);
+}
+
+/*
+ * Allocate and initialize vfio_device so it can be registered to vfio
+ * core.
+ *
+ * Drivers should use the wrapper vfio_alloc_device() for allocation.
+ * @size is the size of the structure to be allocated, including any
+ * private data used by the driver.
+ *
+ * A driver may provide an @init callback to initialize its device private
+ * data.
+ *
+ * Use vfio_put_device() to release the structure after a successful return.
+ */
+struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
+ const struct vfio_device_ops *ops)
{
+ struct vfio_device *device;
+ int ret;
+
+ if (WARN_ON(size < sizeof(struct vfio_device)))
+ return ERR_PTR(-EINVAL);
+
+ device = kvzalloc(size, GFP_KERNEL);
+ if (!device)
+ return ERR_PTR(-ENOMEM);
+
+ ret = vfio_init_device(device, dev, ops);
+ if (ret)
+ goto out_free;
+ return device;
+
+out_free:
+ kvfree(device);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(_vfio_alloc_device);
+
+/*
+ * Initialize a vfio_device so it can be registered to vfio core.
+ *
+ * Only the vfio-ccw driver should call this interface.
+ */
+int vfio_init_device(struct vfio_device *device, struct device *dev,
+ const struct vfio_device_ops *ops)
+{
+ int ret;
+
+ ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL);
+ if (ret < 0) {
+ dev_dbg(dev, "Error to alloc index\n");
+ return ret;
+ }
+
+ device->index = ret;
init_completion(&device->comp);
device->dev = dev;
device->ops = ops;
+
+ if (ops->init) {
+ ret = ops->init(device);
+ if (ret)
+ goto out_uninit;
+ }
+
+ device_initialize(&device->device);
+ device->device.release = vfio_device_release;
+ device->device.class = vfio.device_class;
+ device->device.parent = device->dev;
+ return 0;
+
+out_uninit:
+ vfio_release_device_set(device);
+ ida_free(&vfio.device_ida, device->index);
+ return ret;
}
-EXPORT_SYMBOL_GPL(vfio_init_group_dev);
+EXPORT_SYMBOL_GPL(vfio_init_device);
-void vfio_uninit_group_dev(struct vfio_device *device)
+/*
+ * The helper called by a driver's @release callback to free the device
+ * structure. Drivers which don't have private data to clean up can
+ * simply use this helper as their @release.
+ */
+void vfio_free_device(struct vfio_device *device)
{
- vfio_release_device_set(device);
+ kvfree(device);
}
-EXPORT_SYMBOL_GPL(vfio_uninit_group_dev);
+EXPORT_SYMBOL_GPL(vfio_free_device);
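/*
 * A minimal usage sketch of the allocation helpers above (illustration only,
 * not part of this patch). It assumes the vfio_alloc_device() wrapper that the
 * comment on _vfio_alloc_device() refers to; my_vfio_device, my_init and
 * my_probe are hypothetical names.
 */
#include <linux/vfio.h>

struct my_vfio_device {
	struct vfio_device vdev;	/* must be the first member */
	int private_state;		/* driver private data */
};

static int my_init(struct vfio_device *vdev)
{
	struct my_vfio_device *my =
		container_of(vdev, struct my_vfio_device, vdev);

	my->private_state = 0;		/* set up private data here */
	return 0;
}

static const struct vfio_device_ops my_ops = {
	.init = my_init,
	.release = vfio_free_device,	/* nothing private to clean up */
	/* open_device/close_device/ioctl/... as usual */
};

static int my_probe(struct device *dev)
{
	struct my_vfio_device *my;
	int ret;

	my = vfio_alloc_device(my_vfio_device, vdev, dev, &my_ops);
	if (IS_ERR(my))
		return PTR_ERR(my);

	ret = vfio_register_group_dev(&my->vdev);
	if (ret)
		vfio_put_device(&my->vdev);	/* drops the allocation */
	return ret;
}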
static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
enum vfio_group_type type)
@@ -535,8 +474,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
struct vfio_group *group;
iommu_group = iommu_group_get(dev);
-#ifdef CONFIG_VFIO_NOIOMMU
- if (!iommu_group && noiommu) {
+ if (!iommu_group && vfio_noiommu) {
/*
* With noiommu enabled, create an IOMMU group for devices that
* don't already have one, implying no IOMMU hardware/driver
@@ -550,7 +488,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
}
return group;
}
-#endif
+
if (!iommu_group)
return ERR_PTR(-EINVAL);
@@ -577,7 +515,12 @@ static int __vfio_register_dev(struct vfio_device *device,
struct vfio_group *group)
{
struct vfio_device *existing_device;
+ int ret;
+ /*
+ * In all cases group is the output of one of the group allocation
+ * functions and we have group->drivers incremented for us.
+ */
if (IS_ERR(group))
return PTR_ERR(group);
@@ -590,28 +533,39 @@ static int __vfio_register_dev(struct vfio_device *device,
existing_device = vfio_group_get_device(group, device->dev);
if (existing_device) {
+ /*
+ * group->iommu_group is non-NULL because we hold the drivers
+ * refcount.
+ */
dev_WARN(device->dev, "Device already exists on group %d\n",
iommu_group_id(group->iommu_group));
- vfio_device_put(existing_device);
- if (group->type == VFIO_NO_IOMMU ||
- group->type == VFIO_EMULATED_IOMMU)
- iommu_group_remove_device(device->dev);
- vfio_group_put(group);
- return -EBUSY;
+ vfio_device_put_registration(existing_device);
+ ret = -EBUSY;
+ goto err_out;
}
/* Our reference on group is moved to the device */
device->group = group;
+ ret = dev_set_name(&device->device, "vfio%d", device->index);
+ if (ret)
+ goto err_out;
+
+ ret = device_add(&device->device);
+ if (ret)
+ goto err_out;
+
/* Refcounting can't start until the driver calls register */
refcount_set(&device->refcount, 1);
mutex_lock(&group->device_lock);
list_add(&device->group_next, &group->device_list);
- group->dev_counter++;
mutex_unlock(&group->device_lock);
return 0;
+err_out:
+ vfio_device_remove_group(device);
+ return ret;
}
int vfio_register_group_dev(struct vfio_device *device)
@@ -651,7 +605,7 @@ static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
ret = !strcmp(dev_name(it->dev), buf);
}
- if (ret && vfio_device_try_get(it)) {
+ if (ret && vfio_device_try_get_registration(it)) {
device = it;
break;
}
@@ -671,7 +625,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
bool interrupted = false;
long rc;
- vfio_device_put(device);
+ vfio_device_put_registration(device);
rc = try_wait_for_completion(&device->comp);
while (rc <= 0) {
if (device->ops->request)
@@ -696,356 +650,78 @@ void vfio_unregister_group_dev(struct vfio_device *device)
mutex_lock(&group->device_lock);
list_del(&device->group_next);
- group->dev_counter--;
mutex_unlock(&group->device_lock);
- if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
- iommu_group_remove_device(device->dev);
+ /* Balances device_add in register path */
+ device_del(&device->device);
- /* Matches the get in vfio_register_group_dev() */
- vfio_group_put(group);
+ vfio_device_remove_group(device);
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
/*
- * VFIO base fd, /dev/vfio/vfio
- */
-static long vfio_ioctl_check_extension(struct vfio_container *container,
- unsigned long arg)
-{
- struct vfio_iommu_driver *driver;
- long ret = 0;
-
- down_read(&container->group_lock);
-
- driver = container->iommu_driver;
-
- switch (arg) {
- /* No base extensions yet */
- default:
- /*
- * If no driver is set, poll all registered drivers for
- * extensions and return the first positive result. If
- * a driver is already set, further queries will be passed
- * only to that driver.
- */
- if (!driver) {
- mutex_lock(&vfio.iommu_drivers_lock);
- list_for_each_entry(driver, &vfio.iommu_drivers_list,
- vfio_next) {
-
- if (!list_empty(&container->group_list) &&
- !vfio_iommu_driver_allowed(container,
- driver))
- continue;
- if (!try_module_get(driver->ops->owner))
- continue;
-
- ret = driver->ops->ioctl(NULL,
- VFIO_CHECK_EXTENSION,
- arg);
- module_put(driver->ops->owner);
- if (ret > 0)
- break;
- }
- mutex_unlock(&vfio.iommu_drivers_lock);
- } else
- ret = driver->ops->ioctl(container->iommu_data,
- VFIO_CHECK_EXTENSION, arg);
- }
-
- up_read(&container->group_lock);
-
- return ret;
-}
-
-/* hold write lock on container->group_lock */
-static int __vfio_container_attach_groups(struct vfio_container *container,
- struct vfio_iommu_driver *driver,
- void *data)
-{
- struct vfio_group *group;
- int ret = -ENODEV;
-
- list_for_each_entry(group, &container->group_list, container_next) {
- ret = driver->ops->attach_group(data, group->iommu_group,
- group->type);
- if (ret)
- goto unwind;
- }
-
- return ret;
-
-unwind:
- list_for_each_entry_continue_reverse(group, &container->group_list,
- container_next) {
- driver->ops->detach_group(data, group->iommu_group);
- }
-
- return ret;
-}
-
-static long vfio_ioctl_set_iommu(struct vfio_container *container,
- unsigned long arg)
-{
- struct vfio_iommu_driver *driver;
- long ret = -ENODEV;
-
- down_write(&container->group_lock);
-
- /*
- * The container is designed to be an unprivileged interface while
- * the group can be assigned to specific users. Therefore, only by
- * adding a group to a container does the user get the privilege of
- * enabling the iommu, which may allocate finite resources. There
- * is no unset_iommu, but by removing all the groups from a container,
- * the container is deprivileged and returns to an unset state.
- */
- if (list_empty(&container->group_list) || container->iommu_driver) {
- up_write(&container->group_lock);
- return -EINVAL;
- }
-
- mutex_lock(&vfio.iommu_drivers_lock);
- list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
- void *data;
-
- if (!vfio_iommu_driver_allowed(container, driver))
- continue;
- if (!try_module_get(driver->ops->owner))
- continue;
-
- /*
- * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
- * so test which iommu driver reported support for this
- * extension and call open on them. We also pass them the
- * magic, allowing a single driver to support multiple
- * interfaces if they'd like.
- */
- if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
- module_put(driver->ops->owner);
- continue;
- }
-
- data = driver->ops->open(arg);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- module_put(driver->ops->owner);
- continue;
- }
-
- ret = __vfio_container_attach_groups(container, driver, data);
- if (ret) {
- driver->ops->release(data);
- module_put(driver->ops->owner);
- continue;
- }
-
- container->iommu_driver = driver;
- container->iommu_data = data;
- break;
- }
-
- mutex_unlock(&vfio.iommu_drivers_lock);
- up_write(&container->group_lock);
-
- return ret;
-}
-
-static long vfio_fops_unl_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
-{
- struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver;
- void *data;
- long ret = -EINVAL;
-
- if (!container)
- return ret;
-
- switch (cmd) {
- case VFIO_GET_API_VERSION:
- ret = VFIO_API_VERSION;
- break;
- case VFIO_CHECK_EXTENSION:
- ret = vfio_ioctl_check_extension(container, arg);
- break;
- case VFIO_SET_IOMMU:
- ret = vfio_ioctl_set_iommu(container, arg);
- break;
- default:
- driver = container->iommu_driver;
- data = container->iommu_data;
-
- if (driver) /* passthrough all unrecognized ioctls */
- ret = driver->ops->ioctl(data, cmd, arg);
- }
-
- return ret;
-}
-
-static int vfio_fops_open(struct inode *inode, struct file *filep)
-{
- struct vfio_container *container;
-
- container = kzalloc(sizeof(*container), GFP_KERNEL);
- if (!container)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&container->group_list);
- init_rwsem(&container->group_lock);
- kref_init(&container->kref);
-
- filep->private_data = container;
-
- return 0;
-}
-
-static int vfio_fops_release(struct inode *inode, struct file *filep)
-{
- struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver = container->iommu_driver;
-
- if (driver && driver->ops->notify)
- driver->ops->notify(container->iommu_data,
- VFIO_IOMMU_CONTAINER_CLOSE);
-
- filep->private_data = NULL;
-
- vfio_container_put(container);
-
- return 0;
-}
-
-static const struct file_operations vfio_fops = {
- .owner = THIS_MODULE,
- .open = vfio_fops_open,
- .release = vfio_fops_release,
- .unlocked_ioctl = vfio_fops_unl_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
-};
-
-/*
* VFIO Group fd, /dev/vfio/$GROUP
*/
-static void __vfio_group_unset_container(struct vfio_group *group)
-{
- struct vfio_container *container = group->container;
- struct vfio_iommu_driver *driver;
-
- lockdep_assert_held_write(&group->group_rwsem);
-
- down_write(&container->group_lock);
-
- driver = container->iommu_driver;
- if (driver)
- driver->ops->detach_group(container->iommu_data,
- group->iommu_group);
-
- if (group->type == VFIO_IOMMU)
- iommu_group_release_dma_owner(group->iommu_group);
-
- group->container = NULL;
- group->container_users = 0;
- list_del(&group->container_next);
-
- /* Detaching the last group deprivileges a container, remove iommu */
- if (driver && list_empty(&container->group_list)) {
- driver->ops->release(container->iommu_data);
- module_put(driver->ops->owner);
- container->iommu_driver = NULL;
- container->iommu_data = NULL;
- }
-
- up_write(&container->group_lock);
-
- vfio_container_put(container);
-}
-
/*
* VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
* if there was no container to unset. Since the ioctl is called on
* the group, we know that still exists, therefore the only valid
* transition here is 1->0.
*/
-static int vfio_group_unset_container(struct vfio_group *group)
+static int vfio_group_ioctl_unset_container(struct vfio_group *group)
{
- lockdep_assert_held_write(&group->group_rwsem);
+ int ret = 0;
- if (!group->container)
- return -EINVAL;
- if (group->container_users != 1)
- return -EBUSY;
- __vfio_group_unset_container(group);
- return 0;
+ mutex_lock(&group->group_lock);
+ if (!group->container) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (group->container_users != 1) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ vfio_group_detach_container(group);
+
+out_unlock:
+ mutex_unlock(&group->group_lock);
+ return ret;
}
-static int vfio_group_set_container(struct vfio_group *group, int container_fd)
+static int vfio_group_ioctl_set_container(struct vfio_group *group,
+ int __user *arg)
{
- struct fd f;
struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret = 0;
-
- lockdep_assert_held_write(&group->group_rwsem);
-
- if (group->container || WARN_ON(group->container_users))
- return -EINVAL;
+ struct fd f;
+ int ret;
+ int fd;
- if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
- return -EPERM;
+ if (get_user(fd, arg))
+ return -EFAULT;
- f = fdget(container_fd);
+ f = fdget(fd);
if (!f.file)
return -EBADF;
- /* Sanity check, is this really our fd? */
- if (f.file->f_op != &vfio_fops) {
- fdput(f);
- return -EINVAL;
+ mutex_lock(&group->group_lock);
+ if (group->container || WARN_ON(group->container_users)) {
+ ret = -EINVAL;
+ goto out_unlock;
}
-
- container = f.file->private_data;
- WARN_ON(!container); /* fget ensures we don't race vfio_release */
-
- down_write(&container->group_lock);
-
- /* Real groups and fake groups cannot mix */
- if (!list_empty(&container->group_list) &&
- container->noiommu != (group->type == VFIO_NO_IOMMU)) {
- ret = -EPERM;
- goto unlock_out;
- }
-
- if (group->type == VFIO_IOMMU) {
- ret = iommu_group_claim_dma_owner(group->iommu_group, f.file);
- if (ret)
- goto unlock_out;
+ if (!group->iommu_group) {
+ ret = -ENODEV;
+ goto out_unlock;
}
- driver = container->iommu_driver;
- if (driver) {
- ret = driver->ops->attach_group(container->iommu_data,
- group->iommu_group,
- group->type);
- if (ret) {
- if (group->type == VFIO_IOMMU)
- iommu_group_release_dma_owner(
- group->iommu_group);
- goto unlock_out;
- }
+ container = vfio_container_from_file(f.file);
+ ret = -EINVAL;
+ if (container) {
+ ret = vfio_container_attach_group(container, group);
+ goto out_unlock;
}
- group->container = container;
- group->container_users = 1;
- container->noiommu = (group->type == VFIO_NO_IOMMU);
- list_add(&group->container_next, &container->group_list);
-
- /* Get a reference on the container and mark a user within the group */
- vfio_container_get(container);
-
-unlock_out:
- up_write(&container->group_lock);
+out_unlock:
+ mutex_unlock(&group->group_lock);
fdput(f);
return ret;
}
@@ -1053,47 +729,19 @@ unlock_out:
static const struct file_operations vfio_device_fops;
/* true if the vfio_device has open_device() called but not close_device() */
-static bool vfio_assert_device_open(struct vfio_device *device)
+bool vfio_assert_device_open(struct vfio_device *device)
{
return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}
-static int vfio_device_assign_container(struct vfio_device *device)
-{
- struct vfio_group *group = device->group;
-
- lockdep_assert_held_write(&group->group_rwsem);
-
- if (!group->container || !group->container->iommu_driver ||
- WARN_ON(!group->container_users))
- return -EINVAL;
-
- if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- get_file(group->opened_file);
- group->container_users++;
- return 0;
-}
-
-static void vfio_device_unassign_container(struct vfio_device *device)
-{
- down_write(&device->group->group_rwsem);
- WARN_ON(device->group->container_users <= 1);
- device->group->container_users--;
- fput(device->group->opened_file);
- up_write(&device->group->group_rwsem);
-}
-
static struct file *vfio_device_open(struct vfio_device *device)
{
- struct vfio_iommu_driver *iommu_driver;
struct file *filep;
int ret;
- down_write(&device->group->group_rwsem);
+ mutex_lock(&device->group->group_lock);
ret = vfio_device_assign_container(device);
- up_write(&device->group->group_rwsem);
+ mutex_unlock(&device->group->group_lock);
if (ret)
return ERR_PTR(ret);
@@ -1110,7 +758,7 @@ static struct file *vfio_device_open(struct vfio_device *device)
* lock. If the device driver will use it, it must obtain a
* reference and release it during close_device.
*/
- down_read(&device->group->group_rwsem);
+ mutex_lock(&device->group->group_lock);
device->kvm = device->group->kvm;
if (device->ops->open_device) {
@@ -1118,13 +766,8 @@ static struct file *vfio_device_open(struct vfio_device *device)
if (ret)
goto err_undo_count;
}
-
- iommu_driver = device->group->container->iommu_driver;
- if (iommu_driver && iommu_driver->ops->register_device)
- iommu_driver->ops->register_device(
- device->group->container->iommu_data, device);
-
- up_read(&device->group->group_rwsem);
+ vfio_device_container_register(device);
+ mutex_unlock(&device->group->group_lock);
}
mutex_unlock(&device->dev_set->lock);
@@ -1157,17 +800,14 @@ static struct file *vfio_device_open(struct vfio_device *device)
err_close_device:
mutex_lock(&device->dev_set->lock);
- down_read(&device->group->group_rwsem);
+ mutex_lock(&device->group->group_lock);
if (device->open_count == 1 && device->ops->close_device) {
device->ops->close_device(device);
- iommu_driver = device->group->container->iommu_driver;
- if (iommu_driver && iommu_driver->ops->unregister_device)
- iommu_driver->ops->unregister_device(
- device->group->container->iommu_data, device);
+ vfio_device_container_unregister(device);
}
err_undo_count:
- up_read(&device->group->group_rwsem);
+ mutex_unlock(&device->group->group_lock);
device->open_count--;
if (device->open_count == 0 && device->kvm)
device->kvm = NULL;
@@ -1178,14 +818,21 @@ err_unassign_container:
return ERR_PTR(ret);
}
-static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
+static int vfio_group_ioctl_get_device_fd(struct vfio_group *group,
+ char __user *arg)
{
struct vfio_device *device;
struct file *filep;
+ char *buf;
int fdno;
int ret;
+ buf = strndup_user(arg, PAGE_SIZE);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
device = vfio_device_get_from_name(group, buf);
+ kfree(buf);
if (IS_ERR(device))
return PTR_ERR(device);
@@ -1207,81 +854,60 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
err_put_fdno:
put_unused_fd(fdno);
err_put_device:
- vfio_device_put(device);
+ vfio_device_put_registration(device);
return ret;
}
-static long vfio_group_fops_unl_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
+static int vfio_group_ioctl_get_status(struct vfio_group *group,
+ struct vfio_group_status __user *arg)
{
- struct vfio_group *group = filep->private_data;
- long ret = -ENOTTY;
+ unsigned long minsz = offsetofend(struct vfio_group_status, flags);
+ struct vfio_group_status status;
- switch (cmd) {
- case VFIO_GROUP_GET_STATUS:
- {
- struct vfio_group_status status;
- unsigned long minsz;
+ if (copy_from_user(&status, arg, minsz))
+ return -EFAULT;
- minsz = offsetofend(struct vfio_group_status, flags);
+ if (status.argsz < minsz)
+ return -EINVAL;
- if (copy_from_user(&status, (void __user *)arg, minsz))
- return -EFAULT;
+ status.flags = 0;
- if (status.argsz < minsz)
- return -EINVAL;
+ mutex_lock(&group->group_lock);
+ if (!group->iommu_group) {
+ mutex_unlock(&group->group_lock);
+ return -ENODEV;
+ }
- status.flags = 0;
+ if (group->container)
+ status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
+ VFIO_GROUP_FLAGS_VIABLE;
+ else if (!iommu_group_dma_owner_claimed(group->iommu_group))
+ status.flags |= VFIO_GROUP_FLAGS_VIABLE;
+ mutex_unlock(&group->group_lock);
- down_read(&group->group_rwsem);
- if (group->container)
- status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
- VFIO_GROUP_FLAGS_VIABLE;
- else if (!iommu_group_dma_owner_claimed(group->iommu_group))
- status.flags |= VFIO_GROUP_FLAGS_VIABLE;
- up_read(&group->group_rwsem);
+ if (copy_to_user(arg, &status, minsz))
+ return -EFAULT;
+ return 0;
+}
- if (copy_to_user((void __user *)arg, &status, minsz))
- return -EFAULT;
+static long vfio_group_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_group *group = filep->private_data;
+ void __user *uarg = (void __user *)arg;
- ret = 0;
- break;
- }
+ switch (cmd) {
+ case VFIO_GROUP_GET_DEVICE_FD:
+ return vfio_group_ioctl_get_device_fd(group, uarg);
+ case VFIO_GROUP_GET_STATUS:
+ return vfio_group_ioctl_get_status(group, uarg);
case VFIO_GROUP_SET_CONTAINER:
- {
- int fd;
-
- if (get_user(fd, (int __user *)arg))
- return -EFAULT;
-
- if (fd < 0)
- return -EINVAL;
-
- down_write(&group->group_rwsem);
- ret = vfio_group_set_container(group, fd);
- up_write(&group->group_rwsem);
- break;
- }
+ return vfio_group_ioctl_set_container(group, uarg);
case VFIO_GROUP_UNSET_CONTAINER:
- down_write(&group->group_rwsem);
- ret = vfio_group_unset_container(group);
- up_write(&group->group_rwsem);
- break;
- case VFIO_GROUP_GET_DEVICE_FD:
- {
- char *buf;
-
- buf = strndup_user((const char __user *)arg, PAGE_SIZE);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = vfio_group_get_device_fd(group, buf);
- kfree(buf);
- break;
- }
+ return vfio_group_ioctl_unset_container(group);
+ default:
+ return -ENOTTY;
}
-
- return ret;
}
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
@@ -1290,17 +916,20 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
container_of(inode->i_cdev, struct vfio_group, cdev);
int ret;
- down_write(&group->group_rwsem);
+ mutex_lock(&group->group_lock);
- /* users can be zero if this races with vfio_group_put() */
- if (!refcount_inc_not_zero(&group->users)) {
+ /*
+ * drivers can be zero if this races with vfio_device_remove_group(); it
+ * will be stable at 0 under the group_lock.
+ */
+ if (refcount_read(&group->drivers) == 0) {
ret = -ENODEV;
- goto err_unlock;
+ goto out_unlock;
}
if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
ret = -EPERM;
- goto err_put;
+ goto out_unlock;
}
/*
@@ -1308,17 +937,13 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
*/
if (group->opened_file) {
ret = -EBUSY;
- goto err_put;
+ goto out_unlock;
}
group->opened_file = filep;
filep->private_data = group;
-
- up_write(&group->group_rwsem);
- return 0;
-err_put:
- vfio_group_put(group);
-err_unlock:
- up_write(&group->group_rwsem);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&group->group_lock);
return ret;
}
@@ -1328,21 +953,16 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
filep->private_data = NULL;
- down_write(&group->group_rwsem);
+ mutex_lock(&group->group_lock);
/*
* Device FDs hold a group file reference, therefore the group release
* is only called when there are no open devices.
*/
WARN_ON(group->notifier.head);
- if (group->container) {
- WARN_ON(group->container_users != 1);
- __vfio_group_unset_container(group);
- }
+ if (group->container)
+ vfio_group_detach_container(group);
group->opened_file = NULL;
- up_write(&group->group_rwsem);
-
- vfio_group_put(group);
-
+ mutex_unlock(&group->group_lock);
return 0;
}
@@ -1355,24 +975,53 @@ static const struct file_operations vfio_group_fops = {
};
/*
+ * Wrapper around pm_runtime_resume_and_get().
+ * Return error code on failure or 0 on success.
+ */
+static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
+{
+ struct device *dev = device->dev;
+
+ if (dev->driver && dev->driver->pm) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_info_ratelimited(dev,
+ "vfio: runtime resume failed %d\n", ret);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Wrapper around pm_runtime_put().
+ */
+static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
+{
+ struct device *dev = device->dev;
+
+ if (dev->driver && dev->driver->pm)
+ pm_runtime_put(dev);
+}
+
+/*
* VFIO Device fd
*/
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
struct vfio_device *device = filep->private_data;
- struct vfio_iommu_driver *iommu_driver;
mutex_lock(&device->dev_set->lock);
vfio_assert_device_open(device);
- down_read(&device->group->group_rwsem);
+ mutex_lock(&device->group->group_lock);
if (device->open_count == 1 && device->ops->close_device)
device->ops->close_device(device);
- iommu_driver = device->group->container->iommu_driver;
- if (iommu_driver && iommu_driver->ops->unregister_device)
- iommu_driver->ops->unregister_device(
- device->group->container->iommu_data, device);
- up_read(&device->group->group_rwsem);
+ vfio_device_container_unregister(device);
+ mutex_unlock(&device->group->group_lock);
device->open_count--;
if (device->open_count == 0)
device->kvm = NULL;
@@ -1382,7 +1031,7 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
vfio_device_unassign_container(device);
- vfio_device_put(device);
+ vfio_device_put_registration(device);
return 0;
}
@@ -1628,6 +1277,167 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
return 0;
}
+/* Ranges should fit into a single kernel page */
+#define LOG_MAX_RANGES \
+ (PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))
+
+static int
+vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
+ u32 flags, void __user *arg,
+ size_t argsz)
+{
+ size_t minsz =
+ offsetofend(struct vfio_device_feature_dma_logging_control,
+ ranges);
+ struct vfio_device_feature_dma_logging_range __user *ranges;
+ struct vfio_device_feature_dma_logging_control control;
+ struct vfio_device_feature_dma_logging_range range;
+ struct rb_root_cached root = RB_ROOT_CACHED;
+ struct interval_tree_node *nodes;
+ u64 iova_end;
+ u32 nnodes;
+ int i, ret;
+
+ if (!device->log_ops)
+ return -ENOTTY;
+
+ ret = vfio_check_feature(flags, argsz,
+ VFIO_DEVICE_FEATURE_SET,
+ sizeof(control));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&control, arg, minsz))
+ return -EFAULT;
+
+ nnodes = control.num_ranges;
+ if (!nnodes)
+ return -EINVAL;
+
+ if (nnodes > LOG_MAX_RANGES)
+ return -E2BIG;
+
+ ranges = u64_to_user_ptr(control.ranges);
+ nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+ GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ for (i = 0; i < nnodes; i++) {
+ if (copy_from_user(&range, &ranges[i], sizeof(range))) {
+ ret = -EFAULT;
+ goto end;
+ }
+ if (!IS_ALIGNED(range.iova, control.page_size) ||
+ !IS_ALIGNED(range.length, control.page_size)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (check_add_overflow(range.iova, range.length, &iova_end) ||
+ iova_end > ULONG_MAX) {
+ ret = -EOVERFLOW;
+ goto end;
+ }
+
+ nodes[i].start = range.iova;
+ nodes[i].last = range.iova + range.length - 1;
+ if (interval_tree_iter_first(&root, nodes[i].start,
+ nodes[i].last)) {
+ /* Range overlapping */
+ ret = -EINVAL;
+ goto end;
+ }
+ interval_tree_insert(nodes + i, &root);
+ }
+
+ ret = device->log_ops->log_start(device, &root, nnodes,
+ &control.page_size);
+ if (ret)
+ goto end;
+
+ if (copy_to_user(arg, &control, sizeof(control))) {
+ ret = -EFAULT;
+ device->log_ops->log_stop(device);
+ }
+
+end:
+ kfree(nodes);
+ return ret;
+}
+
+static int
+vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
+ u32 flags, void __user *arg,
+ size_t argsz)
+{
+ int ret;
+
+ if (!device->log_ops)
+ return -ENOTTY;
+
+ ret = vfio_check_feature(flags, argsz,
+ VFIO_DEVICE_FEATURE_SET, 0);
+ if (ret != 1)
+ return ret;
+
+ return device->log_ops->log_stop(device);
+}
+
+static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
+ unsigned long iova, size_t length,
+ void *opaque)
+{
+ struct vfio_device *device = opaque;
+
+ return device->log_ops->log_read_and_clear(device, iova, length, iter);
+}
+
+static int
+vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
+ u32 flags, void __user *arg,
+ size_t argsz)
+{
+ size_t minsz =
+ offsetofend(struct vfio_device_feature_dma_logging_report,
+ bitmap);
+ struct vfio_device_feature_dma_logging_report report;
+ struct iova_bitmap *iter;
+ u64 iova_end;
+ int ret;
+
+ if (!device->log_ops)
+ return -ENOTTY;
+
+ ret = vfio_check_feature(flags, argsz,
+ VFIO_DEVICE_FEATURE_GET,
+ sizeof(report));
+ if (ret != 1)
+ return ret;
+
+ if (copy_from_user(&report, arg, minsz))
+ return -EFAULT;
+
+ if (report.page_size < SZ_4K || !is_power_of_2(report.page_size))
+ return -EINVAL;
+
+ if (check_add_overflow(report.iova, report.length, &iova_end) ||
+ iova_end > ULONG_MAX)
+ return -EOVERFLOW;
+
+ iter = iova_bitmap_alloc(report.iova, report.length,
+ report.page_size,
+ u64_to_user_ptr(report.bitmap));
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = iova_bitmap_for_each(iter, device,
+ vfio_device_log_read_and_clear);
+
+ iova_bitmap_free(iter);
+ return ret;
+}
+
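/*
 * A rough userspace sketch of driving the DMA logging feature added above
 * (illustration only, not part of this patch). Struct layouts and flag names
 * are taken from the <linux/vfio.h> UAPI; device_fd and the single tracked
 * range are assumptions for the example.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int start_dirty_logging(int device_fd, __u64 iova, __u64 length)
{
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova,
		.length = length,
	};
	struct vfio_device_feature_dma_logging_control control = {
		.page_size = 4096,
		.num_ranges = 1,
		.ranges = (__u64)(uintptr_t)&range,
	};
	size_t sz = sizeof(struct vfio_device_feature) + sizeof(control);
	struct vfio_device_feature *feature = calloc(1, sz);
	int ret;

	if (!feature)
		return -1;

	feature->argsz = sz;
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
	memcpy(feature->data, &control, sizeof(control));

	/* The kernel validates alignment and overlap as above, then calls log_start() */
	ret = ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
	free(feature);
	return ret;
}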
static int vfio_ioctl_device_feature(struct vfio_device *device,
struct vfio_device_feature __user *arg)
{
@@ -1661,6 +1471,18 @@ static int vfio_ioctl_device_feature(struct vfio_device *device,
return vfio_ioctl_device_feature_mig_device_state(
device, feature.flags, arg->data,
feature.argsz - minsz);
+ case VFIO_DEVICE_FEATURE_DMA_LOGGING_START:
+ return vfio_ioctl_device_feature_logging_start(
+ device, feature.flags, arg->data,
+ feature.argsz - minsz);
+ case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP:
+ return vfio_ioctl_device_feature_logging_stop(
+ device, feature.flags, arg->data,
+ feature.argsz - minsz);
+ case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT:
+ return vfio_ioctl_device_feature_logging_report(
+ device, feature.flags, arg->data,
+ feature.argsz - minsz);
default:
if (unlikely(!device->ops->device_feature))
return -EINVAL;
@@ -1674,15 +1496,27 @@ static long vfio_device_fops_unl_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
struct vfio_device *device = filep->private_data;
+ int ret;
+
+ ret = vfio_device_pm_runtime_get(device);
+ if (ret)
+ return ret;
switch (cmd) {
case VFIO_DEVICE_FEATURE:
- return vfio_ioctl_device_feature(device, (void __user *)arg);
+ ret = vfio_ioctl_device_feature(device, (void __user *)arg);
+ break;
+
default:
if (unlikely(!device->ops->ioctl))
- return -EINVAL;
- return device->ops->ioctl(device, cmd, arg);
+ ret = -EINVAL;
+ else
+ ret = device->ops->ioctl(device, cmd, arg);
+ break;
}
+
+ vfio_device_pm_runtime_put(device);
+ return ret;
}
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
@@ -1732,19 +1566,42 @@ static const struct file_operations vfio_device_fops = {
* vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
* @file: VFIO group file
*
- * The returned iommu_group is valid as long as a ref is held on the file.
+ * The returned iommu_group is valid as long as a ref is held on the file. This
+ * returns a reference on the group. This function is deprecated; only the SPAPR
+ * path in kvm should call it.
*/
struct iommu_group *vfio_file_iommu_group(struct file *file)
{
struct vfio_group *group = file->private_data;
+ struct iommu_group *iommu_group = NULL;
- if (file->f_op != &vfio_group_fops)
+ if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
return NULL;
- return group->iommu_group;
+
+ if (!vfio_file_is_group(file))
+ return NULL;
+
+ mutex_lock(&group->group_lock);
+ if (group->iommu_group) {
+ iommu_group = group->iommu_group;
+ iommu_group_ref_get(iommu_group);
+ }
+ mutex_unlock(&group->group_lock);
+ return iommu_group;
}
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
/**
+ * vfio_file_is_group - True if the file is usable with VFIO APIs
+ * @file: VFIO group file
+ */
+bool vfio_file_is_group(struct file *file)
+{
+ return file->f_op == &vfio_group_fops;
+}
+EXPORT_SYMBOL_GPL(vfio_file_is_group);
+
+/**
* vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
* is always CPU cache coherent
* @file: VFIO group file
@@ -1758,13 +1615,13 @@ bool vfio_file_enforced_coherent(struct file *file)
struct vfio_group *group = file->private_data;
bool ret;
- if (file->f_op != &vfio_group_fops)
+ if (!vfio_file_is_group(file))
return true;
- down_read(&group->group_rwsem);
+ mutex_lock(&group->group_lock);
if (group->container) {
- ret = vfio_ioctl_check_extension(group->container,
- VFIO_DMA_CC_IOMMU);
+ ret = vfio_container_ioctl_check_extension(group->container,
+ VFIO_DMA_CC_IOMMU);
} else {
/*
* Since the coherency state is determined only once a container
@@ -1773,7 +1630,7 @@ bool vfio_file_enforced_coherent(struct file *file)
*/
ret = true;
}
- up_read(&group->group_rwsem);
+ mutex_unlock(&group->group_lock);
return ret;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
@@ -1790,12 +1647,12 @@ void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
struct vfio_group *group = file->private_data;
- if (file->f_op != &vfio_group_fops)
+ if (!vfio_file_is_group(file))
return;
- down_write(&group->group_rwsem);
+ mutex_lock(&group->group_lock);
group->kvm = kvm;
- up_write(&group->group_rwsem);
+ mutex_unlock(&group->group_lock);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
@@ -1810,7 +1667,7 @@ bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
struct vfio_group *group = file->private_data;
- if (file->f_op != &vfio_group_fops)
+ if (!vfio_file_is_group(file))
return false;
return group == device->group;
@@ -1937,114 +1794,6 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
- * Pin contiguous user pages and return their associated host pages for local
- * domain only.
- * @device [in] : device
- * @iova [in] : starting IOVA of user pages to be pinned.
- * @npage [in] : count of pages to be pinned. This count should not
- * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * @prot [in] : protection flags
- * @pages[out] : array of host pages
- * Return error or number of pages pinned.
- */
-int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
- int npage, int prot, struct page **pages)
-{
- struct vfio_container *container;
- struct vfio_group *group = device->group;
- struct vfio_iommu_driver *driver;
- int ret;
-
- if (!pages || !npage || !vfio_assert_device_open(device))
- return -EINVAL;
-
- if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
- return -E2BIG;
-
- if (group->dev_counter > 1)
- return -EINVAL;
-
- /* group->container cannot change while a vfio device is open */
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->pin_pages))
- ret = driver->ops->pin_pages(container->iommu_data,
- group->iommu_group, iova,
- npage, prot, pages);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-EXPORT_SYMBOL(vfio_pin_pages);
-
-/*
- * Unpin contiguous host pages for local domain only.
- * @device [in] : device
- * @iova [in] : starting address of user pages to be unpinned.
- * @npage [in] : count of pages to be unpinned. This count should not
- * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- */
-void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
-
- if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
- return;
-
- if (WARN_ON(!vfio_assert_device_open(device)))
- return;
-
- /* group->container cannot change while a vfio device is open */
- container = device->group->container;
- driver = container->iommu_driver;
-
- driver->ops->unpin_pages(container->iommu_data, iova, npage);
-}
-EXPORT_SYMBOL(vfio_unpin_pages);
-
-/*
- * This interface allows the CPUs to perform some sort of virtual DMA on
- * behalf of the device.
- *
- * CPUs read/write from/into a range of IOVAs pointing to user space memory
- * into/from a kernel buffer.
- *
- * As the read/write of user space memory is conducted via the CPUs and is
- * not a real device DMA, it is not necessary to pin the user space memory.
- *
- * @device [in] : VFIO device
- * @iova [in] : base IOVA of a user space buffer
- * @data [in] : pointer to kernel buffer
- * @len [in] : kernel buffer length
- * @write : indicate read or write
- * Return error code on failure or 0 on success.
- */
-int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
- size_t len, bool write)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret = 0;
-
- if (!data || len <= 0 || !vfio_assert_device_open(device))
- return -EINVAL;
-
- /* group->container cannot change while a vfio device is open */
- container = device->group->container;
- driver = container->iommu_driver;
-
- if (likely(driver && driver->ops->dma_rw))
- ret = driver->ops->dma_rw(container->iommu_data,
- iova, data, len, write);
- else
- ret = -ENOTTY;
- return ret;
-}
-EXPORT_SYMBOL(vfio_dma_rw);
-
-/*
* Module/class support
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
@@ -2052,59 +1801,50 @@ static char *vfio_devnode(struct device *dev, umode_t *mode)
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}
-static struct miscdevice vfio_dev = {
- .minor = VFIO_MINOR,
- .name = "vfio",
- .fops = &vfio_fops,
- .nodename = "vfio/vfio",
- .mode = S_IRUGO | S_IWUGO,
-};
-
static int __init vfio_init(void)
{
int ret;
ida_init(&vfio.group_ida);
+ ida_init(&vfio.device_ida);
mutex_init(&vfio.group_lock);
- mutex_init(&vfio.iommu_drivers_lock);
INIT_LIST_HEAD(&vfio.group_list);
- INIT_LIST_HEAD(&vfio.iommu_drivers_list);
- ret = misc_register(&vfio_dev);
- if (ret) {
- pr_err("vfio: misc device register failed\n");
+ ret = vfio_container_init();
+ if (ret)
return ret;
- }
/* /dev/vfio/$GROUP */
vfio.class = class_create(THIS_MODULE, "vfio");
if (IS_ERR(vfio.class)) {
ret = PTR_ERR(vfio.class);
- goto err_class;
+ goto err_group_class;
}
vfio.class->devnode = vfio_devnode;
+ /* /sys/class/vfio-dev/vfioX */
+ vfio.device_class = class_create(THIS_MODULE, "vfio-dev");
+ if (IS_ERR(vfio.device_class)) {
+ ret = PTR_ERR(vfio.device_class);
+ goto err_dev_class;
+ }
+
ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
if (ret)
goto err_alloc_chrdev;
-#ifdef CONFIG_VFIO_NOIOMMU
- ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
-#endif
- if (ret)
- goto err_driver_register;
-
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
-err_driver_register:
- unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
+ class_destroy(vfio.device_class);
+ vfio.device_class = NULL;
+err_dev_class:
class_destroy(vfio.class);
vfio.class = NULL;
-err_class:
- misc_deregister(&vfio_dev);
+err_group_class:
+ vfio_container_cleanup();
return ret;
}
@@ -2112,14 +1852,14 @@ static void __exit vfio_cleanup(void)
{
WARN_ON(!list_empty(&vfio.group_list));
-#ifdef CONFIG_VFIO_NOIOMMU
- vfio_unregister_iommu_driver(&vfio_noiommu_ops);
-#endif
+ ida_destroy(&vfio.device_ida);
ida_destroy(&vfio.group_ida);
unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
+ class_destroy(vfio.device_class);
+ vfio.device_class = NULL;
class_destroy(vfio.class);
+ vfio_container_cleanup();
vfio.class = NULL;
- misc_deregister(&vfio_dev);
xa_destroy(&vfio_device_set_xa);
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d7a04d573988..20265393aee7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1782,7 +1782,7 @@ static struct miscdevice vhost_net_misc = {
.fops = &vhost_net_fops,
};
-static int vhost_net_init(void)
+static int __init vhost_net_init(void)
{
if (experimental_zcopytx)
vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
@@ -1790,7 +1790,7 @@ static int vhost_net_init(void)
}
module_init(vhost_net_init);
-static void vhost_net_exit(void)
+static void __exit vhost_net_exit(void)
{
misc_deregister(&vhost_net_misc);
}
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index a317d9fe1d67..5f8fec9e5fd4 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -318,14 +318,6 @@ struct dac_info
void *data;
};
-
-static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
-{
- u8 code[2] = {reg, 0};
- info->dac_read_regs(info->data, code, 1);
- return code[1];
-}
-
static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
{
info->dac_read_regs(info->data, code, count);
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index aba46118b208..6bbcd9fc864e 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -108,13 +108,6 @@ static inline int PAR_EQUAL(struct fb_par_control *x, struct fb_par_control *y)
return (!DIRTY(cmode) && !DIRTY(xres) && !DIRTY(yres)
&& !DIRTY(vxres) && !DIRTY(vyres));
}
-static inline int VAR_MATCH(struct fb_var_screeninfo *x, struct fb_var_screeninfo *y)
-{
- return (!DIRTY(bits_per_pixel) && !DIRTY(xres)
- && !DIRTY(yres) && !DIRTY(xres_virtual)
- && !DIRTY(yres_virtual)
- && !DIRTY_CMAP(red) && !DIRTY_CMAP(green) && !DIRTY_CMAP(blue));
-}
struct fb_info_control {
struct fb_info info;
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 6b4d5a7f3e15..1582c718329c 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1072,17 +1072,12 @@ static ssize_t gbefb_show_rev(struct device *device, struct device_attribute *at
static DEVICE_ATTR(revision, S_IRUGO, gbefb_show_rev, NULL);
-static void gbefb_remove_sysfs(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_size);
- device_remove_file(dev, &dev_attr_revision);
-}
-
-static void gbefb_create_sysfs(struct device *dev)
-{
- device_create_file(dev, &dev_attr_size);
- device_create_file(dev, &dev_attr_revision);
-}
+static struct attribute *gbefb_attrs[] = {
+ &dev_attr_size.attr,
+ &dev_attr_revision.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gbefb);
/*
* Initialization
@@ -1221,7 +1216,6 @@ static int gbefb_probe(struct platform_device *p_dev)
}
platform_set_drvdata(p_dev, info);
- gbefb_create_sysfs(&p_dev->dev);
fb_info(info, "%s rev %d @ 0x%08x using %dkB memory\n",
info->fix.id, gbe_revision, (unsigned)GBE_BASE,
@@ -1248,7 +1242,6 @@ static int gbefb_remove(struct platform_device* p_dev)
gbe_turn_off();
arch_phys_wc_del(par->wc_cookie);
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
- gbefb_remove_sysfs(&p_dev->dev);
framebuffer_release(info);
return 0;
@@ -1259,6 +1252,7 @@ static struct platform_driver gbefb_driver = {
.remove = gbefb_remove,
.driver = {
.name = "gbefb",
+ .dev_groups = gbefb_groups,
},
};
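/*
 * Why the explicit sysfs calls could be dropped above (illustration only, not
 * part of this patch): once a platform_driver sets .dev_groups, the driver
 * core creates the attribute files before probe() and removes them after
 * remove(), so probe/remove no longer manage them by hand. Generic shape of
 * the idiom, with hypothetical names:
 */
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "ok\n");
}
static DEVICE_ATTR_RO(example);		/* declares dev_attr_example */

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* provides example_groups */

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example",
		.dev_groups	= example_groups,	/* driver core adds/removes files */
	},
};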
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 94f3bc637fc8..51fde1b2a793 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -972,7 +972,6 @@ static int imxfb_probe(struct platform_device *pdev)
fbi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(fbi->regs)) {
- dev_err(&pdev->dev, "Cannot map frame buffer registers\n");
ret = PTR_ERR(fbi->regs);
goto failed_ioremap;
}
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 96800c9c9cd9..90c79e8c1157 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -693,7 +693,7 @@ static int of_platform_mb862xx_probe(struct platform_device *ofdev)
par->dev = dev;
par->irq = irq_of_parse_and_map(np, 0);
- if (par->irq == NO_IRQ) {
+ if (!par->irq) {
dev_err(dev, "failed to map irq\n");
ret = -ENODEV;
goto fbrel;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index b2d6e6df2161..92fb6b7e1f68 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -519,11 +519,9 @@ int dispc_runtime_get(void)
DSSDBG("dispc_runtime_get\n");
- r = pm_runtime_get_sync(&dispc.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&dispc.pdev->dev);
+ r = pm_runtime_resume_and_get(&dispc.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
EXPORT_SYMBOL(dispc_runtime_get);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index d43b081d592f..54b0f034c2ed 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1136,11 +1136,9 @@ static int dsi_runtime_get(struct platform_device *dsidev)
DSSDBG("dsi_runtime_get\n");
- r = pm_runtime_get_sync(&dsi->pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&dsi->pdev->dev);
+ r = pm_runtime_resume_and_get(&dsi->pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 45b9d3cf3860..335e0af4eec1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -767,11 +767,9 @@ int dss_runtime_get(void)
DSSDBG("dss_runtime_get\n");
- r = pm_runtime_get_sync(&dss.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&dss.pdev->dev);
+ r = pm_runtime_resume_and_get(&dss.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 800bd108e834..0f39612e002e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -38,11 +38,9 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&hdmi.pdev->dev);
+ r = pm_runtime_resume_and_get(&hdmi.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index 2c03608addcd..bfccc2cb917a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -42,11 +42,9 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&hdmi.pdev->dev);
+ r = pm_runtime_resume_and_get(&hdmi.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 905d642ff9ed..78a7309d25dd 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -347,11 +347,9 @@ static int venc_runtime_get(void)
DSSDBG("venc_runtime_get\n");
- r = pm_runtime_get_sync(&venc.pdev->dev);
- if (WARN_ON(r < 0)) {
- pm_runtime_put_sync(&venc.pdev->dev);
+ r = pm_runtime_resume_and_get(&venc.pdev->dev);
+ if (WARN_ON(r < 0))
return r;
- }
return 0;
}
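
Editor's note: the six omapfb/dss hunks above (dispc, dsi, dss, hdmi4, hdmi5, venc) are all the same conversion. pm_runtime_get_sync() bumps the device usage count even when the resume fails, so every error path had to drop the reference itself; pm_runtime_resume_and_get() only takes the reference on success. A minimal sketch of the two call shapes, using a hypothetical foo_runtime_get() and a bare struct device pointer (names are illustrative, not from the patch):

#include <linux/pm_runtime.h>

/* Old pattern: the usage count is raised even on failure, so the error
 * path must put the reference back before returning.
 */
static int foo_runtime_get_old(struct device *dev)
{
	int r = pm_runtime_get_sync(dev);

	if (r < 0) {
		pm_runtime_put_sync(dev);
		return r;
	}
	return 0;
}

/* New pattern: the reference is only held when the resume succeeded,
 * so a failed call needs no cleanup at all.
 */
static int foo_runtime_get(struct device *dev)
{
	return pm_runtime_resume_and_get(dev);
}
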
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index d7aa5511c361..e65bdc499c23 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -137,6 +137,8 @@ static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len);
static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size);
static void ufx_free_urb_list(struct ufx_data *dev);
+static DEFINE_MUTEX(disconnect_mutex);
+
/* reads a control register */
static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data)
{
@@ -1071,9 +1073,13 @@ static int ufx_ops_open(struct fb_info *info, int user)
if (user == 0 && !console)
return -EBUSY;
+ mutex_lock(&disconnect_mutex);
+
/* If the USB device is gone, we don't accept new opens */
- if (dev->virtualized)
+ if (dev->virtualized) {
+ mutex_unlock(&disconnect_mutex);
return -ENODEV;
+ }
dev->fb_count++;
@@ -1097,6 +1103,8 @@ static int ufx_ops_open(struct fb_info *info, int user)
pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d",
info->node, user, info, dev->fb_count);
+ mutex_unlock(&disconnect_mutex);
+
return 0;
}
@@ -1741,6 +1749,8 @@ static void ufx_usb_disconnect(struct usb_interface *interface)
{
struct ufx_data *dev;
+ mutex_lock(&disconnect_mutex);
+
dev = usb_get_intfdata(interface);
pr_debug("USB disconnect starting\n");
@@ -1761,6 +1771,8 @@ static void ufx_usb_disconnect(struct usb_interface *interface)
kref_put(&dev->kref, ufx_free);
/* consider ufx_data freed */
+
+ mutex_unlock(&disconnect_mutex);
}
static struct usb_driver ufx_driver = {
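
Editor's note: the smscufx change serializes fbdev open against USB disconnect with one driver-wide mutex, so the disconnect path cannot tear the device down while an open is still checking dev->virtualized. A condensed sketch of that locking shape, with a reduced struct ufx_data and sketch-only function names standing in for the real driver code:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* Reduced stand-in for the driver's real per-device state. */
struct ufx_data {
	bool virtualized;	/* set once the USB device is gone */
	int fb_count;
};

static DEFINE_MUTEX(disconnect_mutex);

static int ufx_open_sketch(struct ufx_data *dev)
{
	int ret = 0;

	mutex_lock(&disconnect_mutex);
	if (dev->virtualized)		/* no new opens after disconnect */
		ret = -ENODEV;
	else
		dev->fb_count++;	/* disconnect is excluded while we run */
	mutex_unlock(&disconnect_mutex);

	return ret;
}

static void ufx_disconnect_sketch(struct ufx_data *dev)
{
	mutex_lock(&disconnect_mutex);
	dev->virtualized = true;
	/* ... unregister the framebuffer, drop the last reference ... */
	mutex_unlock(&disconnect_mutex);
}
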
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 38a861e22c33..7753e586e65a 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1298,7 +1298,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
/* limit fbsize to max visible screen size */
if (fix->smem_len > yres*fix->line_length)
- fix->smem_len = yres*fix->line_length;
+ fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024);
fix->accel = FB_ACCEL_NONE;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index f9c3b1d38fc2..219ce7292337 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -1128,11 +1128,6 @@ static inline void shadowmode_on(struct tridentfb_par *par)
write3CE(par, CyberControl, read3CE(par, CyberControl) | 0x81);
}
-static inline void shadowmode_off(struct tridentfb_par *par)
-{
- write3CE(par, CyberControl, read3CE(par, CyberControl) & 0x7E);
-}
-
/* Set the hardware to the requested video mode */
static int tridentfb_set_par(struct fb_info *info)
{
@@ -1475,7 +1470,7 @@ static int trident_pci_probe(struct pci_dev *dev,
if (err)
return err;
- err = pci_enable_device(dev);
+ err = pcim_enable_device(dev);
if (err)
return err;
@@ -1715,12 +1710,10 @@ out_unmap2:
kfree(info->pixmap.addr);
if (info->screen_base)
iounmap(info->screen_base);
- release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
disable_mmio(info->par);
out_unmap1:
if (default_par->io_virt)
iounmap(default_par->io_virt);
- release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
framebuffer_release(info);
return err;
}
@@ -1735,8 +1728,6 @@ static void trident_pci_remove(struct pci_dev *dev)
i2c_del_adapter(&par->ddc_adapter);
iounmap(par->io_virt);
iounmap(info->screen_base);
- release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
- release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
kfree(info->pixmap.addr);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
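
Editor's note: switching trident_pci_probe() to pcim_enable_device() puts the PCI device into managed (devres) mode, and resources requested through the managed PCI helpers are then released automatically on probe failure or unbind, which is what allows the explicit release_mem_region() calls to go away. A generic sketch of the managed-probe pattern, assuming the driver's region requests go through the PCI helpers (foo_pci_probe and the "foo" name are illustrative only):

#include <linux/pci.h>

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	/* Managed enable: devres disables the device again on driver detach. */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	/*
	 * On a managed PCI device, regions requested through the PCI helpers
	 * are released automatically, so error and remove paths need no
	 * explicit release_mem_region() calls.
	 */
	err = pci_request_regions(pdev, "foo");
	if (err)
		return err;

	/* ... ioremap BARs, register the framebuffer ... */
	return 0;
}
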
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index c863244ef12c..216d49c9d47e 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -370,7 +370,7 @@ static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
const unsigned long *back = (const unsigned long *) bback;
const unsigned long *front = (const unsigned long *) *bfront;
const int width = *width_bytes / sizeof(unsigned long);
- int identical = width;
+ int identical;
int start = width;
int end = width;
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 4df6772802d7..00d789b6c0fa 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -167,7 +167,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
memcpy(&m->id, &uvesafb_cn_id, sizeof(m->id));
m->seq = seq;
m->len = len;
- m->ack = prandom_u32();
+ m->ack = get_random_u32();
/* uvesafb_task structure */
memcpy(m + 1, &task->t, sizeof(task->t));
@@ -1580,7 +1580,7 @@ static ssize_t uvesafb_show_vendor(struct device *dev,
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_vendor_name_ptr)
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_vendor_name_ptr);
else
return 0;
@@ -1595,7 +1595,7 @@ static ssize_t uvesafb_show_product_name(struct device *dev,
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_name_ptr)
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_product_name_ptr);
else
return 0;
@@ -1610,7 +1610,7 @@ static ssize_t uvesafb_show_product_rev(struct device *dev,
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_rev_ptr)
- return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_product_rev_ptr);
else
return 0;
@@ -1625,7 +1625,7 @@ static ssize_t uvesafb_show_oem_string(struct device *dev,
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_string_ptr)
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
(char *)(&par->vbe_ib) + par->vbe_ib.oem_string_ptr);
else
return 0;
@@ -1639,7 +1639,7 @@ static ssize_t uvesafb_show_nocrtc(struct device *dev,
struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
- return snprintf(buf, PAGE_SIZE, "%d\n", par->nocrtc);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", par->nocrtc);
}
static ssize_t uvesafb_store_nocrtc(struct device *dev,
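
Editor's note on the uvesafb sysfs hunks: snprintf() returns the length the output would have had, which can exceed the buffer and is not a valid count to return from a ->show() callback, while scnprintf() returns the number of bytes actually written. A minimal illustration with a hypothetical attribute and placeholder string:

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical sysfs show callback. */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/*
	 * scnprintf() returns at most PAGE_SIZE - 1 here, so the value is
	 * always a valid read length even if the source had to be truncated.
	 */
	return scnprintf(buf, PAGE_SIZE, "%s\n", "some-oem-string");
}
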
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 35cf51ae3292..af47f8217095 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1421,6 +1421,7 @@ static const struct platform_device_id vga16fb_driver_id_table[] = {
{"vga-framebuffer", 0},
{ }
};
+MODULE_DEVICE_TABLE(platform, vga16fb_driver_id_table);
static struct platform_driver vga16fb_driver = {
.probe = vga16fb_probe,
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 0b43efddea22..dfd69bd77f53 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -198,7 +198,7 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
req2->additions_revision = VBG_SVN_REV;
req2->additions_features =
VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
- strlcpy(req2->name, VBG_VERSION_STRING,
+ strscpy(req2->name, VBG_VERSION_STRING,
sizeof(req2->name));
/*
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 4ccfd30c2a30..c47e62dc55da 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -270,6 +270,13 @@ static ssize_t host_features_show(struct device *dev,
static DEVICE_ATTR_RO(host_version);
static DEVICE_ATTR_RO(host_features);
+static struct attribute *vbg_pci_attrs[] = {
+ &dev_attr_host_version.attr,
+ &dev_attr_host_features.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vbg_pci);
+
/**
* Does the PCI detection and init of the device.
*
@@ -390,12 +397,6 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
}
pci_set_drvdata(pci, gdev);
- device_create_file(dev, &dev_attr_host_version);
- device_create_file(dev, &dev_attr_host_features);
-
- vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %pap (size %pap)\n",
- gdev->misc_device.minor, pci->irq, gdev->io_port,
- &mmio, &mmio_len);
return 0;
@@ -422,8 +423,6 @@ static void vbg_pci_remove(struct pci_dev *pci)
mutex_unlock(&vbg_gdev_mutex);
free_irq(pci->irq, gdev);
- device_remove_file(gdev->dev, &dev_attr_host_features);
- device_remove_file(gdev->dev, &dev_attr_host_version);
misc_deregister(&gdev->misc_device_user);
misc_deregister(&gdev->misc_device);
vbg_core_exit(gdev);
@@ -488,6 +487,7 @@ MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
static struct pci_driver vbg_pci_driver = {
.name = DEVICE_NAME,
+ .dev_groups = vbg_pci_groups,
.id_table = vbg_pci_ids,
.probe = vbg_pci_probe,
.remove = vbg_pci_remove,
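
Editor's note: vboxguest drops the manual device_create_file()/device_remove_file() calls and instead hangs a static attribute group off the driver's .dev_groups pointer; the driver core then creates the sysfs files when the device is bound and removes them on unbind. A condensed sketch of the pattern, with a made-up "version" attribute and "foo" driver name:

#include <linux/device.h>
#include <linux/pci.h>

static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", "1.0");	/* placeholder */
}
static DEVICE_ATTR_RO(version);

static struct attribute *foo_attrs[] = {
	&dev_attr_version.attr,
	NULL
};
ATTRIBUTE_GROUPS(foo);			/* emits foo_groups[] */

static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.dev_groups	= foo_groups,	/* driver core adds/removes the files */
	/* .id_table = ..., .probe = ..., .remove = ..., */
};
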
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ad258a9d3b9f..a6c86f916dbd 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -409,6 +409,9 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
if (!err)
return 0;
+	/* Is there an interrupt? If not, give up. */
+ if (!(to_vp_device(vdev)->pci_dev->irq))
+ return err;
/* Finally fall back to regular interrupts. */
return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 4620e9d79dde..2e7689bb933b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
+#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>
@@ -352,8 +353,15 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
struct scatterlist *sg,
enum dma_data_direction direction)
{
- if (!vq->use_dma_api)
+ if (!vq->use_dma_api) {
+ /*
+ * If DMA is not used, KMSAN doesn't know that the scatterlist
+ * is initialized by the hardware. Explicitly check/unpoison it
+ * depending on the direction.
+ */
+ kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
return (dma_addr_t)sg_phys(sg);
+ }
/*
* We can't use dma_map_sg, because we don't use scatterlists in
@@ -1066,7 +1074,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
if (!queue) {
/* Try to get a single page. You are my only hope! */
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
- &dma_addr, GFP_KERNEL|__GFP_ZERO);
+ &dma_addr, GFP_KERNEL | __GFP_ZERO);
}
if (!queue)
return -ENOMEM;
@@ -1867,7 +1875,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
ring = vring_alloc_queue(vdev, ring_size_in_bytes,
&ring_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!ring)
goto err;
@@ -1879,7 +1887,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
driver = vring_alloc_queue(vdev, event_size_in_bytes,
&driver_event_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!driver)
goto err;
@@ -1889,7 +1897,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!device)
goto err;
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index fa490aa4407c..db110cc442b1 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -611,7 +611,8 @@ static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
}
atomic_set(&block->refcnt, 1);
block->portid = nsp->portid;
- memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len);
+ block->request_cn = *cn;
+ memcpy(block->request_cn.data, cn->data, cn->len);
node = (struct w1_cb_node *)(block->request_cn.data + cn->len);
/* Sneeky, when not bundling, reply_size is the allocated space
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 688922fc4edb..b64bc49c7f30 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1089,6 +1089,17 @@ config EBC_C384_WDT
WinSystems EBC-C384 motherboard. The timeout may be configured via
the timeout module parameter.
+config EXAR_WDT
+ tristate "Exar Watchdog Timer"
+ depends on X86
+ select WATCHDOG_CORE
+ help
+	  Enables support for the watchdog timer present in some
+	  Exar/MaxLinear UART chips like the XR28V38x.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exar_wdt.
+
config F71808E_WDT
tristate "Fintek F718xx, F818xx Super I/O Watchdog"
depends on X86
@@ -1315,7 +1326,7 @@ config IT87_WDT
config HP_WATCHDOG
tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
select WATCHDOG_CORE
- depends on X86 && PCI
+ depends on (ARM64 || X86) && PCI
help
A software monitoring watchdog and NMI handling driver. This driver
will detect lockups and provide a stack trace. This is a driver that
@@ -1325,7 +1336,7 @@ config HP_WATCHDOG
config HPWDT_NMI_DECODING
bool "NMI support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
- depends on HP_WATCHDOG
+ depends on X86 && HP_WATCHDOG
default y
help
Enables the NMI handler for the watchdog pretimeout NMI and the iLO
@@ -1935,10 +1946,10 @@ config BOOKE_WDT
config BOOKE_WDT_DEFAULT_TIMEOUT
int "PowerPC Book-E Watchdog Timer Default Timeout"
depends on BOOKE_WDT
- default 38 if PPC_FSL_BOOK3E
- range 0 63 if PPC_FSL_BOOK3E
- default 3 if !PPC_FSL_BOOK3E
- range 0 3 if !PPC_FSL_BOOK3E
+ default 38 if PPC_E500
+ range 0 63 if PPC_E500
+ default 3 if !PPC_E500
+ range 0 3 if !PPC_E500
help
Select the default watchdog timer period to be used by the PowerPC
Book-E watchdog driver. A watchdog "event" occurs when the bit
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index cdeb119e6e61..d41e5f830ae7 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -105,6 +105,7 @@ obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
obj-$(CONFIG_EBC_C384_WDT) += ebc-c384_wdt.o
+obj-$(CONFIG_EXAR_WDT) += exar_wdt.o
obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o
obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o
obj-$(CONFIG_GEODE_WDT) += geodewdt.o
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index 854b1cc723cb..ac9fed1ef681 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -179,6 +179,8 @@ static int armada_37xx_wdt_set_timeout(struct watchdog_device *wdt,
dev->timeout = (u64)dev->clk_rate * timeout;
do_div(dev->timeout, CNTR_CTRL_PRESCALE_MIN);
+ set_counter_value(dev, CNTR_ID_WDOG, dev->timeout);
+
return 0;
}
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index bd06622813eb..0cff2adfbfc9 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -332,18 +332,18 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
reg &= config->ext_pulse_width_mask;
- if (of_property_read_bool(np, "aspeed,ext-push-pull"))
- reg |= WDT_PUSH_PULL_MAGIC;
+ if (of_property_read_bool(np, "aspeed,ext-active-high"))
+ reg |= WDT_ACTIVE_HIGH_MAGIC;
else
- reg |= WDT_OPEN_DRAIN_MAGIC;
+ reg |= WDT_ACTIVE_LOW_MAGIC;
writel(reg, wdt->base + WDT_RESET_WIDTH);
reg &= config->ext_pulse_width_mask;
- if (of_property_read_bool(np, "aspeed,ext-active-high"))
- reg |= WDT_ACTIVE_HIGH_MAGIC;
+ if (of_property_read_bool(np, "aspeed,ext-push-pull"))
+ reg |= WDT_PUSH_PULL_MAGIC;
else
- reg |= WDT_ACTIVE_LOW_MAGIC;
+ reg |= WDT_OPEN_DRAIN_MAGIC;
writel(reg, wdt->base + WDT_RESET_WIDTH);
}
diff --git a/drivers/watchdog/bd9576_wdt.c b/drivers/watchdog/bd9576_wdt.c
index 0b6999f3b6e8..4a20e07fbb69 100644
--- a/drivers/watchdog/bd9576_wdt.c
+++ b/drivers/watchdog/bd9576_wdt.c
@@ -9,8 +9,8 @@
#include <linux/gpio/consumer.h>
#include <linux/mfd/rohm-bd957x.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -202,10 +202,10 @@ static int bd957x_set_wdt_mode(struct bd9576_wdt_priv *priv, int hw_margin,
static int bd9576_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->parent->of_node;
struct bd9576_wdt_priv *priv;
u32 hw_margin[2];
u32 hw_margin_max = BD957X_WDT_DEFAULT_MARGIN, hw_margin_min = 0;
+ int count;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -221,40 +221,51 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- priv->gpiod_en = devm_gpiod_get_from_of_node(dev, dev->parent->of_node,
- "rohm,watchdog-enable-gpios",
- 0, GPIOD_OUT_LOW,
- "watchdog-enable");
+ priv->gpiod_en = devm_fwnode_gpiod_get(dev, dev_fwnode(dev->parent),
+ "rohm,watchdog-enable",
+ GPIOD_OUT_LOW,
+ "watchdog-enable");
if (IS_ERR(priv->gpiod_en))
return dev_err_probe(dev, PTR_ERR(priv->gpiod_en),
"getting watchdog-enable GPIO failed\n");
- priv->gpiod_ping = devm_gpiod_get_from_of_node(dev, dev->parent->of_node,
- "rohm,watchdog-ping-gpios",
- 0, GPIOD_OUT_LOW,
- "watchdog-ping");
+ priv->gpiod_ping = devm_fwnode_gpiod_get(dev, dev_fwnode(dev->parent),
+ "rohm,watchdog-ping",
+ GPIOD_OUT_LOW,
+ "watchdog-ping");
if (IS_ERR(priv->gpiod_ping))
return dev_err_probe(dev, PTR_ERR(priv->gpiod_ping),
"getting watchdog-ping GPIO failed\n");
- ret = of_property_read_variable_u32_array(np, "rohm,hw-timeout-ms",
- &hw_margin[0], 1, 2);
- if (ret < 0 && ret != -EINVAL)
- return ret;
+ count = device_property_count_u32(dev->parent, "rohm,hw-timeout-ms");
+ if (count < 0 && count != -EINVAL)
+ return count;
+
+ if (count > 0) {
+ if (count > ARRAY_SIZE(hw_margin))
+ return -EINVAL;
- if (ret == 1)
- hw_margin_max = hw_margin[0];
+ ret = device_property_read_u32_array(dev->parent,
+ "rohm,hw-timeout-ms",
+ hw_margin, count);
+ if (ret < 0)
+ return ret;
- if (ret == 2) {
- hw_margin_max = hw_margin[1];
- hw_margin_min = hw_margin[0];
+ if (count == 1)
+ hw_margin_max = hw_margin[0];
+
+ if (count == 2) {
+ hw_margin_max = hw_margin[1];
+ hw_margin_min = hw_margin[0];
+ }
}
ret = bd957x_set_wdt_mode(priv, hw_margin_max, hw_margin_min);
if (ret)
return ret;
- priv->always_running = of_property_read_bool(np, "always-running");
+ priv->always_running = device_property_read_bool(dev->parent,
+ "always-running");
watchdog_set_drvdata(&priv->wdd, priv);
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 75da5cd02615..932a03f4436a 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -27,7 +27,7 @@
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
#define WDTP_MASK (WDTP(0x3f))
#else
@@ -45,7 +45,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
/* For the specified period, determine the number of seconds
* corresponding to the reset time. There will be a watchdog
@@ -88,7 +88,7 @@ static unsigned int sec_to_period(unsigned int secs)
#define MAX_WDT_TIMEOUT period_to_sec(1)
-#else /* CONFIG_PPC_FSL_BOOK3E */
+#else /* CONFIG_PPC_E500 */
static unsigned long long period_to_sec(unsigned int period)
{
@@ -102,7 +102,7 @@ static unsigned int sec_to_period(unsigned int secs)
#define MAX_WDT_TIMEOUT 3 /* from Kconfig */
-#endif /* !CONFIG_PPC_FSL_BOOK3E */
+#endif /* !CONFIG_PPC_E500 */
static void __booke_wdt_set(void *data)
{
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index ce682942662c..e26609ad4c17 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -192,7 +192,7 @@ static void eurwdt_ping(void)
* @ppos: pointer to the position to write. No seeks allowed
*
* A write to a watchdog device is defined as a keepalive signal. Any
- * write of data will do, as we we don't define content meaning.
+ * write of data will do, as we don't define content meaning.
*/
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
diff --git a/drivers/watchdog/exar_wdt.c b/drivers/watchdog/exar_wdt.c
new file mode 100644
index 000000000000..35058d8b21bc
--- /dev/null
+++ b/drivers/watchdog/exar_wdt.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * exar_wdt.c - Driver for the watchdog present in some
+ * Exar/MaxLinear UART chips like the XR28V38x.
+ *
+ * (c) Copyright 2022 D. Müller <d.mueller@elsoft.ch>.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+#define DRV_NAME "exar_wdt"
+
+static const unsigned short sio_config_ports[] = { 0x2e, 0x4e };
+static const unsigned char sio_enter_keys[] = { 0x67, 0x77, 0x87, 0xA0 };
+#define EXAR_EXIT_KEY 0xAA
+
+#define EXAR_LDN 0x07
+#define EXAR_DID 0x20
+#define EXAR_VID 0x23
+#define EXAR_WDT 0x26
+#define EXAR_ACT 0x30
+#define EXAR_RTBASE 0x60
+
+#define EXAR_WDT_LDEV 0x08
+
+#define EXAR_VEN_ID 0x13A8
+#define EXAR_DEV_382 0x0382
+#define EXAR_DEV_384 0x0384
+
+/* WDT runtime registers */
+#define WDT_CTRL 0x00
+#define WDT_VAL 0x01
+
+#define WDT_UNITS_10MS 0x0 /* the 10 millisec unit of the HW is not used */
+#define WDT_UNITS_SEC 0x2
+#define WDT_UNITS_MIN 0x4
+
+/* default WDT control for WDTOUT signal active / rearm by read */
+#define EXAR_WDT_DEF_CONF 0
+
+struct wdt_pdev_node {
+ struct list_head list;
+ struct platform_device *pdev;
+ const char name[16];
+};
+
+struct wdt_priv {
+ /* the lock for WDT io operations */
+ spinlock_t io_lock;
+ struct resource wdt_res;
+ struct watchdog_device wdt_dev;
+ unsigned short did;
+ unsigned short config_port;
+ unsigned char enter_key;
+ unsigned char unit;
+ unsigned char timeout;
+};
+
+#define WATCHDOG_TIMEOUT 60
+
+static int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. 1<=timeout<=15300, default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int exar_sio_enter(const unsigned short config_port,
+ const unsigned char key)
+{
+ if (!request_muxed_region(config_port, 2, DRV_NAME))
+ return -EBUSY;
+
+ /* write the ENTER-KEY twice */
+ outb(key, config_port);
+ outb(key, config_port);
+
+ return 0;
+}
+
+static void exar_sio_exit(const unsigned short config_port)
+{
+ outb(EXAR_EXIT_KEY, config_port);
+ release_region(config_port, 2);
+}
+
+static unsigned char exar_sio_read(const unsigned short config_port,
+ const unsigned char reg)
+{
+ outb(reg, config_port);
+ return inb(config_port + 1);
+}
+
+static void exar_sio_write(const unsigned short config_port,
+ const unsigned char reg, const unsigned char val)
+{
+ outb(reg, config_port);
+ outb(val, config_port + 1);
+}
+
+static unsigned short exar_sio_read16(const unsigned short config_port,
+ const unsigned char reg)
+{
+ unsigned char msb, lsb;
+
+ msb = exar_sio_read(config_port, reg);
+ lsb = exar_sio_read(config_port, reg + 1);
+
+ return (msb << 8) | lsb;
+}
+
+static void exar_sio_select_wdt(const unsigned short config_port)
+{
+ exar_sio_write(config_port, EXAR_LDN, EXAR_WDT_LDEV);
+}
+
+static void exar_wdt_arm(const struct wdt_priv *priv)
+{
+ unsigned short rt_base = priv->wdt_res.start;
+
+ /* write timeout value twice to arm watchdog */
+ outb(priv->timeout, rt_base + WDT_VAL);
+ outb(priv->timeout, rt_base + WDT_VAL);
+}
+
+static void exar_wdt_disarm(const struct wdt_priv *priv)
+{
+ unsigned short rt_base = priv->wdt_res.start;
+
+ /*
+ * use two accesses with different values to make sure
+ * that a combination of a previous single access and
+ * the ones below with the same value are not falsely
+ * interpreted as "arm watchdog"
+ */
+ outb(0xFF, rt_base + WDT_VAL);
+ outb(0, rt_base + WDT_VAL);
+}
+
+static int exar_wdt_start(struct watchdog_device *wdog)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+ unsigned short rt_base = priv->wdt_res.start;
+
+ spin_lock(&priv->io_lock);
+
+ exar_wdt_disarm(priv);
+ outb(priv->unit, rt_base + WDT_CTRL);
+ exar_wdt_arm(priv);
+
+ spin_unlock(&priv->io_lock);
+ return 0;
+}
+
+static int exar_wdt_stop(struct watchdog_device *wdog)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+
+ spin_lock(&priv->io_lock);
+
+ exar_wdt_disarm(priv);
+
+ spin_unlock(&priv->io_lock);
+ return 0;
+}
+
+static int exar_wdt_keepalive(struct watchdog_device *wdog)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+ unsigned short rt_base = priv->wdt_res.start;
+
+ spin_lock(&priv->io_lock);
+
+ /* reading the WDT_VAL reg will feed the watchdog */
+ inb(rt_base + WDT_VAL);
+
+ spin_unlock(&priv->io_lock);
+ return 0;
+}
+
+static int exar_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+ bool unit_min = false;
+
+ /*
+	 * if new timeout is bigger than 255 seconds, change the
+ * unit to minutes and round the timeout up to the next whole minute
+ */
+ if (t > 255) {
+ unit_min = true;
+ t = DIV_ROUND_UP(t, 60);
+ }
+
+ /* save for later use in exar_wdt_start() */
+ priv->unit = unit_min ? WDT_UNITS_MIN : WDT_UNITS_SEC;
+ priv->timeout = t;
+
+ wdog->timeout = unit_min ? t * 60 : t;
+
+ if (watchdog_hw_running(wdog))
+ exar_wdt_start(wdog);
+
+ return 0;
+}
+
+static const struct watchdog_info exar_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE,
+ .identity = "Exar/MaxLinear XR28V38x Watchdog",
+};
+
+static const struct watchdog_ops exar_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = exar_wdt_start,
+ .stop = exar_wdt_stop,
+ .ping = exar_wdt_keepalive,
+ .set_timeout = exar_wdt_set_timeout,
+};
+
+static int exar_wdt_config(struct watchdog_device *wdog,
+ const unsigned char conf)
+{
+ struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+ int ret;
+
+ ret = exar_sio_enter(priv->config_port, priv->enter_key);
+ if (ret)
+ return ret;
+
+ exar_sio_select_wdt(priv->config_port);
+ exar_sio_write(priv->config_port, EXAR_WDT, conf);
+
+ exar_sio_exit(priv->config_port);
+
+ return 0;
+}
+
+static int __init exar_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wdt_priv *priv = dev->platform_data;
+ struct watchdog_device *wdt_dev = &priv->wdt_dev;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENXIO;
+
+ spin_lock_init(&priv->io_lock);
+
+ wdt_dev->info = &exar_wdt_info;
+ wdt_dev->ops = &exar_wdt_ops;
+ wdt_dev->min_timeout = 1;
+ wdt_dev->max_timeout = 255 * 60;
+
+ watchdog_init_timeout(wdt_dev, timeout, NULL);
+ watchdog_set_nowayout(wdt_dev, nowayout);
+ watchdog_stop_on_reboot(wdt_dev);
+ watchdog_stop_on_unregister(wdt_dev);
+ watchdog_set_drvdata(wdt_dev, priv);
+
+ ret = exar_wdt_config(wdt_dev, EXAR_WDT_DEF_CONF);
+ if (ret)
+ return ret;
+
+ exar_wdt_set_timeout(wdt_dev, timeout);
+ /* Make sure that the watchdog is not running */
+ exar_wdt_stop(wdt_dev);
+
+ ret = devm_watchdog_register_device(dev, wdt_dev);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "XR28V%X WDT initialized. timeout=%d sec (nowayout=%d)\n",
+ priv->did, timeout, nowayout);
+
+ return 0;
+}
+
+static unsigned short __init exar_detect(const unsigned short config_port,
+ const unsigned char key,
+ unsigned short *rt_base)
+{
+ int ret;
+ unsigned short base = 0;
+ unsigned short vid, did;
+
+ ret = exar_sio_enter(config_port, key);
+ if (ret)
+ return 0;
+
+ vid = exar_sio_read16(config_port, EXAR_VID);
+ did = exar_sio_read16(config_port, EXAR_DID);
+
+ /* check for the vendor and device IDs we currently know about */
+ if (vid == EXAR_VEN_ID &&
+ (did == EXAR_DEV_382 ||
+ did == EXAR_DEV_384)) {
+ exar_sio_select_wdt(config_port);
+ /* is device active? */
+ if (exar_sio_read(config_port, EXAR_ACT) == 0x01)
+ base = exar_sio_read16(config_port, EXAR_RTBASE);
+ }
+
+ exar_sio_exit(config_port);
+
+ if (base) {
+ pr_debug("Found a XR28V%X WDT (conf: 0x%x / rt: 0x%04x)\n",
+ did, config_port, base);
+ *rt_base = base;
+ return did;
+ }
+
+ return 0;
+}
+
+static struct platform_driver exar_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static LIST_HEAD(pdev_list);
+
+static int __init exar_wdt_register(struct wdt_priv *priv, const int idx)
+{
+ struct wdt_pdev_node *n;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&n->list);
+
+ scnprintf((char *)n->name, sizeof(n->name), DRV_NAME ".%d", idx);
+ priv->wdt_res.name = n->name;
+
+ n->pdev = platform_device_register_resndata(NULL, DRV_NAME, idx,
+ &priv->wdt_res, 1,
+ priv, sizeof(*priv));
+ if (IS_ERR(n->pdev)) {
+ kfree(n);
+ return PTR_ERR(n->pdev);
+ }
+
+ list_add_tail(&n->list, &pdev_list);
+
+ return 0;
+}
+
+static void exar_wdt_unregister(void)
+{
+ struct wdt_pdev_node *n, *t;
+
+ list_for_each_entry_safe(n, t, &pdev_list, list) {
+ platform_device_unregister(n->pdev);
+ list_del(&n->list);
+ kfree(n);
+ }
+}
+
+static int __init exar_wdt_init(void)
+{
+ int ret, i, j, idx = 0;
+
+ /* search for active Exar watchdogs on all possible locations */
+ for (i = 0; i < ARRAY_SIZE(sio_config_ports); i++) {
+ for (j = 0; j < ARRAY_SIZE(sio_enter_keys); j++) {
+ unsigned short did, rt_base = 0;
+
+ did = exar_detect(sio_config_ports[i],
+ sio_enter_keys[j],
+ &rt_base);
+
+ if (did) {
+ struct wdt_priv priv = {
+ .wdt_res = DEFINE_RES_IO(rt_base, 2),
+ .did = did,
+ .config_port = sio_config_ports[i],
+ .enter_key = sio_enter_keys[j],
+ };
+
+ ret = exar_wdt_register(&priv, idx);
+ if (!ret)
+ idx++;
+ }
+ }
+ }
+
+ if (!idx)
+ return -ENODEV;
+
+ ret = platform_driver_probe(&exar_wdt_driver, exar_wdt_probe);
+ if (ret)
+ exar_wdt_unregister();
+
+ return ret;
+}
+
+static void __exit exar_wdt_exit(void)
+{
+ exar_wdt_unregister();
+ platform_driver_unregister(&exar_wdt_driver);
+}
+
+module_init(exar_wdt_init);
+module_exit(exar_wdt_exit);
+
+MODULE_AUTHOR("David Müller <d.mueller@elsoft.ch>");
+MODULE_DESCRIPTION("Exar/MaxLinear Watchdog Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/ftwdt010_wdt.c b/drivers/watchdog/ftwdt010_wdt.c
index 21dcc7765688..442c5bf63ff4 100644
--- a/drivers/watchdog/ftwdt010_wdt.c
+++ b/drivers/watchdog/ftwdt010_wdt.c
@@ -47,21 +47,28 @@ struct ftwdt010_wdt *to_ftwdt010_wdt(struct watchdog_device *wdd)
return container_of(wdd, struct ftwdt010_wdt, wdd);
}
-static int ftwdt010_wdt_start(struct watchdog_device *wdd)
+static void ftwdt010_enable(struct ftwdt010_wdt *gwdt,
+ unsigned int timeout,
+ bool need_irq)
{
- struct ftwdt010_wdt *gwdt = to_ftwdt010_wdt(wdd);
u32 enable;
- writel(wdd->timeout * WDT_CLOCK, gwdt->base + FTWDT010_WDLOAD);
+ writel(timeout * WDT_CLOCK, gwdt->base + FTWDT010_WDLOAD);
writel(WDRESTART_MAGIC, gwdt->base + FTWDT010_WDRESTART);
/* set clock before enabling */
enable = WDCR_CLOCK_5MHZ | WDCR_SYS_RST;
writel(enable, gwdt->base + FTWDT010_WDCR);
- if (gwdt->has_irq)
+ if (need_irq)
enable |= WDCR_WDINTR;
enable |= WDCR_ENABLE;
writel(enable, gwdt->base + FTWDT010_WDCR);
+}
+static int ftwdt010_wdt_start(struct watchdog_device *wdd)
+{
+ struct ftwdt010_wdt *gwdt = to_ftwdt010_wdt(wdd);
+
+ ftwdt010_enable(gwdt, wdd->timeout, gwdt->has_irq);
return 0;
}
@@ -93,6 +100,13 @@ static int ftwdt010_wdt_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static int ftwdt010_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ ftwdt010_enable(to_ftwdt010_wdt(wdd), 0, false);
+ return 0;
+}
+
static irqreturn_t ftwdt010_wdt_interrupt(int irq, void *data)
{
struct ftwdt010_wdt *gwdt = data;
@@ -107,6 +121,7 @@ static const struct watchdog_ops ftwdt010_wdt_ops = {
.stop = ftwdt010_wdt_stop,
.ping = ftwdt010_wdt_ping,
.set_timeout = ftwdt010_wdt_set_timeout,
+ .restart = ftwdt010_wdt_restart,
.owner = THIS_MODULE,
};
@@ -156,7 +171,7 @@ static int ftwdt010_wdt_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq) {
+ if (irq > 0) {
ret = devm_request_irq(dev, irq, ftwdt010_wdt_interrupt, 0,
"watchdog bark", gwdt);
if (ret)
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index a5006a58e0db..f79f932bca14 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -20,7 +20,9 @@
#include <linux/pci_ids.h>
#include <linux/types.h>
#include <linux/watchdog.h>
+#ifdef CONFIG_HPWDT_NMI_DECODING
#include <asm/nmi.h>
+#endif
#include <linux/crash_dump.h>
#define HPWDT_VERSION "2.0.4"
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 922b60374295..2897902090b3 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -9,12 +9,15 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/watchdog.h>
#define WDOG_CS 0x0
+#define WDOG_CS_FLG BIT(14)
#define WDOG_CS_CMD32EN BIT(13)
+#define WDOG_CS_PRES BIT(12)
#define WDOG_CS_ULK BIT(11)
#define WDOG_CS_RCS BIT(10)
#define LPO_CLK 0x1
@@ -39,60 +42,105 @@
#define DEFAULT_TIMEOUT 60
#define MAX_TIMEOUT 128
#define WDOG_CLOCK_RATE 1000
-#define WDOG_WAIT_TIMEOUT 20
+#define WDOG_ULK_WAIT_TIMEOUT 1000
+#define WDOG_RCS_WAIT_TIMEOUT 10000
+#define WDOG_RCS_POST_WAIT 3000
+
+#define RETRY_MAX 5
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0000);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+struct imx_wdt_hw_feature {
+ bool prescaler_enable;
+ u32 wdog_clock_rate;
+};
+
struct imx7ulp_wdt_device {
struct watchdog_device wdd;
void __iomem *base;
struct clk *clk;
+ bool post_rcs_wait;
+ const struct imx_wdt_hw_feature *hw;
};
-static int imx7ulp_wdt_wait(void __iomem *base, u32 mask)
+static int imx7ulp_wdt_wait_ulk(void __iomem *base)
{
u32 val = readl(base + WDOG_CS);
- if (!(val & mask) && readl_poll_timeout_atomic(base + WDOG_CS, val,
- val & mask, 0,
- WDOG_WAIT_TIMEOUT))
+ if (!(val & WDOG_CS_ULK) &&
+ readl_poll_timeout_atomic(base + WDOG_CS, val,
+ val & WDOG_CS_ULK, 0,
+ WDOG_ULK_WAIT_TIMEOUT))
return -ETIMEDOUT;
return 0;
}
-static int imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable)
+static int imx7ulp_wdt_wait_rcs(struct imx7ulp_wdt_device *wdt)
{
- struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+ int ret = 0;
+ u32 val = readl(wdt->base + WDOG_CS);
+ u64 timeout = (val & WDOG_CS_PRES) ?
+ WDOG_RCS_WAIT_TIMEOUT * 256 : WDOG_RCS_WAIT_TIMEOUT;
+ unsigned long wait_min = (val & WDOG_CS_PRES) ?
+ WDOG_RCS_POST_WAIT * 256 : WDOG_RCS_POST_WAIT;
+
+ if (!(val & WDOG_CS_RCS) &&
+ readl_poll_timeout(wdt->base + WDOG_CS, val, val & WDOG_CS_RCS, 100,
+ timeout))
+ ret = -ETIMEDOUT;
+
+ /* Wait 2.5 clocks after RCS done */
+ if (wdt->post_rcs_wait)
+ usleep_range(wait_min, wait_min + 2000);
+
+ return ret;
+}
+static int _imx7ulp_wdt_enable(struct imx7ulp_wdt_device *wdt, bool enable)
+{
u32 val = readl(wdt->base + WDOG_CS);
int ret;
local_irq_disable();
writel(UNLOCK, wdt->base + WDOG_CNT);
- ret = imx7ulp_wdt_wait(wdt->base, WDOG_CS_ULK);
+ ret = imx7ulp_wdt_wait_ulk(wdt->base);
if (ret)
goto enable_out;
if (enable)
writel(val | WDOG_CS_EN, wdt->base + WDOG_CS);
else
writel(val & ~WDOG_CS_EN, wdt->base + WDOG_CS);
- imx7ulp_wdt_wait(wdt->base, WDOG_CS_RCS);
-enable_out:
local_irq_enable();
+ ret = imx7ulp_wdt_wait_rcs(wdt);
return ret;
+
+enable_out:
+ local_irq_enable();
+ return ret;
}
-static bool imx7ulp_wdt_is_enabled(void __iomem *base)
+static int imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable)
{
- u32 val = readl(base + WDOG_CS);
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+ int ret;
+ u32 val;
+ u32 loop = RETRY_MAX;
+
+ do {
+ ret = _imx7ulp_wdt_enable(wdt, enable);
+ val = readl(wdt->base + WDOG_CS);
+ } while (--loop > 0 && ((!!(val & WDOG_CS_EN)) != enable || ret));
- return val & WDOG_CS_EN;
+ if (loop == 0)
+ return -EBUSY;
+
+ return ret;
}
static int imx7ulp_wdt_ping(struct watchdog_device *wdog)
@@ -114,26 +162,44 @@ static int imx7ulp_wdt_stop(struct watchdog_device *wdog)
return imx7ulp_wdt_enable(wdog, false);
}
-static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
- unsigned int timeout)
+static int _imx7ulp_wdt_set_timeout(struct imx7ulp_wdt_device *wdt,
+ unsigned int toval)
{
- struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- u32 val = WDOG_CLOCK_RATE * timeout;
int ret;
local_irq_disable();
writel(UNLOCK, wdt->base + WDOG_CNT);
- ret = imx7ulp_wdt_wait(wdt->base, WDOG_CS_ULK);
+ ret = imx7ulp_wdt_wait_ulk(wdt->base);
if (ret)
goto timeout_out;
- writel(val, wdt->base + WDOG_TOVAL);
- imx7ulp_wdt_wait(wdt->base, WDOG_CS_RCS);
-
- wdog->timeout = timeout;
+ writel(toval, wdt->base + WDOG_TOVAL);
+ local_irq_enable();
+ ret = imx7ulp_wdt_wait_rcs(wdt);
+ return ret;
timeout_out:
local_irq_enable();
+ return ret;
+}
+static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int timeout)
+{
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+ u32 toval = wdt->hw->wdog_clock_rate * timeout;
+ u32 val;
+ int ret;
+ u32 loop = RETRY_MAX;
+
+ do {
+ ret = _imx7ulp_wdt_set_timeout(wdt, toval);
+ val = readl(wdt->base + WDOG_TOVAL);
+ } while (--loop > 0 && (val != toval || ret));
+
+ if (loop == 0)
+ return -EBUSY;
+
+ wdog->timeout = timeout;
return ret;
}
@@ -173,29 +239,62 @@ static const struct watchdog_info imx7ulp_wdt_info = {
WDIOF_MAGICCLOSE,
};
-static int imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
+static int _imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout, unsigned int cs)
{
u32 val;
int ret;
local_irq_disable();
- /* unlock the wdog for reconfiguration */
- writel_relaxed(UNLOCK_SEQ0, base + WDOG_CNT);
- writel_relaxed(UNLOCK_SEQ1, base + WDOG_CNT);
- ret = imx7ulp_wdt_wait(base, WDOG_CS_ULK);
+
+ val = readl(wdt->base + WDOG_CS);
+ if (val & WDOG_CS_CMD32EN) {
+ writel(UNLOCK, wdt->base + WDOG_CNT);
+ } else {
+ mb();
+ /* unlock the wdog for reconfiguration */
+ writel_relaxed(UNLOCK_SEQ0, wdt->base + WDOG_CNT);
+ writel_relaxed(UNLOCK_SEQ1, wdt->base + WDOG_CNT);
+ mb();
+ }
+
+ ret = imx7ulp_wdt_wait_ulk(wdt->base);
if (ret)
goto init_out;
/* set an initial timeout value in TOVAL */
- writel(timeout, base + WDOG_TOVAL);
- /* enable 32bit command sequence and reconfigure */
- val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE |
- WDOG_CS_WAIT | WDOG_CS_STOP;
- writel(val, base + WDOG_CS);
- imx7ulp_wdt_wait(base, WDOG_CS_RCS);
+ writel(timeout, wdt->base + WDOG_TOVAL);
+ writel(cs, wdt->base + WDOG_CS);
+ local_irq_enable();
+ ret = imx7ulp_wdt_wait_rcs(wdt);
+
+ return ret;
init_out:
local_irq_enable();
+ return ret;
+}
+
+static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout)
+{
+ /* enable 32bit command sequence and reconfigure */
+ u32 val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE |
+ WDOG_CS_WAIT | WDOG_CS_STOP;
+ u32 cs, toval;
+ int ret;
+ u32 loop = RETRY_MAX;
+
+ if (wdt->hw->prescaler_enable)
+ val |= WDOG_CS_PRES;
+
+ do {
+ ret = _imx7ulp_wdt_init(wdt, timeout, val);
+ toval = readl(wdt->base + WDOG_TOVAL);
+ cs = readl(wdt->base + WDOG_CS);
+ cs &= ~(WDOG_CS_FLG | WDOG_CS_ULK | WDOG_CS_RCS);
+ } while (--loop > 0 && (cs != val || toval != timeout || ret));
+
+ if (loop == 0)
+ return -EBUSY;
return ret;
}
@@ -228,6 +327,15 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
return PTR_ERR(imx7ulp_wdt->clk);
}
+ imx7ulp_wdt->post_rcs_wait = true;
+ if (of_device_is_compatible(dev->of_node,
+ "fsl,imx8ulp-wdt")) {
+ dev_info(dev, "imx8ulp wdt probe\n");
+ imx7ulp_wdt->post_rcs_wait = false;
+ } else {
+ dev_info(dev, "imx7ulp wdt probe\n");
+ }
+
ret = clk_prepare_enable(imx7ulp_wdt->clk);
if (ret)
return ret;
@@ -248,14 +356,16 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);
watchdog_set_drvdata(wdog, imx7ulp_wdt);
- ret = imx7ulp_wdt_init(imx7ulp_wdt->base, wdog->timeout * WDOG_CLOCK_RATE);
+
+ imx7ulp_wdt->hw = of_device_get_match_data(dev);
+ ret = imx7ulp_wdt_init(imx7ulp_wdt, wdog->timeout * imx7ulp_wdt->hw->wdog_clock_rate);
if (ret)
return ret;
return devm_watchdog_register_device(dev, wdog);
}
-static int __maybe_unused imx7ulp_wdt_suspend(struct device *dev)
+static int __maybe_unused imx7ulp_wdt_suspend_noirq(struct device *dev)
{
struct imx7ulp_wdt_device *imx7ulp_wdt = dev_get_drvdata(dev);
@@ -267,30 +377,44 @@ static int __maybe_unused imx7ulp_wdt_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused imx7ulp_wdt_resume(struct device *dev)
+static int __maybe_unused imx7ulp_wdt_resume_noirq(struct device *dev)
{
struct imx7ulp_wdt_device *imx7ulp_wdt = dev_get_drvdata(dev);
- u32 timeout = imx7ulp_wdt->wdd.timeout * WDOG_CLOCK_RATE;
+ u32 timeout = imx7ulp_wdt->wdd.timeout * imx7ulp_wdt->hw->wdog_clock_rate;
int ret;
ret = clk_prepare_enable(imx7ulp_wdt->clk);
if (ret)
return ret;
- if (imx7ulp_wdt_is_enabled(imx7ulp_wdt->base))
- imx7ulp_wdt_init(imx7ulp_wdt->base, timeout);
-
- if (watchdog_active(&imx7ulp_wdt->wdd))
+ if (watchdog_active(&imx7ulp_wdt->wdd)) {
+ imx7ulp_wdt_init(imx7ulp_wdt, timeout);
imx7ulp_wdt_start(&imx7ulp_wdt->wdd);
+ imx7ulp_wdt_ping(&imx7ulp_wdt->wdd);
+ }
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx7ulp_wdt_pm_ops, imx7ulp_wdt_suspend,
- imx7ulp_wdt_resume);
+static const struct dev_pm_ops imx7ulp_wdt_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx7ulp_wdt_suspend_noirq,
+ imx7ulp_wdt_resume_noirq)
+};
+
+static const struct imx_wdt_hw_feature imx7ulp_wdt_hw = {
+ .prescaler_enable = false,
+ .wdog_clock_rate = 1000,
+};
+
+static const struct imx_wdt_hw_feature imx93_wdt_hw = {
+ .prescaler_enable = true,
+ .wdog_clock_rate = 125,
+};
static const struct of_device_id imx7ulp_wdt_dt_ids[] = {
- { .compatible = "fsl,imx7ulp-wdt", },
+ { .compatible = "fsl,imx8ulp-wdt", .data = &imx7ulp_wdt_hw, },
+ { .compatible = "fsl,imx7ulp-wdt", .data = &imx7ulp_wdt_hw, },
+ { .compatible = "fsl,imx93-wdt", .data = &imx93_wdt_hw, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx7ulp_wdt_dt_ids);
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index d3c9e2f6e63b..981a2f7c3bec 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -156,6 +156,7 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct meson_gxbb_wdt *data;
int ret;
+ u32 ctrl_reg;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -189,13 +190,26 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&data->wdt_dev, nowayout);
watchdog_set_drvdata(&data->wdt_dev, data);
+ ctrl_reg = readl(data->reg_base + GXBB_WDT_CTRL_REG) &
+ GXBB_WDT_CTRL_EN;
+
+ if (ctrl_reg) {
+ /* Watchdog is running - keep it running but extend timeout
+ * to the maximum while setting the timebase
+ */
+ set_bit(WDOG_HW_RUNNING, &data->wdt_dev.status);
+ meson_gxbb_wdt_set_timeout(&data->wdt_dev,
+ GXBB_WDT_TCNT_SETUP_MASK / 1000);
+ }
+
/* Setup with 1ms timebase */
- writel(((clk_get_rate(data->clk) / 1000) & GXBB_WDT_CTRL_DIV_MASK) |
- GXBB_WDT_CTRL_EE_RESET |
- GXBB_WDT_CTRL_CLK_EN |
- GXBB_WDT_CTRL_CLKDIV_EN,
- data->reg_base + GXBB_WDT_CTRL_REG);
+ ctrl_reg |= ((clk_get_rate(data->clk) / 1000) &
+ GXBB_WDT_CTRL_DIV_MASK) |
+ GXBB_WDT_CTRL_EE_RESET |
+ GXBB_WDT_CTRL_CLK_EN |
+ GXBB_WDT_CTRL_CLKDIV_EN;
+ writel(ctrl_reg, data->reg_base + GXBB_WDT_CTRL_REG);
meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
return devm_watchdog_register_device(dev, &data->wdt_dev);
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
index 28a24caa2627..a5dd1c230137 100644
--- a/drivers/watchdog/npcm_wdt.c
+++ b/drivers/watchdog/npcm_wdt.c
@@ -3,6 +3,7 @@
// Copyright (c) 2018 IBM Corp.
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -43,6 +44,7 @@
struct npcm_wdt {
struct watchdog_device wdd;
void __iomem *reg;
+ struct clk *clk;
};
static inline struct npcm_wdt *to_npcm_wdt(struct watchdog_device *wdd)
@@ -66,6 +68,9 @@ static int npcm_wdt_start(struct watchdog_device *wdd)
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
u32 val;
+ if (wdt->clk)
+ clk_prepare_enable(wdt->clk);
+
if (wdd->timeout < 2)
val = 0x800;
else if (wdd->timeout < 3)
@@ -100,6 +105,9 @@ static int npcm_wdt_stop(struct watchdog_device *wdd)
writel(0, wdt->reg);
+ if (wdt->clk)
+ clk_disable_unprepare(wdt->clk);
+
return 0;
}
@@ -147,6 +155,10 @@ static int npcm_wdt_restart(struct watchdog_device *wdd,
{
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+ /* For reset, we start the WDT clock and leave it running. */
+ if (wdt->clk)
+ clk_prepare_enable(wdt->clk);
+
writel(NPCM_WTR | NPCM_WTRE | NPCM_WTE, wdt->reg);
udelay(1000);
@@ -191,6 +203,10 @@ static int npcm_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->reg))
return PTR_ERR(wdt->reg);
+ wdt->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(wdt->clk))
+ return PTR_ERR(wdt->clk);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
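
Editor's note: the npcm_wdt clock is optional. devm_clk_get_optional() returns NULL (not an error) when the device tree describes no clock, an ERR_PTR on genuine failures, and a valid clk otherwise, so start/stop/restart can simply gate the clk calls on a non-NULL pointer. A small sketch of the pattern, with foo_probe/foo_start as stand-ins:

#include <linux/clk.h>
#include <linux/platform_device.h>

struct foo_wdt {
	struct clk *clk;
};

static int foo_probe(struct platform_device *pdev, struct foo_wdt *wdt)
{
	/* NULL when no clock is described; ERR_PTR on genuine errors. */
	wdt->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(wdt->clk))
		return PTR_ERR(wdt->clk);
	return 0;
}

static int foo_start(struct foo_wdt *wdt)
{
	/* clk_prepare_enable(NULL) is a no-op, so the check is belt-and-braces. */
	if (wdt->clk)
		clk_prepare_enable(wdt->clk);
	/* ... kick the hardware ... */
	return 0;
}
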
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 053ef3bde12d..6e9253761fc1 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -225,9 +225,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
wdt->freq = wdt->freq * 9 / 10;
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(&pdev->dev);
return dev_err_probe(dev, ret, "runtime pm failed\n");
}
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
index 6eea0ee4af49..974a4194a8fd 100644
--- a/drivers/watchdog/rzg2l_wdt.c
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -10,7 +10,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -40,6 +40,11 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+enum rz_wdt_type {
+ WDT_RZG2L,
+ WDT_RZV2M,
+};
+
struct rzg2l_wdt_priv {
void __iomem *base;
struct watchdog_device wdev;
@@ -48,6 +53,7 @@ struct rzg2l_wdt_priv {
unsigned long delay;
struct clk *pclk;
struct clk *osc_clk;
+ enum rz_wdt_type devtype;
};
static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
@@ -142,11 +148,29 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
clk_prepare_enable(priv->pclk);
clk_prepare_enable(priv->osc_clk);
- /* Generate Reset (WDTRSTB) Signal on parity error */
- rzg2l_wdt_write(priv, 0, PECR);
+ if (priv->devtype == WDT_RZG2L) {
+ /* Generate Reset (WDTRSTB) Signal on parity error */
+ rzg2l_wdt_write(priv, 0, PECR);
+
+ /* Force parity error */
+ rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
+ } else {
+ /* RZ/V2M doesn't have parity error registers */
+
+ wdev->timeout = 0;
+
+ /* Initialize time out */
+ rzg2l_wdt_init_timeout(wdev);
- /* Force parity error */
- rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
+ /* Initialize watchdog counter register */
+ rzg2l_wdt_write(priv, 0, WDTTIM);
+
+ /* Enable watchdog timer*/
+ rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
+
+ /* Wait 2 consecutive overflow cycles for reset */
+ mdelay(DIV_ROUND_UP(2 * 0xFFFFF * 1000, priv->osc_clk_rate));
+ }
return 0;
}
@@ -227,6 +251,8 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to deassert");
+ priv->devtype = (uintptr_t)of_device_get_match_data(dev);
+
pm_runtime_enable(&pdev->dev);
priv->wdev.info = &rzg2l_wdt_ident;
@@ -255,7 +281,8 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
}
static const struct of_device_id rzg2l_wdt_ids[] = {
- { .compatible = "renesas,rzg2l-wdt", },
+ { .compatible = "renesas,rzg2l-wdt", .data = (void *)WDT_RZG2L },
+ { .compatible = "renesas,rzv2m-wdt", .data = (void *)WDT_RZV2M },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_wdt_ids);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 95919392927f..d3fc8ed886ff 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -60,9 +60,13 @@
#define EXYNOS850_CLUSTER0_NONCPU_INT_EN 0x1244
#define EXYNOS850_CLUSTER1_NONCPU_OUT 0x1620
#define EXYNOS850_CLUSTER1_NONCPU_INT_EN 0x1644
+#define EXYNOSAUTOV9_CLUSTER1_NONCPU_OUT 0x1520
+#define EXYNOSAUTOV9_CLUSTER1_NONCPU_INT_EN 0x1544
#define EXYNOS850_CLUSTER0_WDTRESET_BIT 24
#define EXYNOS850_CLUSTER1_WDTRESET_BIT 23
+#define EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT 25
+#define EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT 24
/**
* DOC: Quirk flags for different Samsung watchdog IP-cores
@@ -236,6 +240,30 @@ static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = {
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
};
+static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = {
+ .mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS850_CLUSTER0_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl1 = {
+ .mask_reset_reg = EXYNOSAUTOV9_CLUSTER1_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOSAUTOV9_CLUSTER1_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
+};
+
static const struct of_device_id s3c2410_wdt_match[] = {
{ .compatible = "samsung,s3c2410-wdt",
.data = &drv_data_s3c2410 },
@@ -249,6 +277,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
.data = &drv_data_exynos7 },
{ .compatible = "samsung,exynos850-wdt",
.data = &drv_data_exynos850_cl0 },
+ { .compatible = "samsung,exynosautov9-wdt",
+ .data = &drv_data_exynosautov9_cl0 },
{},
};
MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
@@ -630,8 +660,9 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev)
}
#ifdef CONFIG_OF
- /* Choose Exynos850 driver data w.r.t. cluster index */
- if (variant == &drv_data_exynos850_cl0) {
+ /* Choose Exynos850/ExynosAutov9 driver data w.r.t. cluster index */
+ if (variant == &drv_data_exynos850_cl0 ||
+ variant == &drv_data_exynosautov9_cl0) {
u32 index;
int err;
@@ -644,9 +675,11 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev)
switch (index) {
case 0:
- return &drv_data_exynos850_cl0;
+ return variant;
case 1:
- return &drv_data_exynos850_cl1;
+ return (variant == &drv_data_exynos850_cl0) ?
+ &drv_data_exynos850_cl1 :
+ &drv_data_exynosautov9_cl1;
default:
dev_err(dev, "wrong cluster index: %u\n", index);
return NULL;
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 2d0a06a158a8..82ac5d19f519 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -238,7 +238,7 @@ static int sa1100dog_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver sa1100dog_driver = {
+static struct platform_driver sa1100dog_driver = {
.driver.name = "sa1100_wdt",
.probe = sa1100dog_probe,
.remove = sa1100dog_remove,
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index ae54dd33e233..fb426b7d81da 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -65,6 +65,12 @@ static struct pci_dev *sp5100_tco_pci;
/* module parameters */
+#define WATCHDOG_ACTION 0
+static bool action = WATCHDOG_ACTION;
+module_param(action, bool, 0);
+MODULE_PARM_DESC(action, "Action taken when watchdog expires, 0 to reset, 1 to poweroff (default="
+ __MODULE_STRING(WATCHDOG_ACTION) ")");
+
#define WATCHDOG_HEARTBEAT 60 /* 60 sec default heartbeat. */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
module_param(heartbeat, int, 0);
@@ -297,8 +303,11 @@ static int sp5100_tco_timer_init(struct sp5100_tco *tco)
if (val & SP5100_WDT_FIRED)
wdd->bootstatus = WDIOF_CARDRESET;
- /* Set watchdog action to reset the system */
- val &= ~SP5100_WDT_ACTION_RESET;
+ /* Set watchdog action */
+ if (action)
+ val |= SP5100_WDT_ACTION_RESET;
+ else
+ val &= ~SP5100_WDT_ACTION_RESET;
writel(val, SP5100_WDT_CONTROL(tco->tcobase));
/* Set a reasonable heartbeat before we stop the timer */
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 355e428c0b99..36b4a660928d 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/mfd/twl.h>
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 56a4a4030ca9..bc33b63c5a5d 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -113,6 +113,10 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
#define W836X7HF_WDT_CSR 0xf7
#define NCT6102D_WDT_CSR 0xf2
+#define WDT_CSR_STATUS 0x10
+#define WDT_CSR_KBD 0x40
+#define WDT_CSR_MOUSE 0x80
+
static void superio_outb(int reg, int val)
{
outb(reg, WDT_EFER);
@@ -244,8 +248,12 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
t = superio_inb(cr_wdt_control) & ~0x0C;
superio_outb(cr_wdt_control, t);
- /* reset trigger, disable keyboard & mouse turning off watchdog */
- t = superio_inb(cr_wdt_csr) & ~0xD0;
+ t = superio_inb(cr_wdt_csr);
+ if (t & WDT_CSR_STATUS)
+ wdog->bootstatus |= WDIOF_CARDRESET;
+
+ /* reset status, disable keyboard & mouse turning off watchdog */
+ t &= ~(WDT_CSR_STATUS | WDT_CSR_KBD | WDT_CSR_MOUSE);
superio_outb(cr_wdt_csr, t);
superio_exit();
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index fd64ae77780a..31bf21ceaf48 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -321,7 +321,7 @@ static int wdt_release(struct inode *inode, struct file *file)
* @ppos: pointer to the position to write. No seeks allowed
*
* A write to a watchdog device is defined as a keepalive signal. Any
- * write of data will do, as we we don't define content meaning.
+ * write of data will do, as we don't define content meaning.
*/
static ssize_t wdt_write(struct file *file, const char __user *buf,
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 3fe8a7edc252..c777a612d932 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -38,6 +38,9 @@
#include "watchdog_core.h" /* For watchdog_dev_register/... */
+#define CREATE_TRACE_POINTS
+#include <trace/events/watchdog.h>
+
static DEFINE_IDA(watchdog_ida);
static int stop_on_reboot = -1;
@@ -163,6 +166,7 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
int ret;
ret = wdd->ops->stop(wdd);
+ trace_watchdog_stop(wdd, ret);
if (ret)
return NOTIFY_BAD;
}
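
Editor's note: watchdog_core.c defines the new trace events by setting CREATE_TRACE_POINTS before including the event header, while other files (watchdog_dev.c below) include the same header without the define and only get the trace_*() call stubs. The header itself follows the usual TRACE_EVENT() boilerplate; a minimal single-event sketch for a hypothetical "foo" subsystem (not the real trace/events/watchdog.h):

/* trace/events/foo.h -- illustrative only */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

TRACE_EVENT(foo_ping,
	TP_PROTO(int id, int err),
	TP_ARGS(id, err),
	TP_STRUCT__entry(
		__field(int, id)
		__field(int, err)
	),
	TP_fast_assign(
		__entry->id = id;
		__entry->err = err;
	),
	TP_printk("id=%d err=%d", __entry->id, __entry->err)
);

#endif /* _TRACE_FOO_H */

/* This part must be outside the include guard */
#include <trace/define_trace.h>
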
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 54903f3c851e..55574ed42504 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -47,6 +47,8 @@
#include "watchdog_core.h"
#include "watchdog_pretimeout.h"
+#include <trace/events/watchdog.h>
+
/* the dev_t structure to store the dynamically allocated watchdog devices */
static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
@@ -157,10 +159,13 @@ static int __watchdog_ping(struct watchdog_device *wdd)
wd_data->last_hw_keepalive = now;
- if (wdd->ops->ping)
+ if (wdd->ops->ping) {
err = wdd->ops->ping(wdd); /* ping the watchdog */
- else
+ trace_watchdog_ping(wdd, err);
+ } else {
err = wdd->ops->start(wdd); /* restart watchdog */
+ trace_watchdog_start(wdd, err);
+ }
if (err == 0)
watchdog_hrtimer_pretimeout_start(wdd);
@@ -259,6 +264,7 @@ static int watchdog_start(struct watchdog_device *wdd)
}
} else {
err = wdd->ops->start(wdd);
+ trace_watchdog_start(wdd, err);
if (err == 0) {
set_bit(WDOG_ACTIVE, &wdd->status);
wd_data->last_keepalive = started_at;
@@ -297,6 +303,7 @@ static int watchdog_stop(struct watchdog_device *wdd)
if (wdd->ops->stop) {
clear_bit(WDOG_HW_RUNNING, &wdd->status);
err = wdd->ops->stop(wdd);
+ trace_watchdog_stop(wdd, err);
} else {
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
@@ -369,6 +376,7 @@ static int watchdog_set_timeout(struct watchdog_device *wdd,
if (wdd->ops->set_timeout) {
err = wdd->ops->set_timeout(wdd, timeout);
+ trace_watchdog_set_timeout(wdd, timeout, err);
} else {
wdd->timeout = timeout;
/* Disable pretimeout if it doesn't fit the new timeout */
@@ -1015,7 +1023,11 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
wd_data->dev.groups = wdd->groups;
wd_data->dev.release = watchdog_core_data_release;
dev_set_drvdata(&wd_data->dev, wdd);
- dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+ err = dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+ if (err) {
+ put_device(&wd_data->dev);
+ return err;
+ }
kthread_init_work(&wd_data->work, watchdog_ping_work);
hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
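Once the header exists, the new events behave like any other tracepoints; a usage sketch with standard tracefs paths, the "watchdog" group name being assumed from the header sketch above:

/*
 *	echo 1 > /sys/kernel/tracing/events/watchdog/enable
 *	cat /sys/kernel/tracing/trace
 *
 * then shows a watchdog_ping/watchdog_start/watchdog_stop line each time
 * the core calls into the instrumented driver ops in this file.
 */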
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index aeadaa07c891..ce7a4a9e4b03 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -342,9 +342,8 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return -EINVAL;
wdat->period = tbl->timer_period;
- wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count;
- wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count;
- wdat->wdd.min_timeout = 1;
+ wdat->wdd.min_timeout = DIV_ROUND_UP(wdat->period * tbl->min_count, 1000);
+ wdat->wdd.max_timeout = wdat->period * tbl->max_count / 1000;
wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED;
wdat->wdd.info = &wdat_wdt_info;
wdat->wdd.ops = &wdat_wdt_ops;
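To make the unit change concrete, the arithmetic with made-up WDAT table values (timer_period is milliseconds per count) works out as follows:

/*
 * Illustrative values only: timer_period = 500 ms, min_count = 1,
 * max_count = 1023.
 *
 *	min_timeout = DIV_ROUND_UP(500 * 1, 1000)  =   1 second
 *	max_timeout = (500 * 1023) / 1000          = 511 seconds
 *
 * i.e. the table limits are now exported as second-granularity timeout
 * bounds (the watchdog core's unit) rather than as hardware heartbeat
 * milliseconds.
 */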
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a65bd92121a5..d5d7c402b651 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -56,7 +56,7 @@ config XEN_MEMORY_HOTPLUG_LIMIT
depends on XEN_HAVE_PVMMU
depends on MEMORY_HOTPLUG
help
- Maxmium amount of memory (in GiB) that a PV guest can be
+ Maximum amount of memory (in GiB) that a PV guest can be
expanded to when using memory hotplug.
A PV guest can have more memory than this limit if is
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 40ef379c28ab..9c286b2a1900 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -44,9 +44,10 @@ struct gntdev_unmap_notify {
};
struct gntdev_grant_map {
+ atomic_t in_use;
struct mmu_interval_notifier notifier;
+ bool notifier_init;
struct list_head next;
- struct vm_area_struct *vma;
int index;
int count;
int flags;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 84b143eef395..4d9a3050de6a 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -286,6 +286,9 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
*/
}
+ if (use_ptemod && map->notifier_init)
+ mmu_interval_notifier_remove(&map->notifier);
+
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
@@ -298,7 +301,7 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
struct gntdev_grant_map *map = data;
- unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+ unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
(1 << _GNTMAP_guest_avail0);
u64 pte_maddr;
@@ -367,8 +370,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
- if (!use_ptemod)
- alloced++;
+ alloced++;
} else if (!err)
err = -EINVAL;
@@ -377,8 +379,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
if (use_ptemod) {
if (map->kmap_ops[i].status == GNTST_okay) {
- if (map->map_ops[i].status == GNTST_okay)
- alloced++;
+ alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
} else if (!err)
err = -EINVAL;
@@ -394,8 +395,14 @@ static void __unmap_grant_pages_done(int result,
unsigned int i;
struct gntdev_grant_map *map = data->data;
unsigned int offset = data->unmap_ops - map->unmap_ops;
+ int successful_unmaps = 0;
+ int live_grants;
for (i = 0; i < data->count; i++) {
+ if (map->unmap_ops[offset + i].status == GNTST_okay &&
+ map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
+ successful_unmaps++;
+
WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("unmap handle=%d st=%d\n",
@@ -403,6 +410,10 @@ static void __unmap_grant_pages_done(int result,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
if (use_ptemod) {
+ if (map->kunmap_ops[offset + i].status == GNTST_okay &&
+ map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
+ successful_unmaps++;
+
WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("kunmap handle=%u st=%d\n",
@@ -411,11 +422,15 @@ static void __unmap_grant_pages_done(int result,
map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
}
}
+
/*
* Decrease the live-grant counter. This must happen after the loop to
* prevent premature reuse of the grants by gnttab_mmap().
*/
- atomic_sub(data->count, &map->live_grants);
+ live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
+ if (WARN_ON(live_grants < 0))
+ pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
+ __func__, live_grants, successful_unmaps);
/* Release reference taken by __unmap_grant_pages */
gntdev_put_map(NULL, map);
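With the mapping side above now counting map_ops and kmap_ops separately, the completion handler must subtract exactly what was added; a worked example of the new accounting, assuming use_ptemod and a three-page batch where every operation succeeds:

/*
 * map:   3 successful map_ops + 3 successful kmap_ops -> live_grants += 6
 * unmap: 3 successful unmap_ops + 3 successful kunmap_ops
 *        -> successful_unmaps = 6, live_grants -= 6
 *
 * live_grants returns to 0, so the WARN_ON(live_grants < 0) only fires
 * if the two sides ever disagree, instead of the old blanket
 * atomic_sub(data->count, ...) which ignored per-operation success.
 */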
@@ -496,11 +511,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
struct gntdev_priv *priv = file->private_data;
pr_debug("gntdev_vma_close %p\n", vma);
- if (use_ptemod) {
- WARN_ON(map->vma != vma);
- mmu_interval_notifier_remove(&map->notifier);
- map->vma = NULL;
- }
+
vma->vm_private_data = NULL;
gntdev_put_map(priv, map);
}
@@ -528,29 +539,30 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
struct gntdev_grant_map *map =
container_of(mn, struct gntdev_grant_map, notifier);
unsigned long mstart, mend;
+ unsigned long map_start, map_end;
if (!mmu_notifier_range_blockable(range))
return false;
+ map_start = map->pages_vm_start;
+ map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
+
/*
* If the VMA is split or otherwise changed the notifier is not
* updated, but we don't want to process VA's outside the modified
* VMA. FIXME: It would be much more understandable to just prevent
* modifying the VMA in the first place.
*/
- if (map->vma->vm_start >= range->end ||
- map->vma->vm_end <= range->start)
+ if (map_start >= range->end || map_end <= range->start)
return true;
- mstart = max(range->start, map->vma->vm_start);
- mend = min(range->end, map->vma->vm_end);
+ mstart = max(range->start, map_start);
+ mend = min(range->end, map_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end,
- range->start, range->end, mstart, mend);
- unmap_grant_pages(map,
- (mstart - map->vma->vm_start) >> PAGE_SHIFT,
- (mend - mstart) >> PAGE_SHIFT);
+ map->index, map->count, map_start, map_end,
+ range->start, range->end, mstart, mend);
+ unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
+ (mend - mstart) >> PAGE_SHIFT);
return true;
}
@@ -1030,18 +1042,15 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
return -EINVAL;
pr_debug("map %d+%d at %lx (pgoff %lx)\n",
- index, count, vma->vm_start, vma->vm_pgoff);
+ index, count, vma->vm_start, vma->vm_pgoff);
mutex_lock(&priv->lock);
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
- if (use_ptemod && map->vma)
+ if (!atomic_add_unless(&map->in_use, 1, 1))
goto unlock_out;
- if (atomic_read(&map->live_grants)) {
- err = -EAGAIN;
- goto unlock_out;
- }
+
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
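The atomic_add_unless() gate replaces the old map->vma and live_grants checks with a single single-use flag; its semantics, with in_use starting at 0:

/*
 * atomic_add_unless(v, a, u) adds a to *v unless *v == u, and returns
 * non-zero only if the add happened:
 *
 *	first mmap():  in_use 0 -> 1, returns 1, mapping proceeds
 *	second mmap(): in_use already 1, returns 0, the caller takes
 *	               unlock_out and fails
 *
 * so a grant map can only ever be mmap()ed once during its lifetime.
 */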
@@ -1062,15 +1071,16 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map->flags |= GNTMAP_readonly;
}
+ map->pages_vm_start = vma->vm_start;
+
if (use_ptemod) {
- map->vma = vma;
err = mmu_interval_notifier_insert_locked(
&map->notifier, vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
- if (err) {
- map->vma = NULL;
+ if (err)
goto out_unlock_put;
- }
+
+ map->notifier_init = true;
}
mutex_unlock(&priv->lock);
@@ -1087,7 +1097,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
*/
mmu_interval_read_begin(&map->notifier);
- map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -1116,13 +1125,8 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod) {
+ if (use_ptemod)
unmap_grant_pages(map, 0, map->count);
- if (map->vma) {
- mmu_interval_notifier_remove(&map->notifier);
- map->vma = NULL;
- }
- }
gntdev_put_map(priv, map);
return err;
}
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 8973fc1e9ccc..daa525df7bdc 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -25,31 +25,46 @@ struct xen_grant_dma_data {
bool broken;
};
-static DEFINE_XARRAY(xen_grant_dma_devices);
+static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)
static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
- return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
+ return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
}
static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
- return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
+ return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
}
static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
struct xen_grant_dma_data *data;
+ unsigned long flags;
- xa_lock(&xen_grant_dma_devices);
+ xa_lock_irqsave(&xen_grant_dma_devices, flags);
data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
- xa_unlock(&xen_grant_dma_devices);
+ xa_unlock_irqrestore(&xen_grant_dma_devices, flags);
return data;
}
+static int store_xen_grant_dma_data(struct device *dev,
+ struct xen_grant_dma_data *data)
+{
+ unsigned long flags;
+ int ret;
+
+ xa_lock_irqsave(&xen_grant_dma_devices, flags);
+ ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
+ GFP_ATOMIC));
+ xa_unlock_irqrestore(&xen_grant_dma_devices, flags);
+
+ return ret;
+}
+
/*
* DMA ops for Xen frontends (e.g. virtio).
*
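Since Xen grants are always 4 KiB frames, XEN_PAGE_SHIFT is 12 regardless of the kernel's PAGE_SHIFT; a worked example of the encoding above:

/*
 *	grant_to_dma(5)  = (1ULL << 63) | (5 << 12) = 0x8000000000005000
 *	dma_to_grant(0x8000000000005000)            = 5
 *
 * On a 16K/64K-page arm64 kernel PAGE_SHIFT is 14/16, so the previous
 * code would have encoded and decoded grant references with the wrong
 * stride; XEN_PAGE_SHIFT keeps the DMA address <-> grant mapping stable
 * for any kernel page size.
 */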
@@ -64,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
- unsigned int i, n_pages = PFN_UP(size);
+ unsigned int i, n_pages = XEN_PFN_UP(size);
unsigned long pfn;
grant_ref_t grant;
void *ret;
@@ -76,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
if (unlikely(data->broken))
return NULL;
- ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
+ ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
if (!ret)
return NULL;
pfn = virt_to_pfn(ret);
if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
- free_pages_exact(ret, n_pages * PAGE_SIZE);
+ free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
return NULL;
}
@@ -101,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
struct xen_grant_dma_data *data;
- unsigned int i, n_pages = PFN_UP(size);
+ unsigned int i, n_pages = XEN_PFN_UP(size);
grant_ref_t grant;
data = find_xen_grant_dma_data(dev);
@@ -123,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
gnttab_free_grant_reference_seq(grant, n_pages);
- free_pages_exact(vaddr, n_pages * PAGE_SIZE);
+ free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
}
static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
@@ -153,7 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
- unsigned int i, n_pages = PFN_UP(size);
+ unsigned long dma_offset = xen_offset_in_page(offset),
+ pfn_offset = XEN_PFN_DOWN(offset);
+ unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
grant_ref_t grant;
dma_addr_t dma_handle;
@@ -172,10 +189,11 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
for (i = 0; i < n_pages; i++) {
gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
- xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
+ pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+ dir == DMA_TO_DEVICE);
}
- dma_handle = grant_to_dma(grant) + offset;
+ dma_handle = grant_to_dma(grant) + dma_offset;
return dma_handle;
}
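The offset handling above matters for buffers that do not start on a Xen page boundary; made-up numbers show the effect:

/*
 * Illustrative values: offset = 0x1800, size = 0x2000 (8 KiB),
 * XEN_PAGE_SIZE = 0x1000.
 *
 *	dma_offset = xen_offset_in_page(0x1800)  = 0x800
 *	pfn_offset = XEN_PFN_DOWN(0x1800)        = 1
 *	n_pages    = XEN_PFN_UP(0x800 + 0x2000)  = 3
 *
 * so all three 4 KiB frames the buffer touches are granted, starting at
 * the page's Xen PFN + 1, and the returned handle keeps the 0x800
 * in-page offset; the old PFN_UP(size) would have granted only two.
 */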
@@ -185,7 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
unsigned long attrs)
{
struct xen_grant_dma_data *data;
- unsigned int i, n_pages = PFN_UP(size);
+ unsigned long dma_offset = xen_offset_in_page(dma_handle);
+ unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
grant_ref_t grant;
if (WARN_ON(dir == DMA_NONE))
@@ -273,72 +292,91 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.dma_supported = xen_grant_dma_supported,
};
-bool xen_is_grant_dma_device(struct device *dev)
+static bool xen_is_dt_grant_dma_device(struct device *dev)
{
struct device_node *iommu_np;
bool has_iommu;
- /* XXX Handle only DT devices for now */
- if (!dev->of_node)
- return false;
-
iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
- has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
+ has_iommu = iommu_np &&
+ of_device_is_compatible(iommu_np, "xen,grant-dma");
of_node_put(iommu_np);
return has_iommu;
}
+bool xen_is_grant_dma_device(struct device *dev)
+{
+ /* XXX Handle only DT devices for now */
+ if (dev->of_node)
+ return xen_is_dt_grant_dma_device(dev);
+
+ return false;
+}
+
bool xen_virtio_mem_acc(struct virtio_device *dev)
{
- if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))
+ if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
return true;
return xen_is_grant_dma_device(dev->dev.parent);
}
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_dt_grant_init_backend_domid(struct device *dev,
+ struct xen_grant_dma_data *data)
{
- struct xen_grant_dma_data *data;
struct of_phandle_args iommu_spec;
- data = find_xen_grant_dma_data(dev);
- if (data) {
- dev_err(dev, "Xen grant DMA data is already created\n");
- return;
- }
-
- /* XXX ACPI device unsupported for now */
- if (!dev->of_node)
- goto err;
-
if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
0, &iommu_spec)) {
dev_err(dev, "Cannot parse iommus property\n");
- goto err;
+ return -ESRCH;
}
if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
iommu_spec.args_count != 1) {
dev_err(dev, "Incompatible IOMMU node\n");
of_node_put(iommu_spec.np);
- goto err;
+ return -ESRCH;
}
of_node_put(iommu_spec.np);
+ /*
+ * The endpoint ID here means the ID of the domain where the
+ * corresponding backend is running
+ */
+ data->backend_domid = iommu_spec.args[0];
+
+ return 0;
+}
+
+void xen_grant_setup_dma_ops(struct device *dev)
+{
+ struct xen_grant_dma_data *data;
+
+ data = find_xen_grant_dma_data(dev);
+ if (data) {
+ dev_err(dev, "Xen grant DMA data is already created\n");
+ return;
+ }
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
goto err;
- /*
- * The endpoint ID here means the ID of the domain where the corresponding
- * backend is running
- */
- data->backend_domid = iommu_spec.args[0];
+ if (dev->of_node) {
+ if (xen_dt_grant_init_backend_domid(dev, data))
+ goto err;
+ } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
+ dev_info(dev, "Using dom0 as backend\n");
+ data->backend_domid = 0;
+ } else {
+ /* XXX ACPI device unsupported for now */
+ goto err;
+ }
- if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
- GFP_KERNEL))) {
+ if (store_xen_grant_dma_data(dev, data)) {
dev_err(dev, "Cannot store Xen grant DMA data\n");
goto err;
}
@@ -348,9 +386,20 @@ void xen_grant_setup_dma_ops(struct device *dev)
return;
err:
+ devm_kfree(dev, data);
dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}
+bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
+{
+ bool ret = xen_virtio_mem_acc(dev);
+
+ if (ret)
+ xen_grant_setup_dma_ops(dev->dev.parent);
+
+ return ret;
+}
+
MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index e88e8f6f0a33..fae50a24630b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -282,7 +282,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
struct page, lru);
struct privcmd_mmap_entry *msg = page_address(page);
- vma = find_vma(mm, msg->va);
+ vma = vma_lookup(mm, msg->va);
rc = -EINVAL;
if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
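find_vma() and vma_lookup() differ precisely in whether the address must lie inside the returned VMA; with an illustrative mapping:

/*
 * VMA spans [0x7f0000000000, 0x7f0000010000):
 *
 *	find_vma(mm, 0x7effffff0000)   -> returns that VMA (first one whose
 *	                                  vm_end is above the address, even
 *	                                  though the address is below it)
 *	vma_lookup(mm, 0x7effffff0000) -> returns NULL
 *	vma_lookup(mm, 0x7f0000000000) -> returns the VMA
 *
 * vma_lookup() therefore states the intent directly: only accept a VMA
 * that actually contains msg->va, which the vm_start check then pins to
 * the start of the mapping.
 */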
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index bde63ef677b8..d171091eec12 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(passthrough,
" frontend (for example, a device at 06:01.b will still appear at\n"\
" 06:01.b to the frontend). This is similar to how Xen 2.0.x\n"\
" exposed PCI devices to its driver domains. This may be required\n"\
- " for drivers which depend on finding their hardward in certain\n"\
+ " for drivers which depend on finding their hardware in certain\n"\
" bus/slot locations.");
static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index d3dcda344989..6106ed93817d 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -333,18 +333,6 @@ static int32_t scsiback_result(int32_t result)
case DID_TRANSPORT_FAILFAST:
host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST;
break;
- case DID_TARGET_FAILURE:
- host_status = XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE;
- break;
- case DID_NEXUS_FAILURE:
- host_status = XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE;
- break;
- case DID_ALLOC_FAILURE:
- host_status = XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE;
- break;
- case DID_MEDIUM_ERROR:
- host_status = XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR;
- break;
case DID_TRANSPORT_MARGINAL:
host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL;
break;